

Python tensorflow.executing_eagerly Function Code Examples

This article collects typical usage examples of the Python function tensorflow.executing_eagerly. If you are wondering what executing_eagerly does, how to call it, or what real-world usage looks like, the curated examples below should help.


The sections below present 15 code examples of the executing_eagerly function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code examples.
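
Before the examples, here is a minimal sketch of the function itself, assuming TensorFlow 1.x, which these examples target (in TensorFlow 2.x eager execution is on by default):

import tensorflow as tf

# In TF 1.x graph mode (the default), this returns False.
print(tf.executing_eagerly())  # False

# Opting in to eager execution (available since TF 1.7) flips the result.
# Note this must run at program startup, before any graph is built:
# tf.enable_eager_execution()
# print(tf.executing_eagerly())  # True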

Example 1: _value_and_gradients

def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`."""
  with tf.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]):
    def _convert_to_tensor(x, name):
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)

    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')

    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tf.executing_eagerly():
        # Ensure we disable bijector caching in eager mode.
        # TODO(b/72831017): Remove this once bijector caching is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]

    result = _convert_to_tensor(result, 'fn_result')

    if grads is not None:
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads

    if tf.executing_eagerly():
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        def make_fn_slice(i):
          """Needed to prevent `cell-var-from-loop` pylint warning."""
          return lambda *args: fn(*args)[i]
        grads = [
            tfe.gradients_function(make_fn_slice(i))(*fn_arg_list)[i]
            for i in range(len(result))
        ]
      else:
        grads = tfe.gradients_function(fn)(*fn_arg_list)
    else:
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        grads = [tf.gradients(result[i], fn_arg_list[i])[0]
                 for i in range(len(result))]
      else:
        grads = tf.gradients(result, fn_arg_list)

    return result, grads
Author: asudomoeva, Project: probability, Lines: 50, Source: util.py
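
The heart of Example 1 is that the two execution modes need different gradient APIs: tf.gradients only works on graph tensors, while eager code uses tfe.gradients_function (from tf.contrib.eager) or a GradientTape. A stripped-down sketch of the same branch, assuming TF 1.x with tf.contrib available (value_and_grad is a hypothetical helper, not part of the project above):

import tensorflow as tf
tfe = tf.contrib.eager

def value_and_grad(fn, x):
  """Returns (fn(x), d fn(x) / d x), branching on the execution mode."""
  if tf.executing_eagerly():
    # tf.gradients is unsupported in eager mode; gradients_function wraps
    # fn into a function that returns gradients w.r.t. each argument.
    return fn(x), tfe.gradients_function(fn)(x)[0]
  y = fn(x)
  return y, tf.gradients(y, x)[0]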

Example 2: testEventShape

  def testEventShape(self):
    # Shape is always known for reshaping in eager mode, so we skip these tests.
    if tf.executing_eagerly():
      return

    event_shape_in, event_shape_out = self.build_shapes([2, 3], [6])
    bijector = tfb.Reshape(
        event_shape_out=event_shape_out,
        event_shape_in=event_shape_in,
        validate_args=True)

    self.assertEqual(
        bijector.forward_event_shape(tf.TensorShape([4, 2, 3])).as_list(),
        [4, None])
    self.assertEqual(
        bijector.forward_event_shape(tf.TensorShape([None, 2, 3])).as_list(),
        [None, None])
    self.assertEqual(
        bijector.inverse_event_shape(tf.TensorShape([4, 6])).as_list(),
        [4, None, None])
    self.assertEqual(
        bijector.inverse_event_shape(tf.TensorShape([None, 6])).as_list(),
        [None, None, None])
    # If the input shape is totally unknown, there's nothing we can do!
    self.assertIsNone(
        bijector.forward_event_shape(tf.TensorShape(None)).ndims)
Author: asudomoeva, Project: probability, Lines: 26, Source: reshape_test.py

Example 3: testSampleWithSameSeed

  def testSampleWithSameSeed(self):
    if tf.executing_eagerly():
      return
    scale = make_pd(1., 2)
    df = 4

    chol_w = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=False)

    x = self.evaluate(chol_w.sample(1, seed=42))
    chol_x = [chol(x[0])]

    full_w = tfd.Wishart(df, scale, input_output_cholesky=False)
    self.assertAllClose(x, self.evaluate(full_w.sample(1, seed=42)))

    chol_w_chol = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(chol_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(chol_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))

    full_w_chol = tfd.Wishart(df, scale=scale, input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(full_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(full_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))
Author: asudomoeva, Project: probability, Lines: 25, Source: wishart_test.py

Example 4: _call

  def _call(self, *args, **kwargs):
    """Entry point when a module is called to connect it to the graph.

    This is the entry point when users connect a Module into the Graph. The
    underlying _build method will have been wrapped in a Template by the
    constructor, and we call this template with the provided inputs here.

    Note we use `_call` instead of `__call__` to allow instance level monkey
    patching (see `defun`).

    Args:
      *args: Arguments for underlying _build method.
      **kwargs: Keyword arguments for underlying _build method.

    Returns:
      The result of the underlying _build method.
    """
    self._check_init_called()
    self._check_same_graph()
    with self._capture_variables():
      outputs, subgraph_name_scope = self._template(*args, **kwargs)
    self._is_connected = True
    if not tf.executing_eagerly():
      # In eager mode the module is called a lot more frequently than in graph
      # mode (for each training step) and so we don't keep track of connected
      # subgraphs (since there will be orders of magnitude more of them).
      self._add_connected_subgraph(self._build, outputs, subgraph_name_scope,
                                   *args, **kwargs)
    return outputs
Author: ccchang0111, Project: sonnet, Lines: 29, Source: base.py

Example 5: testRegularizers

  def testRegularizers(self, trainable, state_size):
    batch_size = 6

    # Set the attribute on the class, since we can't set properties of
    # abstract classes.
    snt.RNNCore.state_size = state_size
    flat_state_size = nest.flatten(state_size)
    core = snt.RNNCore(name="dummy_core")
    flat_regularizer = ([tf.contrib.layers.l1_regularizer(scale=0.5)] *
                        len(flat_state_size))
    trainable_regularizers = nest.pack_sequence_as(
        structure=state_size, flat_sequence=flat_regularizer)

    core.initial_state(batch_size, dtype=tf.float32, trainable=trainable,
                       trainable_regularizers=trainable_regularizers)

    graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if not trainable:
      self.assertFalse(graph_regularizers)
    else:
      self.assertEqual(len(graph_regularizers), len(flat_state_size))
      if not tf.executing_eagerly():
        for i in range(len(flat_state_size)):
          self.assertRegexpMatches(
              graph_regularizers[i].name, ".*l1_regularizer.*")
Author: ccchang0111, Project: sonnet, Lines: 25, Source: rnn_core_test.py

Example 6: testInitialStateNames

  def testInitialStateNames(self):
    if tf.executing_eagerly():
      return self.skipTest("Tensor.name is meaningless in eager mode.")

    hidden_size_a = 3
    hidden_size_b = 4
    batch_size = 5
    deep_rnn = snt.DeepRNN(
        [snt.LSTM(hidden_size_a, name="a"), snt.LSTM(hidden_size_b, name="b")])
    deep_rnn_state = deep_rnn.initial_state(batch_size, trainable=True)
    self.assertEqual(
        deep_rnn_state[0][0].name,
        "deep_rnn_initial_state/a_initial_state/state_hidden_tiled:0")
    self.assertEqual(
        deep_rnn_state[0][1].name,
        "deep_rnn_initial_state/a_initial_state/state_cell_tiled:0")
    self.assertEqual(
        deep_rnn_state[1][0].name,
        "deep_rnn_initial_state/b_initial_state/state_hidden_tiled:0")
    self.assertEqual(
        deep_rnn_state[1][1].name,
        "deep_rnn_initial_state/b_initial_state/state_cell_tiled:0")

    other_start_state = deep_rnn.initial_state(
        batch_size, trainable=True, name="blah")
    self.assertEqual(other_start_state[0][0].name,
                     "blah/a_initial_state/state_hidden_tiled:0")
    self.assertEqual(other_start_state[0][1].name,
                     "blah/a_initial_state/state_cell_tiled:0")
    self.assertEqual(other_start_state[1][0].name,
                     "blah/b_initial_state/state_hidden_tiled:0")
    self.assertEqual(other_start_state[1][1].name,
                     "blah/b_initial_state/state_cell_tiled:0")
Author: ccchang0111, Project: sonnet, Lines: 33, Source: basic_rnn_test.py
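
Examples 2, 3, and 6 all bail out early in eager mode because the feature under test (partially known static shapes, graph-level sampling seeds, tensor names) exists only in graph mode. For instance, reading .name on an eager tensor raises an AttributeError, which is why the test above calls skipTest. A minimal sketch, assuming TF 1.x with eager execution enabled:

import tensorflow as tf
tf.enable_eager_execution()  # TF 1.x only; eager is the default in TF 2.x

t = tf.constant([1.0, 2.0])
try:
  print(t.name)
except AttributeError as e:
  print(e)  # "Tensor.name is meaningless when eager execution is enabled."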

Example 7: testActivateBiasFlags

  def testActivateBiasFlags(self, activate_final, use_bias, use_dropout):
    mlp = snt.nets.MLP(name=self.module_name,
                       output_sizes=self.output_sizes,
                       activate_final=activate_final,
                       use_bias=use_bias,
                       use_dropout=use_dropout)

    inputs = tf.random_normal(
        dtype=tf.float32, shape=[self.batch_size, self.input_size])
    net = mlp(inputs)

    if not tf.executing_eagerly():
      if activate_final:
        self.assertEqual(net.op.type, "Relu")
      elif use_bias:
        self.assertEqual(net.op.type, "Add")
      else:
        self.assertEqual(net.op.type, "MatMul")

    variables = mlp.get_variables()

    if use_bias:
      self.assertEqual(len(variables), len(self.output_sizes) * 2)
    else:
      self.assertEqual(len(variables), len(self.output_sizes))
Author: ccchang0111, Project: sonnet, Lines: 25, Source: mlp_test.py

Example 8: test_copy_layers

  def test_copy_layers(self):
    """Test copying layers."""
    tg = dc.models.TensorGraph()
    features = Feature(shape=(None, 10))
    dense = Dense(
        10, in_layers=features, biases_initializer=tf.random_normal_initializer)
    constant = Constant(10.0)
    output = dense + constant
    tg.add_output(output)
    tg.set_loss(output)
    tg.fit_generator([])
    replacements = {constant: Constant(20.0)}
    copy = output.copy(replacements, tg)
    assert isinstance(copy, Add)
    assert isinstance(copy.in_layers[0], Dense)
    assert isinstance(copy.in_layers[0].in_layers[0], Feature)
    assert copy.in_layers[1] == replacements[constant]
    variables = tg.get_layer_variables(dense)
    with tg._get_tf("Graph").as_default():
      if tf.executing_eagerly():
        values = [v.numpy() for v in variables]
      else:
        values = tg.session.run(variables)
    for v1, v2 in zip(values, copy.in_layers[0].variable_values):
      assert np.array_equal(v1, v2)
Author: ktaneishi, Project: deepchem, Lines: 25, Source: test_tensor_graph.py
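
Example 8's mode check around reading variable values is a common idiom: eager variables expose .numpy(), while graph variables must be fetched through a session. The same idiom in isolation (a sketch; read_values is a hypothetical helper, and in graph mode a live session must be supplied):

import tensorflow as tf

def read_values(variables, session=None):
  """Fetches concrete numpy values from a list of tf.Variable objects."""
  if tf.executing_eagerly():
    return [v.numpy() for v in variables]
  # Graph mode: evaluating a variable requires a session.
  return session.run(variables)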

Example 9: _set_seed

def _set_seed(seed):
  """Helper which uses graph seed if using TFE."""
  # TODO(b/68017812): Deprecate once TFE supports seed.
  if tf.executing_eagerly():
    tf.set_random_seed(seed)
    return None
  return seed
Author: asudomoeva, Project: probability, Lines: 7, Source: replica_exchange_mc_test.py
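
The helper in Example 9 exists because, at the time, eager execution did not honor op-level seeds (see the TODO referencing b/68017812), so the tests fall back to the graph-level seed and pass seed=None to the op. Usage would look roughly like this (a sketch; dist stands in for any tfp distribution):

seed = _set_seed(42)  # Eager mode: sets the graph-level seed, returns None.
                      # Graph mode: returns 42 unchanged for op-level use.
samples = dist.sample(100, seed=seed)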

Example 10: testCustomGetter

  def testCustomGetter(self):
    custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
    module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                                kernel_shapes=self.kernel_shapes,
                                rates=self.rates,
                                strides=self.strides,
                                paddings=self.paddings,
                                custom_getter=custom_getter)

    input_shape = [10, 100, 100, 3]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly():
      with tf.GradientTape() as tape0:
        out0 = module(input_to_net)
      with tf.GradientTape() as tape1:
        with custom_getter:
          out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tape0.gradient(out0, all_vars)
      out1_grads = tape1.gradient(out1, all_vars)

    else:
      out0 = module(input_to_net)
      with custom_getter:
        out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tf.gradients(out0, all_vars)
      out1_grads = tf.gradients(out1, all_vars)

    for grad in out0_grads:
      self.assertNotEqual(None, grad)
    self.assertEqual([None] * len(out1_grads), out1_grads)
Author: ccchang0111, Project: sonnet, Lines: 33, Source: convnet_test.py
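
Example 10 puts the two gradient APIs side by side: tf.GradientTape records operations as they execute in eager mode, while tf.gradients walks an already-built graph. A minimal eager-side sketch, assuming eager execution is enabled:

import tensorflow as tf
tf.enable_eager_execution()  # TF 1.x only; eager is the default in TF 2.x

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)  # Constants are not tracked automatically; variables are.
  y = x * x
print(tape.gradient(y, x).numpy())  # 6.0, i.e. d(x^2)/dx at x = 3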

Example 11: testReprWorksCorrectlyMultivariate

  def testReprWorksCorrectlyMultivariate(self):
    mvn_static = tfd.MultivariateNormalDiag(
        loc=np.zeros([2, 2]), name="MVN")
    self.assertEqual(
        repr(mvn_static),
        "<tfp.distributions.MultivariateNormalDiag"
        " 'MVN/'"
        " batch_shape=(2,)"
        " event_shape=(2,)"
        " dtype=float64>")

    # There's no notion of partially known shapes in eager mode, so exit
    # early.
    if tf.executing_eagerly():
      return

    mvn_dynamic = tfd.MultivariateNormalDiag(
        loc=tf.placeholder_with_default(
            input=np.ones((3, 3), dtype=np.float32), shape=[None, 3]),
        name="MVN2")
    self.assertEqual(
        repr(mvn_dynamic),
        "<tfp.distributions.MultivariateNormalDiag"
        " 'MVN2/'"
        " batch_shape=(?,)"  # Partially known.
        " event_shape=(3,)"
        " dtype=float32>")
Author: asudomoeva, Project: probability, Lines: 27, Source: distribution_test.py

Example 12: testDataFormat

  def testDataFormat(self, module, data_format):
    net = module(
        output_channels=self.output_channels,
        kernel_shapes=self.kernel_shapes,
        strides=self.strides,
        paddings=self.paddings,
        data_format=data_format)

    input_height, input_width, input_channels = 100, 100, 3
    batch_size = 10
    final_channel = self.output_channels[-1]
    if data_format == "NHWC":
      input_shape = [batch_size, input_height, input_width, input_channels]
      expected_output_shape = [
          batch_size, input_height, input_width, final_channel
      ]

    else:
      input_shape = [batch_size, input_channels, input_height, input_width]
      expected_output_shape = [
          batch_size, final_channel, input_height, input_width
      ]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly() and data_format == "NCHW":
      expected_exception = (
          tf.errors.UnimplementedError
          if module == snt.nets.ConvNet2D else tf.errors.InvalidArgumentError)
      with self.assertRaisesRegexp(expected_exception, "only supports NHWC"):
        output = net(input_to_net)

    else:
      output = net(input_to_net)
      self.assertEqual(output.get_shape().as_list(), expected_output_shape)
Author: ccchang0111, Project: sonnet, Lines: 34, Source: convnet_test.py

Example 13: testNormalizations

  def testNormalizations(self, conv_ctor, norm_ctor, norm_kwargs):
    if tf.executing_eagerly():
      self.skipTest("Cannot test normalization correctness in Eager.")
    module = conv_ctor(
        output_channels=[16, 16],
        kernel_shapes=(3,),
        strides=(1,),
        paddings=("SAME",),
        normalization_ctor=norm_ctor,
        normalization_kwargs=norm_kwargs,
        normalize_final=True,
        activate_final=False)  # No final activation, that would un-normalize.
    inputs = tf.random_uniform([16, 48, 64, 3])
    output = module(inputs)
    with tf.train.SingularMonitoredSession() as session:
      output_np = session.run(output)

    # Convert the output into something where all the dimensions that should be
    # jointly normalized are combined to be on axis=1.
    if "axis" in norm_kwargs and norm_kwargs["axis"] == [1, 2]:
      # Check for instance normalization - combine spatial dimensions.
      output_np = np.reshape(output_np, [16, -1, 3])
    else:
      # Check for layer normalization - combine all non-batch dimensions.
      output_np = np.reshape(output_np, [16, -1])
    mean = np.mean(output_np, axis=1)
    std_dev = np.std(output_np, axis=1)
    # High tolerance - summing across big images, this normalization is fairly
    # approximate.
    self.assertAllClose(mean, np.zeros_like(mean), atol=2e-2)
    self.assertAllClose(std_dev, np.ones_like(std_dev), atol=2e-2)
Author: ccchang0111, Project: sonnet, Lines: 31, Source: convnet_test.py

Example 14: _check_same_graph

  def _check_same_graph(self):
    """Checks that the module is not being connect to multiple Graphs.

    An instance of a Sonnet module 'owns' the variables it contains, and permits
    seamless variable sharing. As such, connecting a single module instance to
    multiple Graphs is not possible - this function will raise an error should
    that occur.

    Raises:
      DifferentGraphError: if the module is connected to a different Graph than
        it was previously used in.
    """
    with ops.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that
      # case what we want is information about where the function will be
      # called, not where the function is being built.
      current_graph = tf.get_default_graph()
      will_call_in_eager_context = tf.executing_eagerly()

    if self._graph is None:
      self._graph = current_graph
      self._set_module_info()

    if not will_call_in_eager_context:
      # Same-graph checks only make sense when calling from graph mode (in
      # eager mode there is a single process-level context where all modules
      # are created).
      if self._graph != current_graph:
        raise DifferentGraphError("Cannot connect module to multiple Graphs.")
Author: ccchang0111, Project: sonnet, Lines: 29, Source: base.py
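
Example 14 relies on ops.init_scope to ask about the context the module will be called from, rather than the graph-building context it may currently be tracing in. A toy sketch of that distinction, assuming TF 1.x with tf.contrib.eager available (in TF 2.x the analogues are tf.function and tf.init_scope):

import tensorflow as tf
from tensorflow.python.framework import ops  # same private import as above

tf.enable_eager_execution()  # TF 1.x only; eager is the default in TF 2.x

@tf.contrib.eager.defun
def f():
  # The function body is traced as a graph, so this reports False...
  print("inside defun:", tf.executing_eagerly())
  with ops.init_scope():
    # ...but init_scope lifts us to the calling context, which is eager.
    print("inside init_scope:", tf.executing_eagerly())

f()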

Example 15: testRegularizersInRegularizationLosses

  def testRegularizersInRegularizationLosses(self, transpose, use_bias):
    if transpose:
      module = functools.partial(snt.nets.ConvNet2DTranspose,
                                 output_shapes=[[100, 100]])
    else:
      module = snt.nets.ConvNet2D
    if use_bias:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
                      "b": tf.contrib.layers.l2_regularizer(scale=0.5)}
    else:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}

    model = module(output_channels=self.output_channels,
                   kernel_shapes=self.kernel_shapes,
                   strides=self.strides,
                   paddings=self.paddings,
                   use_bias=use_bias,
                   regularizers=regularizers)

    input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
    model(input_to_net)

    regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    expected_num_regularizers = 3 * (2 if use_bias else 1)
    self.assertLen(regularizers, expected_num_regularizers)
    if not tf.executing_eagerly():
      self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
      if use_bias:
        self.assertRegexpMatches(regularizers[1].name, ".*l2_regularizer.*")
Author: ccchang0111, Project: sonnet, Lines: 29, Source: convnet_test.py


Note: The tensorflow.executing_eagerly examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any use or distribution must follow the corresponding project's License. Do not reproduce without permission.