

Python tensorflow.layers Code Examples

This article collects typical usage examples of tensorflow.layers in Python. If you are wondering what tensorflow.layers does, how to call it, or what real-world code using it looks like, the examples selected below should help; you can also explore further usage examples from the tensorflow package.


The following presents 15 code examples of tensorflow.layers, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.

Example 1: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def __init__(self, data_format):
    """Creates a model for classifying a hand-written digit.

    Args:
      data_format: Either 'channels_first' or 'channels_last'.
        'channels_first' is typically faster on GPUs while 'channels_last' is
        typically faster on CPUs. See
        https://www.tensorflow.org/performance/performance_guide#data_formats
    """
    super(MNISTModel, self).__init__(name='')
    if data_format == 'channels_first':
      self._input_shape = [-1, 1, 28, 28]
    else:
      assert data_format == 'channels_last'
      self._input_shape = [-1, 28, 28, 1]
    self.conv1 = self.track_layer(
        tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu))
    self.conv2 = self.track_layer(
        tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu))
    self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu))
    self.fc2 = self.track_layer(tf.layers.Dense(10))
    self.dropout = self.track_layer(tf.layers.Dropout(0.5))
    self.max_pool2d = self.track_layer(
        tf.layers.MaxPooling2D(
            (2, 2), (2, 2), padding='SAME', data_format=data_format)) 
Author: floydhub, Project: dockerfiles, Lines: 27, Source: mnist_eager.py
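
As a usage sketch (hypothetical driver code, not taken from mnist_eager.py), the data_format choice described in the docstring is typically made from the available hardware:

# Hypothetical driver code: pick the faster layout for the hardware,
# as the docstring above recommends, then build the model.
import tensorflow as tf

data_format = ('channels_first' if tf.test.is_gpu_available()
               else 'channels_last')
model = MNISTModel(data_format)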

Example 2: _conv2d_impl

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,
                   strides, padding, kernel_initializer):
    if self.use_tf_layers:
      return conv_layers.conv2d(input_layer, filters, kernel_size, strides,
                                padding, self.channel_pos,
                                kernel_initializer=kernel_initializer,
                                use_bias=False)
    else:
      weights_shape = [kernel_size[0], kernel_size[1], num_channels_in, filters]
      # We use the name 'conv2d/kernel' so the variable has the same name as its
      # tf.layers equivalent. This way, if a checkpoint is written when
      # self.use_tf_layers == True, it can be loaded when
      # self.use_tf_layers == False, and vice versa.
      weights = self.get_variable('conv2d/kernel', weights_shape,
                                  self.variable_dtype, self.dtype,
                                  initializer=kernel_initializer)
      if self.data_format == 'NHWC':
        strides = [1] + strides + [1]
      else:
        strides = [1, 1] + strides
      return tf.nn.conv2d(input_layer, weights, strides, padding,
                          data_format=self.data_format) 
Author: tensorpack, Project: benchmarks, Lines: 24, Source: convnet_builder.py
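
The strides reshuffling in the fallback branch matters because tf.nn.conv2d expects a length-4 strides vector aligned with the data layout. A small illustration in plain Python (assuming a 2-element strides list such as [2, 2]):

# Illustration only: expanding a 2-element strides list to the 4-element
# form tf.nn.conv2d expects for each layout.
strides = [2, 2]
nhwc_strides = [1] + strides + [1]  # [1, 2, 2, 1] for [batch, H, W, channels]
nchw_strides = [1, 1] + strides     # [1, 1, 2, 2] for [batch, channels, H, W]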

Example 3: _compute_concat_output_shape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def _compute_concat_output_shape(input_shape, axis):
    """Infers the output shape of concat given the input shape.

    The code is adapted from the ConcatLayer of lasagne
    (https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/merge.py)

    Args:
        input_shape (list): A list of shapes, each of which is in turn a
            list or TensorShape.
        axis (int): Axis of the concat operation.

    Returns:
        list: Output shape of concat.
    """
    # The size of each axis of the output shape equals the first
    # input size of respective axis that is not `None`
    input_shape = [tf.TensorShape(s).as_list() for s in input_shape]
    output_shape = [next((s for s in sizes if s is not None), None)
                    for sizes in zip(*input_shape)]
    axis_sizes = [s[axis] for s in input_shape]
    concat_axis_size = None if any(s is None for s in axis_sizes) \
            else sum(axis_sizes)
    output_shape[axis] = concat_axis_size
    return output_shape 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 26, Source: layers.py
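
A quick usage check (plain call, assuming TensorFlow is importable): concatenating shapes [None, 3, 5] and [None, 4, 5] along axis 1 sums the concat axis and keeps the unknown batch dimension:

# Example usage: the concat axis sums its sizes; other axes take the first
# non-None size; a None anywhere on the concat axis propagates as None.
shape = _compute_concat_output_shape([[None, 3, 5], [None, 4, 5]], axis=1)
print(shape)  # [None, 7, 5]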

Example 4: get_pooling_layer_hparams

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def get_pooling_layer_hparams(hparams):
    """Creates pooling layer hparams `dict` usable for :func:`get_layer`.

    If the :attr:`hparams` sets `'pool_size'` to `None`, the layer will be
    changed to the respective reduce-pooling layer. For example,
    :class:`tf.layers.MaxPooling1D <layers/MaxPooling1D>` is replaced with
    :class:`~texar.core.MaxReducePooling1D`.
    """
    if isinstance(hparams, HParams):
        hparams = hparams.todict()

    new_hparams = copy.copy(hparams)
    kwargs = new_hparams.get('kwargs', None)

    if kwargs and kwargs.get('pool_size', None) is None:
        pool_type = hparams['type']
        new_hparams['type'] = _POOLING_TO_REDUCE.get(pool_type, pool_type)
        kwargs.pop('pool_size', None)
        kwargs.pop('strides', None)
        kwargs.pop('padding', None)

    return new_hparams 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 24, Source: layers.py
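
For illustration, a plain-dict call might look like the following sketch; the MaxPooling1D-to-MaxReducePooling1D mapping is taken from the docstring above, and the exact contents of _POOLING_TO_REDUCE are an assumption:

# Hypothetical input: pool_size=None requests a reduce-pooling layer.
hparams = {
    'type': 'MaxPooling1D',
    'kwargs': {'pool_size': None, 'strides': 1, 'padding': 'same'},
}
new_hparams = get_pooling_layer_hparams(hparams)
# new_hparams == {'type': 'MaxReducePooling1D', 'kwargs': {}}
# pool_size, strides and padding are dropped: a reduce-pooling layer
# collapses the whole time dimension and takes none of these arguments.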

Example 5: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def __init__(self,
                 layers,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(SequentialLayer, self).__init__(
            trainable=trainable, name=name, **kwargs)

        if len(layers) == 0:
            raise ValueError("'layers' must be a non-empty list.")
        self._layers = []
        for layer in layers:
            if isinstance(layer, tf.layers.Layer):
                self._layers.append(layer)
            else:
                self._layers.append(get_layer(hparams=layer))

        # Keep track of whether trainable variables have been created
        self._vars_built = False 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 21, Source: layers.py
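
A construction sketch (hypothetical usage, not from the project's tests): ready-made tf.layers instances can be mixed with hparams dicts, since anything that is not a tf.layers.Layer is routed through get_layer:

# Hypothetical usage of SequentialLayer with plain tf.layers instances.
seq = SequentialLayer([
    tf.layers.Dense(256, activation=tf.nn.relu),
    tf.layers.Dropout(0.5),
    tf.layers.Dense(10),
])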

Example 6: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def call(self, inputs, mode=None): # pylint: disable=arguments-differ
        training = is_train_mode(mode)

        outputs = inputs
        for layer in self._layers:
            # Dropout and batch normalization need the `training` flag;
            # every other layer just consumes the previous layer's output.
            if isinstance(layer, (tf.layers.Dropout,
                                  tf.layers.BatchNormalization)):
                outputs = layer(outputs, training=training)
            else:
                outputs = layer(outputs)

        if not self.built or not self._vars_built:
            self._collect_weights()
            self._vars_built = True

        return outputs 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 19, Source: layers.py

Example 7: layer_normalize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def layer_normalize(inputs,
                    scope=None,
                    **kwargs):
    """Applies layer normalization. Normalizes over the last dimension.

    Args:
        inputs: A tensor with 2 or more dimensions, where the first
            dimension must be `batch_size`.
        scope (optional): variable scope.

    Returns:
        A tensor with the same shape and data dtype as `inputs`.
    """
    return tf.contrib.layers.layer_norm(
        inputs=inputs, begin_norm_axis=-1, begin_params_axis=-1, scope=scope,
        **kwargs
    ) 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 19, Source: layers.py
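
A minimal usage sketch, assuming TensorFlow 1.x with tf.contrib available:

# Normalizes each row of a [batch_size, hidden] tensor over its last axis;
# the result has the same shape and dtype as the input.
x = tf.random_normal([32, 128])
y = layer_normalize(x, scope="ln")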

Example 8: stacked_lstm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def stacked_lstm(self, inputs, states, hidden_size, output_size, nlayers):
    """Stacked LSTM layers with FC layers as input and output embeddings.

    Args:
      inputs: input tensor
      states: a list of internal lstm states for each layer
      hidden_size: number of lstm units
      output_size: size of the output
      nlayers: number of lstm layers
    Returns:
      net: output of the network
      states: a list of updated lstm states for each layer
    """
    net = inputs
    net = tfl.dense(
        net, hidden_size, activation=None, name="af1")
    for i in range(nlayers):
      net, states[i] = common_video.basic_lstm(
          net, states[i], hidden_size, name="alstm%d"%i)
    net = tfl.dense(
        net, output_size, activation=tf.nn.tanh, name="af2")
    return net, states 
Author: yyht, Project: BERT, Lines: 24, Source: emily.py

Example 9: lstm_gaussian

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def lstm_gaussian(self, inputs, states, hidden_size, output_size, nlayers,
                    name):
    """Stacked LSTM layers with FC layer as input and gaussian as output.

    Args:
      inputs: input tensor
      states: a list of internal lstm states for each layer
      hidden_size: number of lstm units
      output_size: size of the output
      nlayers: number of lstm layers
      name: the lstm name for scope definition
    Returns:
      mu: mean of the predicted gaussian
      logvar: log(var) of the predicted gaussian
      states: a list of updated lstm states for each layer
    """
    net = inputs
    net = tfl.dense(net, hidden_size, activation=None, name="%sf1"%name)
    for i in range(nlayers):
      net, states[i] = common_video.basic_lstm(
          net, states[i], hidden_size, name="%slstm%d"%(name, i))
    mu = tfl.dense(net, output_size, activation=None, name="%sf2mu"%name)
    logvar = tfl.dense(net, output_size, activation=None, name="%sf2log"%name)
    return mu, logvar, states 
Author: yyht, Project: BERT, Lines: 26, Source: emily.py
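
The (mu, logvar) pair returned above is typically consumed with the reparameterization trick; a hedged follow-up sketch (this sampling step is not part of the example itself):

# Hypothetical follow-up: draw z ~ N(mu, exp(logvar)) differentiably.
eps = tf.random_normal(tf.shape(mu))
z = mu + tf.exp(logvar / 2.0) * eps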

Example 10: update_internal_states_early

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def update_internal_states_early(self, internal_states, frames):
    """Update the internal states early in the network in GRU-like way."""
    batch_size = common_layers.shape_list(frames[0])[0]
    internal_state = internal_states[0][0][:batch_size, :, :, :]
    state_activation = tf.concat([internal_state, frames[0]], axis=-1)
    state_gate_candidate = tf.layers.conv2d(
        state_activation, 2 * self.hparams.recurrent_state_size,
        (3, 3), padding="SAME", name="state_conv")
    state_gate, state_candidate = tf.split(state_gate_candidate, 2, axis=-1)
    state_gate = tf.nn.sigmoid(state_gate)
    state_candidate = tf.tanh(state_candidate)
    internal_state = internal_state * state_gate
    internal_state += state_candidate * (1.0 - state_gate)
    max_batch_size = max(_MAX_BATCH, self.hparams.batch_size)
    diff_batch_size = max_batch_size - batch_size
    internal_state = tf.pad(
        internal_state, [[0, diff_batch_size], [0, 0], [0, 0], [0, 0]])
    return [[internal_state]] 
Author: yyht, Project: BERT, Lines: 20, Source: basic_stochastic.py
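
In equation form, the update above is the GRU-style convex combination

    state_new = g * state + (1 - g) * candidate

where the gate g = sigmoid(.) and the candidate = tanh(.) are the two halves produced by the single state_conv and then split.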

Example 11: reward_prediction_mid

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def reward_prediction_mid(
      self, input_images, input_reward, action, latent, mid_outputs):
    """Builds a reward prediction network from intermediate layers."""
    encoded = []
    for i, output in enumerate(mid_outputs):
      enc = output
      enc = tfl.conv2d(enc, 64, [3, 3], strides=(1, 1), activation=tf.nn.relu)
      enc = tfl.conv2d(enc, 32, [3, 3], strides=(2, 2), activation=tf.nn.relu)
      enc = tfl.conv2d(enc, 16, [3, 3], strides=(2, 2), activation=tf.nn.relu)
      enc = tfl.flatten(enc)
      enc = tfl.dense(enc, 64, activation=tf.nn.relu, name="rew_enc_%d" % i)
      encoded.append(enc)
    x = encoded
    x = tf.stack(x, axis=1)
    x = tfl.flatten(x)
    x = tfl.dense(x, 256, activation=tf.nn.relu, name="rew_dense1")
    x = tfl.dense(x, 128, activation=tf.nn.relu, name="rew_dense2")
    return x 
Author: yyht, Project: BERT, Lines: 20, Source: sv2p.py

Example 12: gated_linear_unit_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = layers().Dense(depth * 2, activation=None)(x)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Author: yyht, Project: BERT, Lines: 21, Source: common_layers.py
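
A shape-preserving usage sketch (assuming a TF 1.x graph context inside common_layers):

# The layer doubles the depth internally, then splits and gates, so the
# output shape matches the input shape.
x = tf.random_normal([8, 20, 512])
y = gated_linear_unit_layer(x, name="glu")  # shape [8, 20, 512]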

Example 13: double_discriminator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output."""
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = layers().Conv2D(
        filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
    net = tf.nn.relu(net)
    # Stack the second conv on the first conv's activations so the two
    # layers compose, as the docstring describes.
    net = layers().Conv2D(
        filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(net)
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1) 
Author: yyht, Project: BERT, Lines: 24, Source: common_layers.py

Example 14: max_pool_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def max_pool_layer(layer_id, inputs, kernel_size, stride):
  """Build a max-pooling layer.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row
      corresponds to a single example.
    kernel_size: int. Width and height to pool over per input channel. The
      kernel is assumed to be square.
    stride: int. Step size between pooling operations.

  Returns:
    Tensor of shape [num_examples, width/stride, height/stride, out_channels].
    Result of applying max pooling to 'inputs'.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  with tf.variable_scope("pool_%d" % layer_id):
    return tf.nn.max_pool(
        inputs, [1, kernel_size, kernel_size, 1], [1, stride, stride, 1],
        padding="SAME",
        name="pool") 
Author: tensorflow, Project: kfac, Lines: 23, Source: convnet.py
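
Per the TODO above, a tf.layers-based replacement would look roughly like this sketch (max_pool_layer_v2 is a hypothetical name, not the kfac project's actual code):

# Hypothetical tf.layers equivalent of max_pool_layer above.
def max_pool_layer_v2(layer_id, inputs, kernel_size, stride):
  with tf.variable_scope("pool_%d" % layer_id):
    return tf.layers.max_pooling2d(
        inputs, pool_size=kernel_size, strides=stride, padding="same",
        name="pool")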

Example 15: lstm_gaussian

# Required import: import tensorflow [as alias]
# Or: from tensorflow import layers [as alias]
def lstm_gaussian(self, inputs, states, hidden_size, output_size, nlayers):
    """Stacked LSTM layers with FC layer as input and gaussian as output.

    Args:
      inputs: input tensor
      states: a list of internal lstm states for each layer
      hidden_size: number of lstm units
      output_size: size of the output
      nlayers: number of lstm layers
    Returns:
      mu: mean of the predicted gaussian
      logvar: log(var) of the predicted gaussian
      states: a list of updated lstm states for each layer
    """
    net = inputs
    net = tfl.dense(net, hidden_size, activation=None, name="bf1")
    for i in range(nlayers):
      net, states[i] = common_video.basic_lstm(
          net, states[i], hidden_size, name="blstm%d"%i)
    mu = tfl.dense(net, output_size, activation=None, name="bf2mu")
    logvar = tfl.dense(net, output_size, activation=None, name="bf2log")
    return mu, logvar, states 
Author: mlperf, Project: training_results_v0.5, Lines: 24, Source: emily.py


Note: The tensorflow.layers examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not repost without permission.