

Python init_ops.zeros_initializer Method: Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.init_ops.zeros_initializer. If you are unsure what init_ops.zeros_initializer does or how to call it, the curated examples below should help. You can also explore further usage examples of the containing module, tensorflow.python.ops.init_ops.


The following presents 15 code examples of the init_ops.zeros_initializer method, sorted by popularity by default.
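Before the examples, here is a minimal, self-contained sketch of the basic pattern they all share. It is illustrative only, not taken from any of the projects below, and assumes a TensorFlow 1.x graph-mode environment.

import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

with tf.Graph().as_default():
  # Create a variable whose initial value is all zeros via zeros_initializer().
  bias = variable_scope.get_variable(
      'bias',
      shape=[10],
      dtype=tf.float32,
      initializer=init_ops.zeros_initializer())
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(bias))  # ten 0.0 values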

Example 1: vgg_arg_scope

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Code lines: 18, Source file: vgg16.py
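As a hedged follow-up, the scope returned above would typically be applied as sketched here; the `inputs` placeholder and the tf.contrib import paths are assumptions for illustration, not part of the original snippet.

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # Inside the scope, conv2d defaults to relu activation, l2 weight
  # regularization and zero-initialized biases.
  net = layers.conv2d(inputs, 64, [3, 3], scope='conv1_1')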

Example 2: _create_dense_column_weighted_sum

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum') 
Developer: ryfeus, Project: lambda-packs, Code lines: 19, Source file: feature_column.py

Example 3: _get_or_create_eval_step

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  graph = ops.get_default_graph()
  eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
  if len(eval_steps) == 1:
    return eval_steps[0]
  elif len(eval_steps) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  else:
    counter = variable_scope.get_variable(
        'eval_step',
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
    return counter 
Developer: ryfeus, Project: lambda-packs, Code lines: 27, Source file: evaluation.py

Example 4: __init__

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.activity_regularizer = activity_regularizer
    self.input_spec = base.InputSpec(min_ndim=2) 
Developer: ryfeus, Project: lambda-packs, Code lines: 23, Source file: core.py
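For context, a short hedged sketch of how this zero default surfaces through the public tf.layers.Dense wrapper (TF 1.x assumed; not part of the original source):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
layer = tf.layers.Dense(units=3)  # bias_initializer defaults to zeros_initializer()
y = layer(x)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(layer.bias))  # -> [0. 0. 0.]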

Example 5: __init__

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.activity_regularizer = activity_regularizer 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Code lines: 22, Source file: core.py

Example 6: _create_baseline

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _create_baseline(self, n_output=1, n_hidden=100,
                       is_zero_init=False,
                       collection='BASELINE'):
    # center input
    h = self._x
    if self.mean_xs is not None:
      h -= self.mean_xs

    if is_zero_init:
      initializer = init_ops.zeros_initializer()
    else:
      initializer = slim.variance_scaling_initializer()

    with slim.arg_scope([slim.fully_connected],
                        variables_collections=[collection, Q_COLLECTION],
                        trainable=False,
                        weights_initializer=initializer):
      h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)
      baseline = slim.fully_connected(h, n_output, activation_fn=None)

      if n_output == 1:
        baseline = tf.reshape(baseline, [-1])  # very important to reshape
    return baseline 
Developer: rky0930, Project: yolo_v2, Code lines: 25, Source file: rebar.py

Example 7: vgg_arg_scope

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc 
Developer: Sargunan, Project: Table-Detection-using-Deep-learning, Code lines: 19, Source file: truncated_vgg.py

Example 8: _adaptive_max_norm

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean 
Developer: tobegit3hub, Project: deep_image_model, Code lines: 27, Source file: optimizers.py

Example 9: create_global_step

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64,
                    initializer=init_ops.zeros_initializer, trainable=False,
                    collections=collections) 
Developer: tobegit3hub, Project: deep_image_model, Code lines: 24, Source file: variables.py
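A hedged usage sketch of this helper follows; the tf.contrib.framework re-export path and the increment op are assumptions about TF 1.x packaging, not part of the snippet above.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  # The step variable starts at 0 thanks to zeros_initializer and is not trainable.
  global_step = tf.contrib.framework.create_global_step(g)
  increment_op = tf.assign_add(global_step, 1)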

Example 10: convolution1d

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def convolution1d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=1) 
Developer: taehoonlee, Project: tensornets, Code lines: 42, Source file: layers.py

Example 11: convolution2d

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def convolution2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=2) 
Developer: taehoonlee, Project: tensornets, Code lines: 42, Source file: layers.py

Example 12: convolution3d

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def convolution3d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=3) 
Developer: taehoonlee, Project: tensornets, Code lines: 42, Source file: layers.py

Example 13: _adaptive_max_norm

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.cast(global_step, dtypes.float32)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean 
Developer: taehoonlee, Project: tensornets, Code lines: 30, Source file: optimizers.py

Example 14: _get_default_initializer

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      initializer and initializing_from_value. See get_variable above.

    Raises:
      ValueError: When giving unsupported dtype.
    """
    # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
    if dtype.is_floating:
      initializer = init_ops.glorot_uniform_initializer()
      initializing_from_value = False
    # If dtype is DT_INT/DT_UINT, provide a default value `zero`
    # If dtype is DT_BOOL, provide a default value `FALSE`
    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
      initializer = init_ops.zeros_initializer()(
          shape=shape, dtype=dtype.base_dtype)
      initializing_from_value = True
    # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
    else:
      raise ValueError("An initializer for variable %s of %s is required"
                       % (name, dtype.base_dtype))

    return initializer, initializing_from_value


# To stop regularization, use this regularizer 
Developer: ryfeus, Project: lambda-packs, Code lines: 35, Source file: variable_scope.py

Example 15: _create_categorical_column_weighted_sum

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import zeros_initializer [as alias]
def _create_categorical_column_weighted_sum(
    column, builder, units, sparse_combiner, weight_collections, trainable):
  """Create a weighted sum of a categorical column for linear_model."""
  sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
      array_ops.shape(sparse_tensors.id_tensor)[0], -1
  ])
  weight_tensor = sparse_tensors.weight_tensor
  if weight_tensor is not None:
    weight_tensor = sparse_ops.sparse_reshape(
        weight_tensor, [array_ops.shape(weight_tensor)[0], -1])

  weight = variable_scope.get_variable(
      name='weights',
      shape=(column._num_buckets, units),  # pylint: disable=protected-access
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return _safe_embedding_lookup_sparse(
      weight,
      id_tensor,
      sparse_weights=weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum') 
Developer: ryfeus, Project: lambda-packs, Code lines: 29, Source file: feature_column.py


Note: The examples of the tensorflow.python.ops.init_ops.zeros_initializer method in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please follow the corresponding project's License when distributing or using the code. Do not reproduce without permission.