

Python init_ops.random_normal_initializer Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.init_ops.random_normal_initializer. If you are unsure what random_normal_initializer does or how to use it in practice, the curated examples below should help.


The following presents 15 code examples of the random_normal_initializer function, ordered by popularity.
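All of the examples share one basic pattern: construct the initializer with the desired mean and standard deviation, then pass it as the initializer argument of get_variable. Here is a minimal sketch of that pattern (it assumes TensorFlow 1.x, where the internal tensorflow.python.ops modules used throughout these examples are importable; in public code the equivalent is tf.random_normal_initializer; the scope name 'demo' and the [784, 10] shape are arbitrary choices for illustration):

from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

# Draw initial values from a normal distribution N(mean, stddev**2).
init = init_ops.random_normal_initializer(mean=0.0, stddev=0.01)

with variable_scope.variable_scope('demo'):  # arbitrary scope name
    # The variable is filled from the initializer on first initialization.
    weights = variable_scope.get_variable(
        'weights', shape=[784, 10], initializer=init)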

Example 1: logistic_regression

def logistic_regression(X, y, class_weight=None, init_mean=None,
                        init_stddev=1.0):
    """Creates logistic regression TensorFlow subgraph.

    Args:
        X: tensor or placeholder for input features,
           shape should be [batch_size, n_features].
        y: tensor or placeholder for target,
           shape should be [batch_size, n_classes].
        class_weight: tensor, [n_classes], where for each class
                      it has weight of the class. If not provided
                      will check if graph contains tensor `class_weight:0`.
                      If that is not provided either all ones are used.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables logistic_regression.weights and logistic_regression.bias
        are initialized as follows.  If init_mean is not None, then
        initialization will be done using a random normal initializer with the
        given init_mean and init_stddev.  (These may be set to 0.0 each if a
        zero initialization is desirable for convex use cases.)  If init_mean
        is None, then the uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('logistic_regression'):
        logging_ops.histogram_summary('logistic_regression.X', X)
        logging_ops.histogram_summary('logistic_regression.y', y)
        # Set up the requested initialization.
        if init_mean is None:
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], y.get_shape()[-1]])
            bias = vs.get_variable('bias',
                                   [y.get_shape()[-1]])
        else:
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], y.get_shape()[-1]],
                                      initializer=init_ops.random_normal_initializer(
                                          init_mean, init_stddev))
            bias = vs.get_variable('bias',
                                   [y.get_shape()[-1]],
                                   initializer=init_ops.random_normal_initializer(
                                       init_mean, init_stddev))
        logging_ops.histogram_summary('logistic_regression.weights', weights)
        logging_ops.histogram_summary('logistic_regression.bias', bias)
        # If no class weight provided, try to retrieve one from pre-defined
        # tensor name in the graph.
        if not class_weight:
            try:
                class_weight = ops.get_default_graph().get_tensor_by_name('class_weight:0')
            except KeyError:
                pass

        return losses_ops.softmax_classifier(X, y, weights, bias,
                                             class_weight=class_weight)
Developer: 01bui, Project: tensorflow, Lines of code: 57, Source: models.py

Example 2: _BuildSmallModel

  def _BuildSmallModel(self):
    image = array_ops.zeros([2, 6, 6, 3])
    kernel = variable_scope.get_variable(
        'DW', [3, 3, 3, 6],
        dtypes.float32,
        initializer=init_ops.random_normal_initializer(stddev=0.001))
    x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
    kernel = variable_scope.get_variable(
        'DW2', [2, 2, 6, 12],
        dtypes.float32,
        initializer=init_ops.random_normal_initializer(stddev=0.001))
    x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
    return x
Developer: Jackhuang945, Project: tensorflow, Lines of code: 13, Source: model_analyzer_test.py

Example 3: linear_regression

def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows.  If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev.  (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.)  If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
Developer: AliMiraftab, Project: tensorflow, Lines of code: 49, Source: models.py

Example 4: doTestIndexedSlicesGradientInCondInWhileLoop

  def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
    with ops.Graph().as_default():
      embedding_matrix = variable_scope.get_variable(
          "embedding_matrix", [5, 5],
          initializer=init_ops.random_normal_initializer(),
          use_resource=use_resource)

      def Cond(it, _):
        return it < 5

      def Body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        cost = control_flow_ops.cond(
            math_ops.equal(it, 3), lambda: math_ops.square(cost),
            lambda: cost + math_ops.reduce_sum(embedding))
        return it + 1, cost

      _, cost = control_flow_ops.while_loop(
          Cond, Body, [constant_op.constant(0), constant_op.constant(0.0)])

      dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
      dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                           dynamic_grads.indices)

      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      static = math_ops.square(
          math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
          math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
      static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
      static_grads = math_ops.segment_sum(static_grads.values,
                                          static_grads.indices)

      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 35, Source: control_flow_ops_test.py

Example 5: batch_normalize

def batch_normalize(tensor_in, epsilon=1e-5, convnet=False, decay=0.9,
                    scale_after_normalization=True):
    """Batch normalization.

    Args:
        tensor_in: input Tensor, 4D shape: [batch, in_height, in_width, in_depth].
        epsilon: A float number to avoid being divided by 0.
        convnet: Whether this is for convolutional net use. If this is True,
            moments will sum across axis [0, 1, 2]. Otherwise, only [0].
        decay: decay rate for exponential moving average.
        scale_after_normalization: Whether to scale after normalization.

    Returns:
        A batch-normalized Tensor.
    """
    shape = tensor_in.get_shape().as_list()

    with vs.variable_scope("batch_norm"):
        gamma = vs.get_variable("gamma", [shape[-1]], initializer=init_ops.random_normal_initializer(1.0, 0.02))
        beta = vs.get_variable("beta", [shape[-1]], initializer=init_ops.constant_initializer(0.0))
        ema = moving_averages.ExponentialMovingAverage(decay=decay)
        if convnet:
            assign_mean, assign_var = nn.moments(tensor_in, [0, 1, 2])
        else:
            assign_mean, assign_var = nn.moments(tensor_in, [0])
        ema_assign_op = ema.apply([assign_mean, assign_var])
        ema_mean, ema_var = ema.average(assign_mean), ema.average(assign_var)

        def update_mean_var():
            """Internal function that updates mean and variance during training"""
            with ops.control_dependencies([ema_assign_op]):
                return array_ops_.identity(assign_mean), array_ops_.identity(assign_var)

        is_training = array_ops_.squeeze(ops.get_collection("IS_TRAINING"))
        mean, variance = control_flow_ops.cond(is_training, update_mean_var, lambda: (ema_mean, ema_var))
        return nn.batch_norm_with_global_normalization(
            tensor_in, mean, variance, beta, gamma, epsilon, scale_after_normalization=scale_after_normalization
        )
Developer: RuhiSharma, Project: tensorflow, Lines of code: 34, Source: batch_norm_ops.py

Example 6: batch_normalize

def batch_normalize(tensor_in,
                    epsilon=1e-5,
                    convnet=False,
                    decay=0.9,
                    scale_after_normalization=True):
  """Batch normalization.

  Args:
    tensor_in: input `Tensor`, 4D shape: [batch, in_height, in_width, in_depth].
    epsilon : A float number to avoid being divided by 0.
    convnet: Whether this is for convolutional net use. If `True`, moments
        will sum across axis `[0, 1, 2]`. Otherwise, only `[0]`.
    decay: Decay rate for exponential moving average.
    scale_after_normalization: Whether to scale after normalization.

  Returns:
    A batch-normalized `Tensor`.
  """
  shape = tensor_in.get_shape().as_list()

  with vs.variable_scope("batch_norm"):
    gamma = vs.get_variable(
        "gamma", [shape[-1]],
        initializer=init_ops.random_normal_initializer(1., 0.02))
    beta = vs.get_variable("beta", [shape[-1]],
                           initializer=init_ops.constant_initializer(0.))
    moving_mean = vs.get_variable(
        'moving_mean',
        shape=[shape[-1]],
        initializer=init_ops.zeros_initializer,
        trainable=False)
    moving_var = vs.get_variable(
        'moving_var',
        shape=[shape[-1]],
        initializer=init_ops.ones_initializer,
        trainable=False)

    def _update_mean_var():
      """Internal function that updates mean and variance during training."""
      axis = [0, 1, 2] if convnet else [0]
      mean, var = nn.moments(tensor_in, axis)
      update_moving_mean = moving_averages.assign_moving_average(
          moving_mean, mean, decay)
      update_moving_var = moving_averages.assign_moving_average(
          moving_var, var, decay)
      with ops.control_dependencies([update_moving_mean, update_moving_var]):
        return array_ops_.identity(mean), array_ops_.identity(var)

    is_training = array_ops_.squeeze(ops.get_collection("IS_TRAINING"))
    mean, variance = control_flow_ops.cond(is_training, _update_mean_var,
                                           lambda: (moving_mean, moving_var))
    return nn.batch_norm_with_global_normalization(
        tensor_in,
        mean,
        variance,
        beta,
        gamma,
        epsilon,
        scale_after_normalization=scale_after_normalization)
Developer: Assassin0028, Project: tensorflow, Lines of code: 59, Source: batch_norm_ops.py

Example 7: __init__

  def __init__(self, W_in=init_ops.random_normal_initializer(stddev=0.1),
               W_hid=init_ops.random_normal_initializer(stddev=0.1),
               W_cell=init_ops.random_normal_initializer(stddev=0.1),
               b=init_ops.constant_initializer(0.),
               activation=None):
    self.W_in = W_in
    self.W_hid = W_hid
    # Don't store a cell weight vector when cell is None
    if W_cell is not None:
      self.W_cell = W_cell
    if b is not None:
      self.b = b
    # For the activation, if None is supplied, use identity
    if activation is None:
      self.activation = control_flow_ops.identity
    else:
      self.activation = activation
Developer: Styrke, Project: master-code, Lines of code: 17, Source: custom_gru_cell.py

Example 8: linear_regression

def linear_regression(X, y, init_mean=None, init_stddev=1.0):
    """Creates linear regression TensorFlow subgraph.

    Args:
        X: tensor or placeholder for input features.
        y: tensor or placeholder for target.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables linear_regression.weights and linear_regression.bias are
        initialized as follows.  If init_mean is not None, then initialization
        will be done using a random normal initializer with the given init_mean
        and init_stddev.  (These may be set to 0.0 each if a zero initialization
        is desirable for convex use cases.)  If init_mean is None, then the
        uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('linear_regression'):
        logging_ops.histogram_summary('linear_regression.X', X)
        logging_ops.histogram_summary('linear_regression.y', y)
        y_shape = y.get_shape()
        if len(y_shape) == 1:
            output_shape = 1
        else:
            output_shape = y_shape[1]
        # Set up the requested initialization.
        if init_mean is None:
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], output_shape])
            bias = vs.get_variable('bias',
                                   [output_shape])
        else:
            weights = vs.get_variable('weights',
                                      [X.get_shape()[1], output_shape],
                                      initializer=init_ops.random_normal_initializer(
                                          init_mean, init_stddev))
            bias = vs.get_variable('bias',
                                   [output_shape],
                                   initializer=init_ops.random_normal_initializer(
                                       init_mean, init_stddev))
        logging_ops.histogram_summary('linear_regression.weights', weights)
        logging_ops.histogram_summary('linear_regression.bias', bias)
        return losses_ops.mean_squared_error_regressor(X, y, weights, bias)
Developer: 01bui, Project: tensorflow, Lines of code: 46, Source: models.py

Example 9: BuildSplitableModel

def BuildSplitableModel():
  """Build a small model that can be run partially in each step."""
  image = array_ops.zeros([2, 6, 6, 3])

  kernel1 = variable_scope.get_variable(
      'DW', [3, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')

  kernel2 = variable_scope.get_variable(
      'DW2', [2, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')

  r3 = r1 + r2
  return r1, r2, r3
Developer: 1000sprites, Project: tensorflow, Lines of code: 18, Source: model_analyzer_testlib.py

Example 10: BuildSmallModel

def BuildSmallModel():
  """Build a small forward conv model."""
  image = array_ops.zeros([2, 6, 6, 3])
  _ = variable_scope.get_variable(
      'ScalarW', [],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  kernel = variable_scope.get_variable(
      'DW', [3, 3, 3, 6],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
  kernel = variable_scope.get_variable(
      'DW2', [2, 2, 6, 12],
      dtypes.float32,
      initializer=init_ops.random_normal_initializer(stddev=0.001))
  x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
  return x
Developer: 1000sprites, Project: tensorflow, Lines of code: 18, Source: model_analyzer_testlib.py

Example 11: _TestOneSimpleTraining

  def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
                             batch_size, seq_length, dir_count, dropout, dtype,
                             delta, tolerance):
    # Gradient checking runs two forward ops with almost the same input. Need to
    # make sure the drop patterns across the two runs are the same.
    logging.info("Training test with config: %s", locals())
    old_env_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False))
    os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)
    random_seed.set_random_seed(5678)
    has_input_c = (rnn_mode == CUDNN_LSTM)
    direction = (CUDNN_RNN_UNIDIRECTION
                 if dir_count == 1 else CUDNN_RNN_BIDIRECTION)
    model = CudnnTestModel(
        rnn_mode,
        num_layers,
        num_units,
        input_size,
        direction=direction,
        dropout=dropout,
        dtype=dtype,
        training=True,
        bias_initializer=init_ops.random_normal_initializer(
            mean=1., dtype=dtype))
    rnn = model.rnn
    params = rnn.trainable_variables[0]

    inputs = variables.Variable(
        random_ops.random_uniform(
            [seq_length, batch_size, input_size], dtype=dtype),
        dtype=dtype)
    input_h = variables.Variable(
        random_ops.random_uniform(
            [num_layers * dir_count, batch_size, num_units], dtype=dtype),
        dtype=dtype)
    if has_input_c:
      input_c = variables.Variable(
          random_ops.random_uniform(
              [num_layers * dir_count, batch_size, num_units], dtype=dtype),
          dtype=dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    total_sum = model.FProp(inputs, initial_state, training=True)

    with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:
      sess.run(variables.global_variables_initializer())
      all_inputs = [inputs, params]
      for s in initial_state:
        all_inputs.append(s)
      self._GradientCheck(
          sess, total_sum, all_inputs, tolerance=tolerance, delta=delta)
      os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = old_env_state
Developer: ilya-edrenkin, Project: tensorflow, Lines of code: 52, Source: cudnn_rnn_test.py

Example 12: compute_spectral_norm

def compute_spectral_norm(w_tensor, power_iteration_rounds=1, name=None):
  """Estimates the largest singular value in the weight tensor.

  Args:
    w_tensor: The weight matrix whose spectral norm should be computed.
    power_iteration_rounds: The number of iterations of the power method to
      perform. A higher number yields a better approximation.
    name: An optional scope name.

  Returns:
    The largest singular value (the spectral norm) of w.
  """
  with variable_scope.variable_scope(name, 'spectral_norm'):
    # The paper says to flatten convnet kernel weights from
    # (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D
    # kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to
    # (KH * KW * C_in, C_out), and similarly for other layers that put output
    # channels as last dimension.
    # n.b. this means that w here is equivalent to w.T in the paper.
    w = array_ops.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))

    # Persisted approximation of first left singular vector of matrix `w`.
    u_var = variable_scope.get_variable(
        _PERSISTED_U_VARIABLE_SUFFIX,
        shape=(w.shape[0], 1),
        dtype=w.dtype,
        initializer=init_ops.random_normal_initializer(),
        trainable=False)
    u = u_var

    # Use power iteration method to approximate spectral norm.
    for _ in range(power_iteration_rounds):
      # `v` approximates the first right singular vector of matrix `w`.
      v = nn.l2_normalize(math_ops.matmul(array_ops.transpose(w), u))
      u = nn.l2_normalize(math_ops.matmul(w, v))

    # Update persisted approximation.
    with ops.control_dependencies([u_var.assign(u, name='update_u')]):
      u = array_ops.identity(u)

    u = array_ops.stop_gradient(u)
    v = array_ops.stop_gradient(v)

    # Largest singular value of `w`.
    spectral_norm = math_ops.matmul(
        math_ops.matmul(array_ops.transpose(u), w), v)
    spectral_norm.shape.assert_is_fully_defined()
    spectral_norm.shape.assert_is_compatible_with([1, 1])

    return spectral_norm[0][0]
Developer: ahmedsaiduk, Project: tensorflow, Lines of code: 50, Source: spectral_normalization_impl.py

Example 13: __call__

  def __call__(self, inputs, state, scope=None):
    dtype = inputs.dtype
    # as_list() so that it is a float. Seems strange...
    batch_size, input_size = inputs.get_shape().as_list()
    if self._O is not None:
      input_size = input_size - self._O.get_shape().as_list()[0]
    with vs.variable_scope(scope or type(self).__name__):
      A = vs.get_variable('A', [self._num_units, self._num_units], dtype=dtype,
                          initializer=init_ops.random_normal_initializer(
                              stddev=1 / math.sqrt(self._num_units)))
      B = vs.get_variable('B', [input_size, self._num_units], dtype=dtype,
                          initializer=init_ops.random_normal_initializer(
                              stddev=1 / math.sqrt(input_size)))
      b = vs.get_variable('b', [self._num_units],
                          initializer=init_ops.random_normal_initializer(
                              stddev=0.01))

      noise = random_ops.random_normal([batch_size, self._num_units],
                                       stddev=self._sigma)
      if self._O is not None:
        inputs_term = math_ops.matmul(inputs,
                                      array_ops.concat(0, [B, self._O]))
      else:
        inputs_term = math_ops.matmul(inputs, B)
      output = ((1 - self._dt_tau) * state +
                self._dt_tau * (math_ops.matmul(self._activation(state), A) +
                                inputs_term + b + noise))
    return output, output
Developer: jsseely, Project: ready-set-go-tensorflow, Lines of code: 15, Source: ctrnncell.py

Example 14: build

  def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    if self._input_initializer is None:
      self._input_initializer = init_ops.random_normal_initializer(mean=0.0,
                                                                   stddev=0.001)
    self._input_kernel = self.add_variable(
        "input_kernel",
        shape=[input_depth, self._num_units],
        initializer=self._input_initializer)

    if self._recurrent_initializer is None:
      self._recurrent_initializer = init_ops.constant_initializer(1.)
    self._recurrent_kernel = self.add_variable(
        "recurrent_kernel",
        shape=[self._num_units],
        initializer=self._recurrent_initializer)

    # Clip the absolute values of the recurrent weights to the specified minimum
    if self._recurrent_min_abs:
      abs_kernel = math_ops.abs(self._recurrent_kernel)
      min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
      self._recurrent_kernel = math_ops.multiply(
          math_ops.sign(self._recurrent_kernel),
          min_abs_kernel
      )

    # Clip the absolute values of the recurrent weights to the specified maximum
    if self._recurrent_max_abs:
      self._recurrent_kernel = clip_ops.clip_by_value(self._recurrent_kernel,
                                                      -self._recurrent_max_abs,
                                                      self._recurrent_max_abs)

    self._bias = self.add_variable(
        "bias",
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))

    self.built = True
Developer: xkp793003821, Project: indrnn, Lines of code: 42, Source: ind_rnn_cell.py

Example 15: testIndexedSlicesGradient

    def testIndexedSlicesGradient(self):
        with ops.Graph().as_default():
            embedding_matrix = variable_scope.get_variable(
                "embedding_matrix", [5, 5], initializer=init_ops.random_normal_initializer()
            )

            def Cond(it, _):
                return it < 5

            def Body(it, cost):
                embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
                cost += math_ops.reduce_sum(embedding)
                return it + 1, cost

            _, cost = control_flow_ops.while_loop(Cond, Body, [constant_op.constant(0), constant_op.constant(0.0)])
            optimizer = momentum.MomentumOptimizer(0.1, 0.9)
            train_op = optimizer.minimize(cost)
            with self.test_session() as sess:
                sess.run(variables.global_variables_initializer())
                for _ in range(10):
                    sess.run([train_op])
Developer: tensorflow, Project: tensorflow, Lines of code: 21, Source: control_flow_ops_test.py


Note: The tensorflow.python.ops.init_ops.random_normal_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish this compilation without permission.