Python tensorflow.add Method Code Examples

This article collects typical usage examples of the Python method tensorflow.add. If you are wondering what tensorflow.add does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of tensorflow, the module this method belongs to.


Below are 14 code examples of the tensorflow.add method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
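For orientation before the project-level examples, here is a minimal, self-contained tf.add call (a TF1-style sketch matching the sessions used below; tf.add(a, b) is equivalent to the broadcasting + operator on tensors):

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.add(a, b)  # same as a + b -> [4.0, 6.0]
with tf.Session() as sess:
    print(sess.run(c))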

Example 1: _inv_preemphasis

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def _inv_preemphasis(x):
    """Inverts pre-emphasis filtering, computing y[n] = x[n] + hparams.preemphasis * y[n-1]."""
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        # Right-align the first i+1 samples of x into a length-N vector,
        # then fold them into the running IIR accumulation.
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])

    y = final[1]

    return y 
Author: candlewill, Project: Griffin_lim, Lines: 22, Source: griffin_lim.py
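A minimal usage sketch, not from the original project: it assumes a stand-in hparams object with preemphasis=0.97 (the real project defines hparams elsewhere) and TF1 graph mode.

import types
import tensorflow as tf

hparams = types.SimpleNamespace(preemphasis=0.97)  # stand-in for the project's hparams

signal = tf.constant([1.0, 0.5, -0.2, 0.3], dtype=tf.float32)
restored = _inv_preemphasis(signal)
with tf.Session() as sess:
    print(sess.run(restored))  # inverse-filtered signal, same length as the input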

Example 2: _variable_with_weight_decay

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Author: ringringyi, Project: DOTA_models, Lines: 27, Source: cifar10.py
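For context, a sketch of how the 'losses' collection built here is typically consumed downstream; this mirrors the pattern in the TensorFlow CIFAR-10 tutorial this snippet comes from, reconstructed as an illustration rather than quoted:

# Sum the weight-decay terms together with the data loss to form the training objective.
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')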

Example 3: accum_val_ops

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs to compute AP for action prediction.
  
  Args:
    outputs        : List of scalar ops to summarize.
    names          : Name of the scalar ops.
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process.

  Returns:
    outs: list containing the mean of each scalar output (also added to summaries).
  """
  outs = []
  if N >= 0:
    outputs = outputs[:N]
  for i in range(len(outputs[0])):
    # list(...) is required under Python 3, where map() returns an iterator.
    scalar = np.array(list(map(lambda x: x[i], outputs)))
    assert scalar.ndim == 1
    add_value_to_summary(metric_summary, names[i], np.mean(scalar),
                         tag_str='{:>27s}:  [{:s}]: %f'.format(names[i], ''))
    outs.append(np.mean(scalar))
  return outs 
Author: ringringyi, Project: DOTA_models, Lines: 23, Source: tf_utils.py

Example 4: add_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def add_regularizer(self, cost):
    """Adds L2 regularization for parameters which have it turned on.

    Args:
      cost: float cost before regularization.

    Returns:
      Updated cost optionally including regularization.
    """
    if self.network is None:
      return cost
    regularized_weights = self.network.get_l2_regularized_weights()
    if not regularized_weights:
      return cost
    l2_coeff = self.master.hyperparams.l2_regularization_coefficient
    if l2_coeff == 0.0:
      return cost
    tf.logging.info('[%s] Regularizing parameters: %s', self.name,
                    [w.name for w in regularized_weights])
    l2_costs = [tf.nn.l2_loss(p) for p in regularized_weights]
    return tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer') 
Author: ringringyi, Project: DOTA_models, Lines: 23, Source: component.py

Example 5: _variable_with_weight_decay

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Author: ringringyi, Project: DOTA_models, Lines: 24, Source: deep_cnn.py

Example 6: l1_l2_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                           name='value_l1')
      reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
                           name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer 
Author: ringringyi, Project: DOTA_models, Lines: 27, Source: losses.py
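A minimal usage sketch (TF1 graph mode; the weights and shapes are arbitrary illustrations):

regularizer = l1_l2_regularizer(weight_l1=0.01, weight_l2=0.001)
w = tf.Variable(tf.ones([3, 3]))
reg_loss = regularizer(w)  # 0.01 * sum(|w|) + 0.001 * l2_loss(w), combined via tf.add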

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Author: ringringyi, Project: DOTA_models, Lines: 22, Source: Autoencoder.py
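A minimal usage sketch, assuming the enclosing class is named Autoencoder and that _initialize_weights (not shown above) creates w1/b1/w2/b2 with shapes matching n_input and n_hidden:

import numpy as np

ae = Autoencoder(n_input=784, n_hidden=200)
batch = np.random.rand(64, 784).astype(np.float32)
cost, _ = ae.sess.run([ae.cost, ae.optimizer], feed_dict={ae.x: batch})
print(cost)  # squared-error reconstruction cost for this batch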

Example 8: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(),
                 scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # Note: the noise term uses the Python float `scale` captured at graph build
        # time; the self.scale placeholder defined above is fed during training but
        # does not alter this noise term.
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Author: ringringyi, Project: DOTA_models, Lines: 26, Source: DenoisingAutoencoder.py
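A minimal usage sketch; the class name AdditiveGaussianNoiseAutoencoder is an assumption based on the source file, and self.scale is fed to match the original training loop even though the noise term above uses the Python float:

import numpy as np

dae = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200, scale=0.1)  # assumed class name
batch = np.random.rand(64, 784).astype(np.float32)
cost, _ = dae.sess.run([dae.cost, dae.optimizer],
                       feed_dict={dae.x: batch, dae.scale: dae.training_scale})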

Example 9: simulate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros_like(self._observ),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          # TODO(piotrmilos): possibly ignore envs with done
          r0 = tf.maximum(a[0], self._batch_env.observ)
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)

          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      simulate_ret = [ret[-1, ...] for ret in simulate_ret]

      with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
        return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2]) 
Author: akzaidi, Project: fine-lm, Lines: 24, Source: tf_atari_wrappers.py

Example 10: add_scope

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def add_scope(scope=None, scope_fn=None):
  """Return a decorator which add a TF name/variable scope to a function.

  Note that the function returned by the decorator accept an additional 'name'
  parameter, which can overwrite the name scope given when the function is
  created.

  Args:
    scope (str): name of the scope. If None, the function name is used.
    scope_fn (fct): Either tf.name_scope or tf.variable_scope

  Returns:
    fct: the add_scope decorator
  """
  def decorator(f):

    @functools.wraps(f)
    def decorated(*args, **kwargs):
      name = kwargs.pop("name", None)  # Python 2 hack for keyword only args
      with scope_fn(name or scope or f.__name__):
        return f(*args, **kwargs)

    return decorated

  return decorator 
Author: akzaidi, Project: fine-lm, Lines: 27, Source: expert_utils.py
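A minimal usage sketch (note the snippet also relies on functools, which must be imported alongside tensorflow; the decorated function gains an optional name keyword argument):

import functools
import tensorflow as tf

@add_scope(scope_fn=tf.name_scope)
def biased_sum(x, b):
    return tf.add(x, b)

y = biased_sum(tf.ones([2]), tf.ones([2]), name="biased")  # ops land under the "biased/" scope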

Example 11: get_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
	with tf.variable_scope('loss') as LossEvaluation:
		predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
		predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

		# with tf.variable_scope('quat_normalization') as norm:
		norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
		norm_predicted_quat = tf.sqrt(norm_predicted_quat)
		norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
		# A tiny epsilon is added before dividing so that near-zero quaternions
		# do not cause division by zero.
		const = tf.constant(0.0000001, shape=(batch_size, 1), dtype=tf.float32)
		norm_predicted_quat = tf.add(norm_predicted_quat, const)
		predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)

		transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat,predicted_position)

		#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
		loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
	return loss 
Author: vinits5, Project: pointnet-registration-framework, Lines: 20, Source: ipcr_model.py
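The epsilon-guarded normalization in isolation (illustrative values; keepdims requires TF 1.5 or later):

q = tf.constant([[0.0, 0.0, 0.0, 0.0]])                       # degenerate quaternion
norm = tf.sqrt(tf.reduce_sum(tf.square(q), 1, keepdims=True))
q_unit = tf.divide(q, tf.add(norm, 1e-7))                     # epsilon prevents 0/0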

Example 12: get_loss_b

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):	
		with tf.variable_scope('loss') as LossEvaluation:
			predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
			predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

			# with tf.variable_scope('quat_normalization') as norm:
			norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
			norm_predicted_quat = tf.sqrt(norm_predicted_quat)
			norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
			const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
			norm_predicted_quat = tf.add(norm_predicted_quat,const)
			predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)
	
			transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)

			# Use 1024 Points to find loss.
			#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
			loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
			# loss = 0
		return loss 
Author: vinits5, Project: pointnet-registration-framework, Lines: 22, Source: pcr_model.py

Example 13: conv2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def conv2d(x, n_kernel, k_sz, stride=1):
  """convolutional layer with relu activation wrapper
  Args:
    x:          4d tensor [batch, height, width, channels]
    n_kernel:   number of kernels (output size)
    k_sz:       2d array, kernel size. e.g. [8,8]
    stride:     stride
  Returns:
    a conv2d layer (ReLU applied)
  """
  W = tf.Variable(tf.random_normal([k_sz[0], k_sz[1], int(x.get_shape()[3]), n_kernel]))
  b = tf.Variable(tf.random_normal([n_kernel]))
  # - strides[0] and strides[1] must be 1
  # - padding can be 'VALID'(without padding) or 'SAME'(zero padding)
  #     - http://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
  conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
  conv = tf.nn.bias_add(conv, b) # add bias term
  return tf.nn.relu(conv) # rectified linear unit: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) 
Author: yrlu, Project: reinforcement_learning, Lines: 20, Source: tf_utils.py
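A minimal usage sketch (shapes are illustrative):

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
h = conv2d(x, n_kernel=16, k_sz=[3, 3], stride=2)  # SAME padding -> [None, 14, 14, 16]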

Example 14: fc

# Required import: import tensorflow [as alias]
# Or: from tensorflow import add [as alias]
def fc(x, n_output, scope="fc", activation_fn=None, initializer=None):
  """Fully connected layer wrapper with optional activation.
  Args:
    x:             2d tensor [batch, n_input]
    n_output:      output size
    scope:         variable scope name
    activation_fn: optional activation applied to the output (None = linear)
    initializer:   optional weight initializer (random normal when None)
  """
  with tf.variable_scope(scope):
    if initializer is None:
      # default initialization
      W = tf.Variable(tf.random_normal([int(x.get_shape()[1]), n_output]))
      b = tf.Variable(tf.random_normal([n_output]))
    else:
      W = tf.get_variable("W", shape=[int(x.get_shape()[1]), n_output], initializer=initializer)
      b = tf.get_variable("b", shape=[n_output],
                          initializer=tf.constant_initializer(.0, dtype=tf.float32))
    fc1 = tf.add(tf.matmul(x, W), b)
    if activation_fn is not None:
      fc1 = activation_fn(fc1)
  return fc1 
Author: yrlu, Project: reinforcement_learning, Lines: 21, Source: tf_utils.py
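A minimal usage sketch (TF1 graph mode; scope names are illustrative):

x = tf.placeholder(tf.float32, [None, 128])
hidden = fc(x, n_output=64, scope="h1", activation_fn=tf.nn.relu)
logits = fc(hidden, n_output=10, scope="out")  # linear output layer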


Note: The tensorflow.add method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.