

Python tensorflow.mul Method Code Examples

This article collects typical usage examples of the tensorflow.mul method in Python. If you are unsure what exactly tensorflow.mul does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of this method elsewhere in tensorflow.


Fifteen code examples of tensorflow.mul are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
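Note: tf.mul (along with tf.sub and tf.neg) was removed in TensorFlow 1.0 in favor of tf.multiply (and tf.subtract, tf.negative), so the snippets below only run as-is on pre-1.0 releases. A minimal compatibility shim, assuming you want to try these historical examples on TensorFlow 1.x:

import tensorflow as tf

# restore the pre-1.0 aliases if they are missing (TF 1.x only)
if not hasattr(tf, 'mul'):
    tf.mul = tf.multiply
    tf.sub = tf.subtract
    # tf.div still exists in TF 1.x, so no alias is needed for it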

Example 1: _step

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def _step(self, f, z, o):
        """
        Args:
            f:
            z:
            o:
        Returns:
            h:
        """
        with tf.variable_scope("fo-Pool"):
            # f, z, o are each batch_size x size
            f = tf.sigmoid(f)
            z = tf.tanh(z)
            o = tf.sigmoid(o)
            self.c = tf.mul(f, self.c) + tf.mul(1 - f, z)
            self.h = tf.mul(o, self.c)  # h is size vector

        return self.h 
Author: hirofumi0810 | Project: tensorflow_end2end_speech_recognition | Lines: 20 | Source: qrnn.py
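The fo-pooling recurrence above is c_t = f_t ⊙ c_{t-1} + (1 - f_t) ⊙ z_t, h_t = o_t ⊙ c_t (the QRNN of Bradbury et al.). A minimal NumPy sketch of one step, with hypothetical shapes:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch_size, size = 2, 4
f = sigmoid(np.random.randn(batch_size, size))   # forget gate
z = np.tanh(np.random.randn(batch_size, size))   # candidate update
o = sigmoid(np.random.randn(batch_size, size))   # output gate
c_prev = np.zeros((batch_size, size))            # previous cell state

c = f * c_prev + (1 - f) * z   # element-wise, mirroring the tf.mul calls
h = o * c                      # batch_size x size hidden state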

Example 2: _variable_with_weight_decay

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Author: hohoins | Project: ml | Lines: 25 | Source: cifar10.py
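The weight-decay terms created above accumulate in the 'losses' collection; the training objective is then simply the sum of that collection. A sketch of the downstream usage (the data_loss here is illustrative, not taken from cifar10.py):

import tensorflow as tf

pred = tf.constant([1.0, 2.0])
target = tf.constant([1.5, 1.5])
data_loss = tf.reduce_mean(tf.square(pred - target), name='data_loss')
tf.add_to_collection('losses', data_loss)

# every weight_decay op added by _variable_with_weight_decay lives in the
# same collection, so the total objective is just their sum
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')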

Example 3: test_binary_ops_combined

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val}) 
Author: NervanaSystems | Project: ngraph-python | Lines: 18 | Source: test_computations.py
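For reference, the same graph written against the TensorFlow 1.x names (reusing the placeholders a and b from the test above; a sketch, not part of the test suite):

c = tf.add(a, b)
d = tf.multiply(c, a)   # was tf.mul
e = tf.divide(d, b)     # was tf.div
f = tf.subtract(a, e)   # was tf.sub
g = tf.maximum(a, f)    # unchanged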

Example 4: l1_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer 
Author: Cyber-Neuron | Project: inception_v3 | Lines: 19 | Source: losses.py
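The factory returns a closure that is later applied to a tensor; typical usage looks like the sketch below (variable names are illustrative). The same pattern applies to l2_regularizer and l1_l2_regularizer in Examples 5 and 6:

import tensorflow as tf

weights = tf.Variable(tf.ones([3, 3]), name='weights')
reg_fn = l1_regularizer(weight=0.01)
l1_penalty = reg_fn(weights)   # scalar op: 0.01 * sum(|weights|)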

Example 5: l2_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer 
Author: Cyber-Neuron | Project: inception_v3 | Lines: 19 | Source: losses.py

Example 6: l1_l2_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer 
Author: Cyber-Neuron | Project: inception_v3 | Lines: 27 | Source: losses.py

Example 7: l1_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss 
Author: Cyber-Neuron | Project: inception_v3 | Lines: 20 | Source: losses.py

Example 8: l2_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss 
Author: Cyber-Neuron | Project: inception_v3 | Lines: 20 | Source: losses.py

Example 9: Minibatch_Discriminator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
    # df_dim and batchsize are module-level globals in the original project
    num_inputs = df_dim * 4
    theta = tf.get_variable(name + "/theta", [num_inputs, num_kernels, dim_per_kernel],
                            initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name + "/lws", [num_kernels, dim_per_kernel],
                                       initializer=tf.constant_initializer(0.0))
    # normalize theta per input dimension, then rescale by exp(log_weight_scale)
    W = tf.mul(theta, tf.expand_dims(
        tf.exp(log_weight_scale) / tf.sqrt(tf.reduce_sum(tf.square(theta), 0)), 0))
    W = tf.reshape(W, [-1, num_kernels * dim_per_kernel])
    x = tf.reshape(input, [batchsize, num_inputs])
    activation = tf.matmul(x, W)
    activation = tf.reshape(activation, [-1, num_kernels, dim_per_kernel])
    # pairwise L1 distances between samples, per kernel (diagonal zeroed by the eye mask)
    abs_dif = tf.mul(
        tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation, 3),
                                    tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0))), 2),
        1 - tf.expand_dims(tf.constant(np.eye(batchsize), dtype=np.float32), 1))
    f = tf.reduce_sum(tf.exp(-abs_dif), 2) / tf.reduce_sum(tf.exp(-abs_dif))
    print(f.get_shape())
    print(input.get_shape())
    return tf.concat(1, [x, f]) 
Author: val-iisc | Project: deligan | Lines: 18 | Source: dg_mnist.py
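This is minibatch discrimination in the sense of Salimans et al. (2016): each sample is projected onto num_kernels small feature vectors, pairwise L1 distances to the rest of the batch are computed per kernel, and a closeness statistic f is appended to the features. A simplified NumPy shape walk-through (hypothetical sizes; omits the reweighting and the diagonal mask):

import numpy as np

batchsize, num_kernels, dim_per_kernel = 4, 100, 5
activation = np.random.randn(batchsize, num_kernels, dim_per_kernel)

# pairwise L1 distance between samples i and j, per kernel -> (batch, kernels, batch)
abs_dif = np.abs(activation[:, :, :, None]
                 - np.transpose(activation, (1, 2, 0))[None]).sum(axis=2)
f = np.exp(-abs_dif).sum(axis=2)   # (batch, kernels): closeness to the batch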

Example 10: _variable_with_weight_decay

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    
    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
    
    Returns:
      Variable Tensor
    """
    var = tf.Variable(tf.random_normal(shape, stddev=stddev), name=name)
    # NOTE: the weight-decay branch is disabled (commented out) in this version
    '''if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)'''
    return var 
Author: samitok | Project: deeppose | Lines: 23 | Source: LSPModels.py

Example 11: _variable_with_weight_decay

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    
    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
    
    Returns:
      Variable Tensor
    """
    var = tf.Variable(tf.random_normal(shape, stddev=stddev), name=name)
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var 
Author: samitok | Project: deeppose | Lines: 23 | Source: LSPModels.py

Example 12: loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def loss(logits, labels):
    """Calculates Mean Pixel Error.
    
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]
    
    Returns:
      Loss tensor of type float.
    """
    
    # labels equal to 0 mark invalid annotations; tf.sign yields a 0/1 mask
    # (assumes non-negative labels)
    labelValidity = tf.sign(labels, name='label_validity')
    
    minop = tf.sub(logits, labels, name='Diff_Op')
    
    absop = tf.abs(minop, name='Abs_Op')
    
    lossValues = tf.mul(labelValidity, absop, name='lossValues')
    
    loss_mean = tf.reduce_mean(lossValues, name='MeanPixelError')
    
    tf.add_to_collection('losses', loss_mean)
    
    return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_mean 
Author: samitok | Project: deeppose | Lines: 27 | Source: LSPModels.py
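A tiny numeric illustration of the masking, assuming non-negative labels where 0 marks a missing annotation:

# labels = [12.0, 0.0, 7.5]  ->  tf.sign(labels) = [1.0, 0.0, 1.0]
# logits = [10.0, 3.0, 7.0]  ->  |logits - labels| = [2.0, 3.0, 0.5]
# masked = [2.0, 0.0, 0.5]   ->  mean = 2.5 / 3 ≈ 0.833
# note: reduce_mean divides by all elements, masked entries included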

Example 13: compute_kumar2beta_kld

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def compute_kumar2beta_kld(a, b, alpha, beta):
    # precompute some terms
    ab = tf.mul(a, b)
    a_inv = tf.pow(a, -1)
    b_inv = tf.pow(b, -1)

    # compute truncated Taylor expansion for the E[log (1-v)] term
    kl = tf.mul(tf.pow(1 + ab, -1), beta_fn(a_inv, b))
    for idx in xrange(10):
        kl += tf.mul(tf.pow(idx + 2 + ab, -1), beta_fn(tf.mul(idx + 2., a_inv), b))
    kl = tf.mul(tf.mul(beta - 1, b), kl)

    # -0.57721 is -gamma, the negated Euler-Mascheroni constant
    kl += tf.mul(tf.div(a - alpha, a), -0.57721 - tf.digamma(b) - b_inv)
    # add normalization constants
    kl += tf.log(ab) + tf.log(beta_fn(alpha, beta))

    # final term
    kl += tf.div(-(b - 1), b)

    return kl 
Author: enalisnick | Project: mixture_density_VAEs | Lines: 22 | Source: gaussMMVAE_collapsed.py
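For reference, the closed-form expression this function implements, KL(Kumaraswamy(a, b) || Beta(alpha, beta)), as used in Nalisnick and Smyth's stick-breaking VAE work (reconstructed here from the code itself):

KL = ((a - alpha) / a) * (-gamma - digamma(b) - 1/b)
   + log(a*b) + log(B(alpha, beta)) - (b - 1)/b
   + (beta - 1) * b * sum_{m=1..inf} (1 / (m + a*b)) * B(m/a, b)

where gamma ≈ 0.57721 is the Euler-Mascheroni constant, digamma is the digamma function, and B(·, ·) is the beta function; the code truncates the infinite series after 11 terms.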

Example 14: f_prop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def f_prop(self):

        # init variational params
        self.mu = []
        self.sigma = []
        self.kumar_a = []
        self.kumar_b = []
        self.z = []
        x_recon_linear = []

        h1 = mlp(self.X, self.encoder_params['base'])
        
        for k in xrange(self.K):
            self.mu.append(mlp(h1, self.encoder_params['mu'][k]))
            self.sigma.append(tf.exp(mlp(h1, self.encoder_params['sigma'][k])))
            self.z.append(self.mu[-1] + tf.mul(self.sigma[-1], tf.random_normal(tf.shape(self.sigma[-1]))))
            x_recon_linear.append(mlp(self.z[-1], self.decoder_params))

        self.kumar_a = tf.exp(mlp(h1, self.encoder_params['kumar_a']))
        self.kumar_b = tf.exp(mlp(h1, self.encoder_params['kumar_b']))

        return x_recon_linear 
Author: enalisnick | Project: mixture_density_VAEs | Lines: 24 | Source: gaussMMVAE_collapsed.py
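The tf.mul call in the z update is the standard reparameterization trick, z = mu + sigma * eps with eps ~ N(0, I), which keeps the sampling step differentiable with respect to mu and sigma. A NumPy one-liner for a single mixture component (illustrative shapes):

import numpy as np

mu = np.zeros((2, 8))       # batch of 2, latent dim 8
sigma = np.ones((2, 8))
z = mu + sigma * np.random.randn(*mu.shape)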

Example 15: get_component_samples

# Required import: import tensorflow [as alias]
# Or: from tensorflow import mul [as alias]
def get_component_samples(self, latent_dim, batchSize):
        a_inv = tf.pow(self.kumar_a,-1)
        b_inv = tf.pow(self.kumar_b,-1)

        # compose into stick segments using pi = v \prod (1-v)
        v_means = tf.mul(self.kumar_b, beta_fn(1.+a_inv, self.kumar_b))
        components = tf.to_int32(tf.argmax(tf.concat(1, self.compose_stick_segments(v_means)), 1))
        components = tf.concat(1, [tf.expand_dims(tf.range(0,batchSize),1), tf.expand_dims(components,1)])

        # sample a z
        all_z = []
        for d in xrange(latent_dim):
            temp_z = tf.concat(1, [tf.expand_dims(self.z[k][:, d],1) for k in xrange(self.K)])
            all_z.append(tf.expand_dims(tf.gather_nd(temp_z, components),1))

        return tf.concat(1, all_z) 
Author: enalisnick | Project: mixture_density_VAEs | Lines: 18 | Source: gaussMMVAE_collapsed.py
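compose_stick_segments is defined elsewhere in the project; per the code comment it performs the stick-breaking composition pi_1 = v_1, pi_k = v_k * prod_{j<k}(1 - v_j). A NumPy sketch of that construction (illustrative values):

import numpy as np

v = np.array([0.5, 0.4, 0.3])   # mean stick proportions per component
remaining = np.concatenate([[1.0], np.cumprod(1 - v)[:-1]])
pi = v * remaining              # mixture weights: [0.5, 0.2, 0.09]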


Note: the tensorflow.mul method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.