

Python tensorflow.less Method Code Examples

This article collects typical usage examples of the Python method tensorflow.less. If you are wondering how tensorflow.less works, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples of the tensorflow module that provides this method.


Below are 15 code examples of the tensorflow.less method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
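
As a quick orientation before the examples: tf.less(x, y) compares two tensors element-wise and returns a boolean tensor of the same (broadcast) shape, true wherever x < y. A minimal TF 1.x sketch:

import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant(2.0)
mask = tf.less(a, b)          # element-wise a < b -> [True, False, False]
as_float = tf.to_float(mask)  # booleans as floats -> [1.0, 0.0, 0.0]

with tf.Session() as sess:
    print(sess.run([mask, as_float]))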

Example 1: _inv_preemphasis

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def _inv_preemphasis(x):
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])

    y = final[1]

    return y 
Author: candlewill | Project: Griffin_lim | Lines: 22 | Source file: griffin_lim.py
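
A minimal usage sketch for the function above (TF 1.x; the _HParams class below is a hypothetical stand-in for the repository's hparams module, with preemphasis assumed to be 0.97):

import numpy as np
import tensorflow as tf

class _HParams(object):
    preemphasis = 0.97  # assumed value; the real one comes from the project's config

hparams = _HParams()

signal = tf.placeholder(tf.float32, [None])
restored = _inv_preemphasis(signal)

with tf.Session() as sess:
    emphasized = np.array([1.0, 0.5, -0.2], dtype=np.float32)
    print(sess.run(restored, feed_dict={signal: emphasized}))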

Example 2: get_hash_slots

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def get_hash_slots(self, query):
    """Gets hashed-to buckets for batch of queries.

    Args:
      query: 2-d Tensor of query vectors.

    Returns:
      A list of hashed-to buckets for each hash function.
    """

    binary_hash = [
        tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
        for i in range(self.num_libraries)]
    hash_slot_idxs = [
        tf.reduce_sum(
            tf.to_int32(binary_hash[i]) *
            tf.constant([[2 ** j for j in range(self.num_hashes)]],
                        dtype=tf.int32), 1)
        for i in range(self.num_libraries)]
    return hash_slot_idxs 
Author: ringringyi | Project: DOTA_models | Lines: 22 | Source file: memory.py
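
The bucketing arithmetic above, reduced to a standalone sketch (shapes illustrative, not from the original class): the sign of each query/hash-vector dot product contributes one bit, and the bits are packed into an integer bucket index via powers of two:

import tensorflow as tf

num_hashes = 4
query = tf.random_normal([8, 16])               # 8 query vectors of depth 16
hash_vecs = tf.random_normal([num_hashes, 16])  # one random hyperplane per bit

bits = tf.less(tf.matmul(query, hash_vecs, transpose_b=True), 0)    # [8, 4] bool
powers = tf.constant([[2 ** j for j in range(num_hashes)]], dtype=tf.int32)
bucket_idxs = tf.reduce_sum(tf.to_int32(bits) * powers, 1)          # [8] int32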

Example 3: _create_learning_rate

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase) 
Author: ringringyi | Project: DOTA_models | Lines: 26 | Source file: graph_builder.py
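
A reduced sketch of the same switch-then-decay pattern without the GridPoint proto (rates and step counts below are illustrative):

import tensorflow as tf

step_var = tf.train.get_or_create_global_step()
switch = tf.less(step_var, 1000)   # use the first rate for the first 1000 steps
base_rate = tf.cond(switch,
                    lambda: tf.constant(0.1),
                    lambda: tf.constant(0.01))
learning_rate = tf.train.exponential_decay(
    base_rate, step_var, decay_steps=10000, decay_rate=0.96, staircase=True)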

Example 4: _compute_loss

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
      return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm) 
Author: ringringyi | Project: DOTA_models | Lines: 24 | Source file: losses.py
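
A quick numeric check of the branch selection above: where |diff| < 1 the loss is quadratic, elsewhere it is linear minus 0.5:

import tensorflow as tf

diff = tf.constant([[-2.0, -0.5, 0.0, 0.5, 2.0]])
abs_diff = tf.abs(diff)
loss = tf.where(tf.less(abs_diff, 1),
                0.5 * tf.square(abs_diff),
                abs_diff - 0.5)

with tf.Session() as sess:
    print(sess.run(loss))  # [[1.5, 0.125, 0.0, 0.125, 1.5]]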

Example 5: memory_run

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def memory_run(step, nmaps, mem_size, batch_size, vocab_size,
               global_step, do_training, update_mem, decay_factor, num_gpus,
               target_emb_weights, output_w, gpu_targets_tn, it):
  """Run memory."""
  q = step[:, 0, it, :]
  mlabels = gpu_targets_tn[:, it, 0]
  res, mask, mem_loss = memory_call(
      q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem)
  res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1)

  # Mix gold and original in the first steps, 20% later.
  gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7)
  use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. * decay_factor)
  use_gold = tf.maximum(use_gold, 0.2) * do_training
  mem = tf.cond(tf.less(tf.random_uniform([]), use_gold),
                lambda: use_gold * gold + (1.0 - use_gold) * res,
                lambda: res)
  mem = tf.reshape(mem, [-1, 1, 1, nmaps])
  return mem, mem_loss, update_mem 
Author: ringringyi | Project: DOTA_models | Lines: 21 | Source file: neural_gpu.py
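
The scheduled-sampling mix above in isolation (values illustrative): with probability use_gold the gold embeddings are blended in, otherwise the model's own output is kept:

import tensorflow as tf

use_gold = tf.constant(0.5)
gold = tf.ones([4, 8])  # stand-in for the gold embeddings
res = tf.zeros([4, 8])  # stand-in for the memory output

mem = tf.cond(tf.less(tf.random_uniform([]), use_gold),
              lambda: use_gold * gold + (1.0 - use_gold) * res,
              lambda: res)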

Example 6: dsn_loss_coefficient

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def dsn_loss_coefficient(params):
  """The global_step-dependent weight that specifies when to kick in DSN losses.

  Args:
    params: A dictionary of parameters. Expecting 'domain_separation_startpoint'

  Returns:
    A weight to that effectively enables or disables the DSN-related losses,
    i.e. similarity, difference, and reconstruction losses.
  """
  return tf.where(
      tf.less(slim.get_or_create_global_step(),
              params['domain_separation_startpoint']), 1e-10, 1.0)


################################################################################
# MODEL CREATION
################################################################################ 
Author: ringringyi | Project: DOTA_models | Lines: 20 | Source file: dsn.py
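
The same gating in isolation (step values illustrative): tf.where on a scalar predicate picks a negligible weight before the start point and 1.0 after it:

import tensorflow as tf

global_step = tf.constant(500, dtype=tf.int64)
startpoint = 1000
coefficient = tf.where(tf.less(global_step, startpoint), 1e-10, 1.0)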

Example 7: neural_gpu_body

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality, we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
Author: akzaidi | Project: fine-lm | Lines: 24 | Source file: neural_gpu.py
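
The padding test above in isolation: a batch element counts as padding when the absolute sum of its (zeroed-out) embedding falls below the threshold:

import tensorflow as tf

inp = tf.constant([[[0.0, 0.0], [0.0, 0.0]],
                   [[1.0, -2.0], [0.5, 0.0]]])  # [batch=2, width=2, depth=2]
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)

with tf.Session() as sess:
    print(sess.run(padding_inp))  # [True, False]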

Example 8: sample

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def sample(self, features=None):
    del features
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [
        hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
        hp.bottleneck_bits
    ]
    rand = tf.random_uniform(size)
    res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
    # If you want to set some first bits to a fixed value, do this:
    # fixed = tf.zeros_like(rand) - 1.0
    # nbits = 3
    # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
    return res 
Author: akzaidi | Project: fine-lm | Lines: 18 | Source file: autoencoders.py
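
The sampling trick above in isolation: comparing uniform noise against 0.5 and rescaling maps {False, True} to random {-1.0, 1.0} bits:

import tensorflow as tf

rand = tf.random_uniform([2, 3])
bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0

with tf.Session() as sess:
    print(sess.run(bits))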

Example 9: bottleneck

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise.
      # So log(p) * bottleneck_bits = log(noise)
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Author: akzaidi | Project: fine-lm | Lines: 23 | Source file: autoencoders.py

Example 10: add_positional_embedding

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def add_positional_embedding(x, max_length, name, positions=None):
  """Add positional embedding.

  Args:
    x: a Tensor with shape [batch, length, depth]
    max_length: an integer.  static maximum size of any dimension.
    name: a name for this layer.
    positions: an optional tensor with shape [batch, length]

  Returns:
    a Tensor the same shape as x.
  """
  _, length, depth = common_layers.shape_list(x)
  var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
  if positions is None:
    sliced = tf.cond(
        tf.less(length, max_length),
        lambda: tf.slice(var, [0, 0], [length, -1]),
        lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
    return x + tf.expand_dims(sliced, 0)
  else:
    return x + tf.gather(var, tf.to_int32(positions)) 
Author: akzaidi | Project: fine-lm | Lines: 24 | Source file: common_attention.py
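
A minimal invocation sketch (sizes illustrative; assumes the tensor2tensor common_layers module used above is importable):

import tensorflow as tf
from tensor2tensor.layers import common_layers  # dependency of the function above

x = tf.zeros([4, 20, 64])  # [batch, length, depth]
x = add_positional_embedding(x, max_length=256, name="pos_emb")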

Example 11: isemhash_bottleneck

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                            mode == tf.estimator.ModeKeys.TRAIN,
                            max_prob=isemhash_mix_prob)
    return d, 0.0 
Author: akzaidi | Project: fine-lm | Lines: 23 | Source file: discretization.py
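
The training-time bit flip above, isolated (noise level illustrative): uniform noise below bottleneck_noise maps to -1 and flips the corresponding bit of d:

import tensorflow as tf

d = tf.constant([[1.0, -1.0, 1.0, -1.0]])
bottleneck_noise = 0.25  # flips roughly a quarter of the bits
noise = tf.random_uniform(tf.shape(d))
flip = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
d_noisy = d * flip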

Example 12: mode

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def mode(cls, parameters: Dict[str, Tensor]) -> Tensor:
        mu = parameters["mu"]
        tau = parameters["tau"]
        nu = parameters["nu"]
        beta = parameters["beta"]

        lam = 1./beta
        mode = tf.zeros_like(mu)
        mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                       tf.less(mu+lam/tau, nu)),
                        mu+lam/tau,
                        mode)
        mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                       tf.greater_equal(mu+lam/tau, nu)),
                        nu,
                        mode)
        mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                       tf.greater(mu-lam/tau, nu)),
                        mu-lam/tau,
                        mode)
        mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                       tf.less_equal(mu-lam/tau, nu)),
                        nu,
                        mode)
        return mode
Author: bethgelab | Project: decompose | Lines: 27 | Source file: jumpNormalAlgorithms.py

Example 13: _adapt_mass

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def _adapt_mass(self, t, num_chain_dims):
        ewmv = ExponentialWeightedMovingVariance(
            self.mass_decay, self.data_shapes, num_chain_dims)
        new_mass = tf.cond(self.adapt_mass,
                           lambda: ewmv.get_updated_precision(self.q),
                           lambda: ewmv.precision())
        if not isinstance(new_mass, list):
            new_mass = [new_mass]

        # print('New mass is = {}'.format(new_mass))
        # TODO incorrect shape?
        # print('New mass={}'.format(new_mass))
        # print('q={}, NMS={}'.format(self.q[0].get_shape(),
        #                             new_mass[0].get_shape()))
        with tf.control_dependencies(new_mass):
            current_mass = tf.cond(
                tf.less(tf.cast(t, tf.int32), self.mass_collect_iters),
                lambda: [tf.ones(shape) for shape in self.data_shapes],
                lambda: new_mass)
        if not isinstance(current_mass, list):
            current_mass = [current_mass]
        return current_mass 
Author: thu-ml | Project: zhusuan | Lines: 24 | Source file: hmc.py

Example 14: _leapfrog

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p 
Author: thu-ml | Project: zhusuan | Lines: 27 | Source file: hmc.py

Example 15: random_flip_left_right

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import less [as alias]
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes.
    """
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates.
        """
        bboxes = tf.stack([1.0 - bboxes[:, 2], bboxes[:, 1],
                           1.0 - bboxes[:, 0], bboxes[:, 3]], axis=-1)        
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = tf.convert_to_tensor(image, name='image')
        uniform_random = tf.random.uniform([], 0, 1.0, seed=seed)
        mirror_cond = tf.less(uniform_random, .5)
        # Flip image.
        image = tf.cond(mirror_cond,
                        lambda: tf.image.flip_left_right(image),
                        lambda: image)
        # Flip bboxes.
        bboxes = tf.cond(mirror_cond,
                         lambda: flip_bboxes(bboxes),
                         lambda: bboxes)
        return image, bboxes 
Author: lambdal | Project: lambda-deep-learning-demo | Lines: 26 | Source file: ssd_augmenter.py
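
A hypothetical usage sketch on a dummy image with two normalized boxes (per flip_bboxes above, coordinates 0 and 2 are the extents mirrored by the flip):

import tensorflow as tf

image = tf.zeros([300, 300, 3])
bboxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                      [0.3, 0.1, 0.9, 0.4]])
flipped_image, flipped_bboxes = random_flip_left_right(image, bboxes, seed=42)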


Note: The tensorflow.less method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not repost without permission.