

Python tensorflow.to_int32 Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.to_int32 method in Python. If you are wondering what tensorflow.to_int32 does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow package.


Fifteen code examples of tensorflow.to_int32 are shown below, sorted by popularity by default.
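
Before the examples, a quick note on semantics: tf.to_int32(x) is shorthand for tf.cast(x, tf.int32), truncating floats toward zero; it was removed from the top-level namespace in TensorFlow 2 (use tf.cast(x, tf.int32) or tf.compat.v1.to_int32 there). A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.constant([1.7, -0.9, 2.0])
y = tf.to_int32(x)  # same as tf.cast(x, tf.int32); truncates toward zero

with tf.Session() as sess:
    print(sess.run(y))  # [ 1  0  2]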

Example 1: sequence_to_images

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def sequence_to_images(tensor, num_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batches * height, depth) sequence tensor
    num_batches: the number of image batches

  Returns:
    (num_batches, height, width, depth) tensor
  """

  shapeT = tf.shape(tensor)
  shapeL = tensor.get_shape().as_list()
  # Calculate the output height of the upsampled tensor
  height = tf.to_int32(shapeT[1] / num_batches)
  n_shape = tf.stack([
      shapeT[0],
      num_batches,
      height,
      shapeL[2]
  ])

  reshaped = tf.reshape(tensor, n_shape)
  return tf.transpose(reshaped, [1, 2, 0, 3]) 
Author: TobiasGruening, Project: ARU-Net, Lines: 26, Source: layers.py
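
A quick shape check of the function above, with hypothetical sizes (5 steps, 2 images of height 3, depth 8), assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

seq = tf.constant(np.zeros((5, 6, 8), dtype=np.float32))  # axis 1 packs 2 images * height 3
img = sequence_to_images(seq, num_batches=2)

with tf.Session() as sess:
    print(sess.run(tf.shape(img)))  # [2 3 5 8]: the width is the original num_steps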

Example 2: get_hash_slots

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def get_hash_slots(self, query):
    """Gets hashed-to buckets for batch of queries.

    Args:
      query: 2-d Tensor of query vectors.

    Returns:
      A list of hashed-to buckets for each hash function.
    """

    binary_hash = [
        tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
        for i in xrange(self.num_libraries)]
    hash_slot_idxs = [
        tf.reduce_sum(
            tf.to_int32(binary_hash[i]) *
            tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                        dtype=tf.int32), 1)
        for i in xrange(self.num_libraries)]
    return hash_slot_idxs 
Author: ringringyi, Project: DOTA_models, Lines: 22, Source: memory.py
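
The bucket computation is easiest to see in plain NumPy: each query's sign pattern against one library's num_hashes hyperplanes is read as a binary number, selecting one of 2**num_hashes buckets. A sketch with hypothetical sizes:

import numpy as np

num_hashes = 3
query = np.random.randn(4, 16)              # 4 query vectors of dim 16
hash_vec = np.random.randn(num_hashes, 16)  # one library's random hyperplanes

binary = (query @ hash_vec.T) < 0           # (4, num_hashes) sign bits
powers = 2 ** np.arange(num_hashes)         # [1, 2, 4]
slots = (binary.astype(np.int32) * powers).sum(axis=1)
print(slots)                                # each entry is in [0, 2**num_hashes)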

Example 3: filter_groundtruth_with_nan_box_coordinates

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of the following groundtruth tensors:
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Author: ringringyi, Project: DOTA_models, Lines: 24, Source: ops.py
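
The NaN test above counts NaN coordinates per row and keeps only rows with a zero count. A minimal sketch, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                     [np.nan, 0.2, 0.6, 0.6]])
has_nan = tf.greater(
    tf.reduce_sum(tf.to_int32(tf.is_nan(boxes)), reduction_indices=[1]), 0)
valid_indices = tf.where(tf.logical_not(has_nan))

with tf.Session() as sess:
    print(sess.run(valid_indices))  # [[0]] -- only the first box is kept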

Example 4: _testBuildDefaultModel

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def _testBuildDefaultModel(self):
    images = tf.to_float(np.random.rand(32, 28, 28, 1))
    labels = {}
    labels['classes'] = tf.one_hot(
        tf.to_int32(np.random.randint(0, 9, (32))), 10)

    params = {
        'use_separation': True,
        'layers_to_regularize': 'fc3',
        'weight_decay': 0.0,
        'ps_tasks': 1,
        'domain_separation_startpoint': 1,
        'alpha_weight': 1,
        'beta_weight': 1,
        'gamma_weight': 1,
        'recon_loss_name': 'sum_of_squares',
        'decoder_name': 'small_decoder',
        'encoder_name': 'default_encoder',
    }
    return images, labels, params 
Author: ringringyi, Project: DOTA_models, Lines: 22, Source: dsn_test.py

Example 5: padded_accuracy_topk

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: metrics.py
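
The broadcast in the middle of this function is the key trick: the labels get a trailing singleton axis and are broadcast against the k candidate ids, so a label scores 1.0 if it matches any of them. A small sketch with hypothetical values, assuming TensorFlow 1.x:

import tensorflow as tf

outputs = tf.constant([[3, 7, 1]])                  # top-3 predicted ids
labels = tf.expand_dims(tf.constant([7]), axis=-1)  # [1] -> [1, 1]
labels += tf.zeros_like(outputs)                    # broadcast to [1, 3]
same_topk = tf.reduce_sum(tf.to_float(tf.equal(outputs, labels)), axis=-1)

with tf.Session() as sess:
    print(sess.run(same_topk))  # [1.] -- label 7 appears among the top-3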

Example 6: rouge_l_fscore

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def rouge_l_fscore(predictions, labels, **unused_kwargs):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0) 
Author: akzaidi, Project: fine-lm, Lines: 22, Source: rouge.py
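
The argmax-plus-squeeze conversion here (also used by rouge_2_fscore and bleu_score below) relies on the model logits carrying two singleton axes, as tensor2tensor text models emit. A shape walkthrough with hypothetical sizes, assuming TensorFlow 1.x:

import tensorflow as tf

predictions = tf.zeros([2, 5, 1, 1, 100])               # [batch, length, 1, 1, vocab]
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))  # [2, 5, 1, 1]
outputs = tf.squeeze(outputs, axis=[-1, -2])            # [2, 5]
print(outputs.shape)                                    # (2, 5)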

Example 7: rouge_2_fscore

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def rouge_2_fscore(predictions, labels, **unused_kwargs):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """

  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0) 
Author: akzaidi, Project: fine-lm, Lines: 22, Source: rouge.py

Example 8: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def __init__(self, pad_mask):
    """Compute and store the location of the padding.

    Args:
      pad_mask (tf.Tensor): Reference padding tensor of shape
        [batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
        containing non-zero positive values that indicate padding locations.
    """
    self.nonpad_ids = None
    self.dim_origin = None

    with tf.name_scope("pad_reduce/get_ids"):
      pad_mask = tf.reshape(pad_mask, [-1])  # Flatten the batch
      # nonpad_ids contains the coordinates of the zero rows. Since pad_mask
      # is float32, zero equality is checked with |x| < epsilon (epsilon=1e-9
      # as standard); pad_mask only contains non-negative values, so tf.abs
      # would be redundant.
      self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
      self.dim_origin = tf.shape(pad_mask)[:1] 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: expert_utils.py
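
A hedged sketch of how nonpad_ids is typically consumed downstream (not the exact t2t PadRemover API): a gather compacts away the padded rows before an expensive layer, and a scatter restores them afterwards. Assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.reshape(tf.range(8, dtype=tf.float32), [4, 2])  # 4 flattened positions, depth 2
pad_mask = tf.constant([0.0, 1.0, 0.0, 1.0])           # rows 1 and 3 are padding
nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))    # [[0], [2]]

compact = tf.gather_nd(x, nonpad_ids)                       # only the real rows
restored = tf.scatter_nd(nonpad_ids, compact, tf.shape(x))  # zeros where padded

with tf.Session() as sess:
    print(sess.run(compact))   # [[0. 1.] [4. 5.]]
    print(sess.run(restored))  # [[0. 1.] [0. 0.] [4. 5.] [0. 0.]]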

Example 9: bleu_score

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def bleu_score(predictions, labels, **unused_kwargs):
  """BLEU score computation between labels and predictions.

  An approximate BLEU scoring method, since we do not glue word pieces or
  decode the ids and tokenize the output. By default we use an n-gram order
  of 4 and apply the brevity penalty; beam search is not used here.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    bleu: float32 scalar, approximate BLEU score
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])

  bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
  return bleu, tf.constant(1.0) 
Author: akzaidi, Project: fine-lm, Lines: 23, Source: bleu_hook.py

Example 10: noise_from_step_num

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def noise_from_step_num():
  """Quantization noise equal to (phi * (step_num + 1)) mod 1.0.

  We avoid random_uniform here because random seeds are not respected on TPU,
  which may cause the parameters on different replicas to go out of sync.

  Returns:
    a float32 scalar
  """
  step = tf.to_int32(tf.train.get_or_create_global_step()) + 1
  phi = ((5 ** 0.5) - 1) / 2
  # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous
  # due to loss of precision when the step number gets large.
  # Computation in doubles does not work on TPU, so we use this complicated
  # alternative computation which does not suffer from these roundoff errors.
  ret = 0.0
  for i in range(30):
    ret += (((phi * (2 ** i)) % 1.0)  # double-precision computation in python
            * tf.to_float(tf.mod(step // (2 ** i), 2)))
  return tf.mod(ret, 1.0) 
Author: akzaidi, Project: fine-lm, Lines: 23, Source: quantization.py
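
The bit decomposition can be checked in plain Python: writing step as a sum of bits b_i * 2**i, we have (phi * step) mod 1 = (sum of b_i * ((phi * 2**i) mod 1)) mod 1, and each per-bit term stays small enough to keep full double precision:

step = 123456789
phi = ((5 ** 0.5) - 1) / 2

direct = (phi * step) % 1.0
decomposed = sum(((phi * (2 ** i)) % 1.0) * ((step >> i) & 1)
                 for i in range(30)) % 1.0

d = abs(direct - decomposed)
print(min(d, 1.0 - d) < 1e-6)  # True (direct loses some precision at large step)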

Example 11: xception_exit

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def xception_exit(inputs):
  """Xception exit flow."""
  with tf.variable_scope("xception_exit"):
    x = inputs
    x_shape = x.get_shape().as_list()
    if x_shape[1] is None or x_shape[2] is None:
      length_float = tf.to_float(tf.shape(x)[1])
      length_float *= tf.to_float(tf.shape(x)[2])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x_depth = x_shape[3]
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    elif x_shape[1] != x_shape[2]:
      spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
      if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
        raise ValueError("Assumed inputs were square-able but they were "
                         "not. Shape: %s" % x_shape)
      x_depth = x_shape[3]
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])

    x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
    return tf.nn.relu(x) 
Author: akzaidi, Project: fine-lm, Lines: 23, Source: xception.py

Example 12: bytenet_internal

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def bytenet_internal(inputs, targets, hparams):
  """ByteNet, main step used for training."""
  with tf.variable_scope("bytenet"):
    # Flatten inputs and extend length by 50%.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1]))
    inputs_shape = inputs.shape.as_list()
    inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]])
    inputs_shape[1] = None
    inputs.set_shape(inputs_shape)  # Don't lose the other shapes when padding.
    # Pad inputs and targets to be the same length, divisible by 50.
    inputs, targets = common_layers.pad_to_same_length(
        inputs, targets, final_length_divisible_by=50)
    final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat,
                                          "SAME", "encoder", hparams)

    shifted_targets = common_layers.shift_right(targets)
    kernel = (hparams.kernel_height, hparams.kernel_width)
    decoder_start = common_layers.conv_block(
        tf.concat([final_encoder, shifted_targets], axis=3),
        hparams.hidden_size, [((1, 1), kernel)],
        padding="LEFT")

    return residual_dilated_conv(decoder_start, hparams.num_block_repeat,
                                 "LEFT", "decoder", hparams) 
Author: akzaidi, Project: fine-lm, Lines: 27, Source: bytenet.py

Example 13: targets_bottom

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def targets_bottom(self, x, summary_prefix="targets_bottom"):  # pylint: disable=arguments-differ
    inputs = x
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
      common_layers.summarize_video(inputs, summary_prefix)
      inputs_shape = common_layers.shape_list(inputs)
      # We embed each of 256=self.top_dimensionality possible pixel values.
      embedding_var = tf.get_variable(
          "pixel_embedding",
          [self.top_dimensionality, self.PIXEL_EMBEDDING_SIZE])
      hot_inputs = tf.one_hot(tf.to_int32(inputs), self.top_dimensionality)
      hot_inputs = tf.reshape(hot_inputs, [-1, self.top_dimensionality])
      embedded = tf.matmul(hot_inputs, embedding_var)
      # Let's now merge all channels that were embedded into a single vector.
      merged_size = self.PIXEL_EMBEDDING_SIZE * inputs_shape[4]
      embedded = tf.reshape(embedded, inputs_shape[:4] + [merged_size])
      transposed = common_layers.time_to_channels(embedded)
      return tf.layers.dense(
          transposed,
          self._body_input_depth,
          name="merge_pixel_embedded_frames") 
Author: akzaidi, Project: fine-lm, Lines: 22, Source: modalities.py
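
The one-hot-times-matrix product above is just an embedding lookup written in a form often preferred on TPUs, where dense matmuls can be faster than gathers. A small sketch of the equivalence, with hypothetical sizes and assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

ids = tf.constant([3, 1, 4])
table = tf.constant(np.random.randn(256, 8).astype(np.float32))

via_matmul = tf.matmul(tf.one_hot(ids, 256), table)
via_gather = tf.gather(table, ids)

with tf.Session() as sess:
    a, b = sess.run([via_matmul, via_gather])
    print(np.allclose(a, b))  # True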

Example 14: top_1_tpu

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) 
Author: akzaidi, Project: fine-lm, Lines: 18, Source: common_layers.py
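
The mask-times-range trick is the whole function: positions holding the max keep their index, everything else becomes 0, and a reduce_max over the index vector recovers an argmax. A NumPy sketch (note that on ties this keeps the last maximal index, while tf.argmax does not guarantee which tied index it returns):

import numpy as np

inputs = np.array([2.0, 9.0, 9.0, 1.0])
mask = (inputs == inputs.max()).astype(np.int32)  # [0 1 1 0]
index = np.arange(inputs.shape[-1]) * mask        # [0 1 2 0]
print(inputs.max(), index.max())                  # 9.0 2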

Example 15: add_positional_embedding

# Required module: import tensorflow [as alias]
# Or: from tensorflow import to_int32 [as alias]
def add_positional_embedding(x, max_length, name, positions=None):
  """Add positional embedding.

  Args:
    x: a Tensor with shape [batch, length, depth]
    max_length: an integer; static maximum size of the length dimension.
    name: a name for this layer.
    positions: an optional tensor with shape [batch, length]

  Returns:
    a Tensor the same shape as x.
  """
  _, length, depth = common_layers.shape_list(x)
  var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
  if positions is None:
    sliced = tf.cond(
        tf.less(length, max_length),
        lambda: tf.slice(var, [0, 0], [length, -1]),
        lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
    return x + tf.expand_dims(sliced, 0)
  else:
    return x + tf.gather(var, tf.to_int32(positions)) 
Author: akzaidi, Project: fine-lm, Lines: 24, Source: common_attention.py
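
A minimal usage sketch with hypothetical shapes, assuming TensorFlow 1.x and that common_layers is importable as in the snippet above. Explicit positions make each token gather its own embedding row, which helps with packed or shifted sequences:

import tensorflow as tf

x = tf.zeros([2, 3, 8])                          # [batch, length, depth]
positions = tf.constant([[0, 1, 2], [4, 5, 6]])  # per-token positions
y = add_positional_embedding(x, max_length=16, name="pos_emb",
                             positions=positions)
print(y.shape)  # (2, 3, 8)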


Note: The tensorflow.to_int32 examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's license before using or redistributing the code; do not reproduce without permission.