

Python tensorflow.float Method Code Examples

This article collects typical code examples of the tensorflow.float method in Python. If you are wondering what tensorflow.float is for, how it is used, or what real-world calls look like, the curated examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.


The sections below show 15 code examples of the tensorflow.float method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
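
Note that TensorFlow itself does not expose a tensorflow.float attribute: the concrete float dtypes are tf.float16, tf.float32 and tf.float64, and most of the examples below mention "tf.float" informally in docstrings while the code itself uses tf.float32. A minimal sketch (the tensor names here are illustrative only, not taken from any project below):

import tensorflow as tf

# TensorFlow has no tf.float attribute; tf.float32 is the dtype that the
# "tf.float" mentioned in the docstrings below usually stands for.
x = tf.constant([1, 2, 3], dtype=tf.int32)
x_float = tf.cast(x, tf.float32)   # int32 -> float32
print(x_float.dtype)               # <dtype: 'float32'>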

Example 1: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def __init__(self, input_dim, output_dim, hparams):
    """Class used to map final node states to an output vector.

    Args:

      input_dim: Dimension of the node states taken as input
      output_dim: Dimension of the vector valued output of the network
      hparams: Specifies the architecture of the output neural nets.

    Relevant hparams for this function:
      hparams.num_output_hidden_layers: (int) number of hidden layers in the
        output neural nets.
      hparams.hidden_dim: (int) hidden dim shared by all hidden layers.
      hparams.activation: (str - 'relu' or 'tanh') activation function to use
        in the neural nets.
      hparams.normalizer: (str - 'layer' or 'none') whether or not to use
        layer norm in the neural nets.
      hparams.keep_prob: (float) dropout keep prob for the output neural nets.

    """
    super(GraphLevelOutput, self).__init__(hparams)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init_fprop() 
Author: brain-research | Project: mpnn | Lines: 27 | Source: mpnn.py

Example 2: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def __init__(self, scales,
               aspect_ratios=[0.5, 1.0, 2.0],
               base_anchor_size=None):
    """Constructs a FpnAnchorGenerator. See the paper by Lin et al.

    Anchors that are returned by calling the `generate` method on the returned
    MultipleGridAnchorGenerator object are always in normalized coordinates
    and clipped to the unit square (i.e. all coordinates lie in [0, 1] x [0, 1]).

    Args:
      scales: list of (float) anchor scales, one scale per pyramid level.
      aspect_ratios: list or tuple of (float) aspect ratios to place on each
        pyramid location.
      base_anchor_size: base anchor size as [height, width].
    """
    self._pyramid_scales = scales
    box_specs_list = [[(scale, aspect_ratio)
        for aspect_ratio in aspect_ratios] for scale in scales]
    super().__init__(box_specs_list, base_anchor_size) 
Author: simonmeister | Project: motion-rcnn | Lines: 22 | Source: multiple_grid_anchor_generator.py

Example 3: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def __call__(
            self,
            input_vector,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        """
            :param input_vector: The input vector fed into the encoder.
                   Shape: [batch x 19], type tf.int8
            :type input_vector: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout_rate: Tensor (tf.float) of the probability of dropout
            :type dropout_rate: Tensor
            :param is_training: Tensor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        # ================ Embeddings ================
        embedded_h3, _ = self.h3_embed(
            input_vector,
            regularizer,
            dropout_rate,
            is_training=is_training
        )

        # ================ RNN ================
        hidden, hidden_size = self.recurrent_stack(
            embedded_h3,
            regularizer=regularizer,
            dropout_rate=dropout_rate,
            is_training=is_training
        )

        return hidden, hidden_size 
Author: uber | Project: ludwig | Lines: 39 | Source: h3_encoders.py

Example 4: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def __call__(
            self,
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        """
            :param input_sequence: The input sequence fed into the encoder.
                   Shape: [batch x sequence length], type tf.int32
            :type input_sequence: Tensor
            :param regularizer: The regularizer to use for the weights
                   of the encoder.
            :type regularizer:
            :param dropout_rate: Tensor (tf.float) of the probability of dropout
            :type dropout_rate: Tensor
            :param is_training: Tensor (tf.bool) specifying if in training mode
                   (important for dropout)
            :type is_training: Tensor
        """
        input_sequence = tf.cast(input_sequence, tf.float32)
        while len(input_sequence.shape) < 3:
            input_sequence = tf.expand_dims(
                input_sequence, -1)
        hidden_size = input_sequence.shape[-1]

        hidden = reduce_sequence(input_sequence, self.reduce_output)

        return hidden, hidden_size 
Author: uber | Project: ludwig | Lines: 31 | Source: sequence_encoders.py

Example 5: random_flip_image_and_annotation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def random_flip_image_and_annotation(image_tensor, annotation_tensor, shapes):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
    The function performs random flip of image and annotation tensors with probability of 1/2
    The flip is performed or not performed for image and annotation consistently, so that
    annotation matches the image.
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation
    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
    """
    original_shape = tf.shape(annotation_tensor)
    # ensure the annotation tensor has shape (width, height, 1)
    annotation_tensor = tf.cond(tf.rank(annotation_tensor) < 3, lambda: tf.expand_dims(annotation_tensor, axis=2), lambda: annotation_tensor)

    # Random variable: two possible outcomes (0 or 1)
    # with a 1 in 2 chance
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])

    randomly_flipped_img = tf.cond(pred=tf.equal(random_var, 0), true_fn=lambda: tf.image.flip_left_right(image_tensor), false_fn=lambda: image_tensor)

    randomly_flipped_annotation = tf.cond(
        pred=tf.equal(random_var, 0), true_fn=lambda: tf.image.flip_left_right(annotation_tensor), false_fn=lambda: annotation_tensor)

    return randomly_flipped_img, tf.reshape(randomly_flipped_annotation, original_shape), shapes 
Author: autoai-org | Project: CVTron | Lines: 34 | Source: read_data.py
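
A hypothetical usage sketch (not part of the CVTron project): running the flip once on dummy tensors in a TF1 graph to check that the returned image and annotation keep their shapes.

import tensorflow as tf

# Dummy inputs, assumed shapes only; the function above is TF1-style, so a
# Session is used to evaluate the result.
image = tf.random_uniform([4, 4, 3], maxval=255.0)
annotation = tf.zeros([4, 4, 1], dtype=tf.int32)
shapes = tf.shape(image)

flipped_img, flipped_ann, shapes_out = random_flip_image_and_annotation(image, annotation, shapes)

with tf.Session() as sess:
    img_val, ann_val = sess.run([flipped_img, flipped_ann])
    print(img_val.shape, ann_val.shape)  # (4, 4, 3) (4, 4, 1)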

Example 6: distort_randomly_image_color

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def distort_randomly_image_color(image_tensor, annotation_tensor, shapes):
    """Accepts image tensor of (width, height, 3) and returns color distorted image.
    The function performs random brightness, saturation, hue, contrast change as it is performed
    for inception model training in TF-Slim (you can find the link below in comments). All the
    parameters of random variables were originally preserved. There are two regimes for the function
    to work: fast and slow. Slow one performs only saturation and brightness random change is performed.
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3) of tf.int32 or tf.float
        Tensor with image with range [0,255]
    fast_mode : boolean
        Boolean value representing whether to use fast or slow mode
    Returns
    -------
    img_float_distorted_original_range : Tensor of size (width, height, 3) of type tf.float.
        Image Tensor with distorted color in [0,255] intensity range
    """
    fast_mode = False
    # Make the range to be in [0,1]
    img_float_zero_one_range = tf.to_float(image_tensor) / 255

    # Randomly distort the color of image. There are 4 ways to do it.
    # Credit: TF-Slim
    # https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py#L224
    # Most probably the Inception models were trained using this color augmentation:
    # https://github.com/tensorflow/models/tree/master/slim#pre-trained-models
    distorted_image = apply_with_random_selector(img_float_zero_one_range, lambda x, ordering: distort_color(x, ordering, fast_mode=fast_mode), num_cases=4)

    img_float_distorted_original_range = distorted_image * 255

    return img_float_distorted_original_range, annotation_tensor, shapes 
Author: autoai-org | Project: CVTron | Lines: 33 | Source: read_data.py

Example 7: _fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def _fprop(
      self,
      node_states,
      adjacency_in,
      distance,  # pylint: disable=unused-argument
      reuse_graph_tensors=False):
    """Computes a_t from h_{t-1}, see bottom of page 3 in the paper.

    Args:
      node_states: [batch_size, num_nodes, node_dim] tensor (h_{t-1})
      adjacency_in (tf.int32): [batch_size, num_nodes, num_nodes]
      distance (tf.float): [batch_size, num_nodes, num_nodes] NOT USED.
      reuse_graph_tensors: (boolean) must be set to True the first time that
        fprop is called so that we can compute the a_in and a_out tensors.

    Returns:
     a_t: [batch_size * num_nodes, node_dim] which is the node representations
     after a single propagation step.

     This also sets graph_precomputed to True to indicate that part of the
     graph has been cached and will be reused in future calls of _fprop
    """
    # build the larger A matrices on the first call of _fprop
    if not reuse_graph_tensors:
      self._precompute_graph(adjacency_in)

    return message_pass(node_states, self._a_in, self._a_out,
                        self.hparams.node_dim) 
Author: brain-research | Project: mpnn | Lines: 30 | Source: mpnn.py
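
The docstring describes a single message-passing step in which precomputed incoming and outgoing adjacency structures are applied to the node states. The repo's message_pass helper is not shown on this page; the sketch below only illustrates the general idea with plain batched matmuls and simplified shapes, and is not the mpnn implementation.

import tensorflow as tf

# Minimal sketch of GGNN-style message passing (assumed shapes; NOT the
# repo's message_pass): messages from incoming and outgoing edges are
# aggregated separately and concatenated.
def simple_message_pass(node_states, a_in, a_out):
    # node_states: [batch, num_nodes, node_dim]
    # a_in, a_out: [batch, num_nodes, num_nodes] adjacency matrices
    msg_in = tf.matmul(a_in, node_states)    # aggregate over incoming edges
    msg_out = tf.matmul(a_out, node_states)  # aggregate over outgoing edges
    return tf.concat([msg_in, msg_out], axis=-1)  # [batch, num_nodes, 2*node_dim]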

Example 8: _annotate_buckets

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def _annotate_buckets(x, bucket_boundaries):
  """Annotates a bucketized tensor with the boundaries that were applied.

  Creates a deferred annotation for the specified tensor.

  Args:
    x: The tensor to annotate.
    bucket_boundaries: A tensor of boundaries that were used to bucketize x.
  """
  # The annotations proto currently isn't available in OSS builds, so schema
  # annotations are not supported.
  if not common.IS_ANNOTATIONS_PB_AVAILABLE:
    return
  from tensorflow_transform import annotations_pb2  # pylint: disable=g-import-not-at-top
  message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name

  # The BucketBoundaries annotation expects a float field.
  bucket_boundaries = tf.cast(bucket_boundaries, tf.float32)
  # Some callers provide rank 2 boundaries like [[.25], [.5], [.75], [1.]],
  # whereas we expect rank 2 boundaries like [[.25, .5, .75, 1.]]
  bucket_boundaries = tf.reshape(bucket_boundaries, [-1])
  bucket_boundaries = tf.expand_dims(bucket_boundaries, 0)
  size = (tf.shape(bucket_boundaries)[1],)
  message_proto = tf.raw_ops.EncodeProto(sizes=[size],
                                         values=[bucket_boundaries],
                                         field_names=['boundaries'],
                                         message_type=message_type)
  assert message_proto.shape == [1]
  message_proto = message_proto[0]

  type_url = os.path.join(common.ANNOTATION_PREFIX_URL, message_type)
  schema_inference.annotate(type_url, message_proto, tensor=x) 
Author: tensorflow | Project: transform | Lines: 34 | Source: mappers.py
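
The reshape/expand_dims pair above normalizes boundaries of either accepted layout into a single rank-2 row vector. A small standalone demonstration of just that step (TF1-style session used only for printing):

import tensorflow as tf

# Rank-2 column-style boundaries are flattened and re-expanded into one row,
# e.g. [[.25], [.5], [.75], [1.]] becomes [[0.25, 0.5, 0.75, 1.0]].
boundaries = tf.constant([[.25], [.5], [.75], [1.]])
boundaries = tf.reshape(boundaries, [-1])
boundaries = tf.expand_dims(boundaries, 0)

with tf.Session() as sess:
    print(sess.run(boundaries))  # [[0.25 0.5  0.75 1.  ]]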

Example 9: random_horiz_flip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def random_horiz_flip(image_tensor, annotation_tensor):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
    The function performs random flip of image and annotation tensors with probability of 1/2
    The flip is performed or not performed for image and annotation consistently, so that
    annotation matches the image.
    
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation
        
    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
        
    """
    
    # Random variable: two possible outcomes (0 or 1)
    # with a 1 in 2 chance
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])

    randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                 fn1=lambda: tf.image.flip_left_right(image_tensor),
                                                 fn2=lambda: image_tensor)

    randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                        fn1=lambda: tf.image.flip_left_right(annotation_tensor),
                                                        fn2=lambda: annotation_tensor)
    
    return randomly_flipped_img, randomly_flipped_annotation 
Author: hailotech | Project: seg-mentor | Lines: 37 | Source: augmentation.py

Example 10: distort_randomly_image_color

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def distort_randomly_image_color(image_tensor, fast_mode=False):
    """Accepts image tensor of (width, height, 3) and returns color distorted image.
    The function performs random brightness, saturation, hue, contrast change as it is performed
    for inception model training in TF-Slim (you can find the link below in comments). All the
    parameters of random variables were originally preserved. There are two regimes for the function
    to work: fast and slow. Slow one performs only saturation and brightness random change is performed.
    
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3) of tf.int32 or tf.float
        Tensor with image with range [0,255]
    fast_mode : boolean
        Boolean value representing whether to use fast or slow mode
        
    Returns
    -------
    img_float_distorted_original_range : Tensor of size (width, height, 3) of type tf.float.
        Image Tensor with distorted color in [0,255] intensity range
    """
    
    # Make the range to be in [0,1]
    img_float_zero_one_range = tf.to_float(image_tensor) / 255
    
    # Randomly distort the color of image. There are 4 ways to do it.
    # Credit: TF-Slim
    # https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py#L224
    # Most probably the Inception models were trained using this color augmentation:
    # https://github.com/tensorflow/models/tree/master/slim#pre-trained-models
    distorted_image = apply_with_random_selector(img_float_zero_one_range,
                                                 lambda x, ordering: distort_color(x, ordering, fast_mode=fast_mode),
                                                 num_cases=4)
    
    img_float_distorted_original_range = distorted_image * 255
    
    return img_float_distorted_original_range 
Author: hailotech | Project: seg-mentor | Lines: 37 | Source: augmentation.py

Example 11: _parse_inputs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def _parse_inputs(self, inputs):
        # NOTE
        # label_img is always NHWC/NHW/channel_last
        # If label_img is not one-hot, the distribution doesn't include void.
        # label_img is 0-vec for void labels
        image, label_img = inputs
        if not self.options.is_label_one_hot:
            # From now on label_img is tf.float one hot, void has 0-vector.
            # because we assume void >=num_classes
            label_img = tf.one_hot(label_img, self.num_classes, axis=-1)

        def nonvoid_mask(prob_img, name=None):
            mask = tf.cast(tf.greater(tf.reduce_sum(prob_img, axis=-1),
                                      self.options.eval_threshold),
                           dtype=tf.float32)
            mask = tf.reshape(mask, [-1], name=name)
            # TODO is this actually beneficial; and which KeepProb to use?
            #mask = Dropout(name, mask, keep_prob=0.5)
            return mask

        def flatten_label(prob_img, name=None):
            return tf.reshape(prob_img, [-1, self.num_classes], name=name)

        l_mask = []
        l_label = []
        l_dyn_hw = []
        label_img = tf.identity(label_img, name='label_img_0')
        n_label_scales = self.n_pools + 1 if not self.do_scale_feat_to_label else 1
        for pi in range(n_label_scales):
            l_mask.append(nonvoid_mask(label_img, 'eval_mask_{}'.format(pi)))
            l_label.append(flatten_label(label_img, 'label_{}'.format(pi)))
            img_shape = tf.shape(label_img)
            l_dyn_hw.append([img_shape[1], img_shape[2]])
            if pi == self.n_pools:
                break
            label_img = AvgPooling('label_img_{}'.format(pi+1), label_img, 2, \
                                   padding='same', data_format='channels_last')
        return image, [l_label, l_mask, l_dyn_hw] 
Author: microsoft | Project: petridishnn | Lines: 40 | Source: anytime_fcn.py
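
The "void has 0-vector" comment relies on a documented property of tf.one_hot: indices outside [0, depth) produce an all-zero row, so void labels (>= num_classes) turn into zero vectors. A small standalone demonstration:

import tensorflow as tf

# tf.one_hot maps out-of-range indices to all-zero rows, which is what lets
# void labels (>= num_classes) become 0-vectors in the snippet above.
num_classes = 3
labels = tf.constant([0, 2, 3])  # 3 is "void" here
one_hot = tf.one_hot(labels, num_classes, axis=-1)

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 0. 0.]]   <- the void label becomes an all-zero vector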

Example 12: flip_randomly_left_right_image_with_annotation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
    The function performs random flip of image and annotation tensors with probability of 1/2
    The flip is performed or not performed for image and annotation consistently, so that
    annotation matches the image.
    
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation
        
    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
        
    """
    
    # Random variable: two possible outcomes (0 or 1)
    # with a 1 in 2 chance
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])


    randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                 fn1=lambda: tf.image.flip_left_right(image_tensor),
                                                 fn2=lambda: image_tensor)

    randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                        fn1=lambda: tf.image.flip_left_right(annotation_tensor),
                                                        fn2=lambda: annotation_tensor)
    
    return randomly_flipped_img, randomly_flipped_annotation 
Author: warmspringwinds | Project: tf-image-segmentation | Lines: 38 | Source: augmentation.py

Example 13: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def __init__(self, cell, attention_mechanism, dropout, attn_cell_config,
        num_proj, dtype=tf.float32):
        """
        Args:
            cell: (RNNCell)
            attention_mechanism: (AttentionMechanism)
            dropout: (tf.float)
            attn_cell_config: (dict) hyper params
            num_proj: (int) size of the output projection
            dtype: data type used by the cell (default tf.float32)

        """
        # variables and tensors
        self._cell                = cell
        self._attention_mechanism = attention_mechanism
        self._dropout             = dropout

        # hyperparameters and shapes
        self._n_channels     = self._attention_mechanism._n_channels
        self._dim_e          = attn_cell_config["dim_e"]
        self._dim_o          = attn_cell_config["dim_o"]
        self._num_units      = attn_cell_config["num_units"]
        self._dim_embeddings = attn_cell_config["dim_embeddings"]
        self._num_proj       = num_proj
        self._dtype          = dtype

        # for RNNCell
        self._state_size = AttentionState(self._cell._state_size, self._dim_o) 
Author: guillaumegenthial | Project: im2latex | Lines: 28 | Source: attention_cell.py

Example 14: _get_td_error

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def _get_td_error(self, qnet_qvalues, actions, y):
        return y - tf.reduce_sum(qnet_qvalues * tf.cast(actions, tf.float32), axis=1)
Author: asyml | Project: texar | Lines: 4 | Source: dqn_agent.py
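
In the snippet above, actions is a one-hot (or multi-hot) mask, so multiplying and summing picks out the Q-value of the taken action. If actions were stored as integer indices instead, the same TD error could be computed with tf.one_hot; a hypothetical variant (not from the texar code base):

import tensorflow as tf

# Hypothetical variant (not from texar): TD error when actions are integer
# indices rather than one-hot vectors.
def td_error_from_indices(q_values, action_indices, y):
    # q_values: [batch, num_actions] float32, action_indices: [batch] int32/int64
    action_mask = tf.one_hot(action_indices, tf.shape(q_values)[1], dtype=tf.float32)
    return y - tf.reduce_sum(q_values * action_mask, axis=1)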

Example 15: q_too_small

# Required import: import tensorflow [as alias]
# Or: from tensorflow import float [as alias]
def q_too_small(agent,
                state,
                action,
                transition_type,
                environment_steps,
                num_episodes,
                q_min=0.5):
  """True of q is too small.

  Args:
    agent: RL agent.
    state: A [num_state_dims] tensor representing a state.
    action: Action performed.
    transition_type: Type of transition after action
    environment_steps: Number of steps performed by environment.
    num_episodes: Number of episodes.
    q_min: Returns true if the qval is less than q_min
  Returns:
    cond: Returns an op that evaluates to true if qval is less than q_min.
  """
  del transition_type, environment_steps, num_episodes
  state_for_reset_agent = tf.stack(state[:-1], tf.constant([0], dtype=tf.float32))
  qval = agent.BASE_AGENT_CLASS.critic_net(
      tf.expand_dims(state_for_reset_agent, 0), tf.expand_dims(action, 0))[0, :]
  cond = tf.greater(tf.constant(q_min), qval)
  return cond 
Author: generalized-iou | Project: g-tensorflow-models | Lines: 28 | Source: cond_fn.py


Note: The tensorflow.float method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.