

Python tensorflow.logical_and Method Code Examples

This article collects typical usage examples of the tensorflow.logical_and method in Python. If you are wondering what tensorflow.logical_and does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow package.


The following shows 15 code examples of the tensorflow.logical_and method, sorted by popularity by default.
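
For orientation, here is a minimal sketch of what tf.logical_and does: it computes the element-wise logical AND of two boolean tensors (with broadcasting), and it is typically combined with comparison ops to build validity masks, as in the project examples below. This snippet is illustrative only and is not taken from any of the listed projects; it runs as-is under TF 2.x eager execution, while most of the examples below use the TF 1.x graph API.

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])
print(tf.logical_and(a, b))          # element-wise AND: [True False False False]

# Common pattern in the examples below: combine range checks into a mask.
x = tf.constant([0.2, 0.7, 1.5])
in_unit_interval = tf.logical_and(tf.greater_equal(x, 0.0),
                                  tf.less_equal(x, 1.0))
print(in_unit_interval)              # [True True False]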

Example 1: check_tensor_shape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
Developer: deezer, Project: spleeter, Lines: 18, Source: tensor.py

Example 2: prune_small_boxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) 
Developer: ringringyi, Project: DOTA_models, Lines: 18, Source: box_list_ops.py

Example 3: assert_box_normalized

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 23, Source: shape_utils.py

Example 4: init

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def init(self, data: Tensor) -> None:
        tau = self.__tauInit
        dtype = self.__dtype
        properties = self.__properties
        noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                      properties=properties)
        self.__noiseDistribution = noiseDistribution
        observedMask = tf.logical_not(tf.is_nan(data))
        trainMask = tf.logical_not(self.cv.mask(X=data))
        trainMask = tf.get_variable("trainMask",
                                    dtype=trainMask.dtype,
                                    initializer=trainMask)
        trainMask = tf.logical_and(trainMask, observedMask)
        testMask = tf.logical_and(observedMask,
                                  tf.logical_not(trainMask))
        self.__observedMask = observedMask
        self.__trainMask = trainMask
        self.__testMask = testMask 
Developer: bethgelab, Project: decompose, Lines: 20, Source: cvNormalNdLikelihood.py

Example 5: updateK

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def updateK(self, k, prepVars, U):
        f = self.__f
        UfShape = U[f].get_shape()

        lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
        postfk = lhUfk*self.prior[k].cond()
        Ufk = postfk.draw()
        Ufk = tf.expand_dims(Ufk, 0)

        normUfk = tf.norm(Ufk)
        notNanNorm = tf.logical_not(tf.is_nan(normUfk))
        finiteNorm = tf.is_finite(normUfk)
        positiveNorm = normUfk > 0.
        isValid = tf.logical_and(notNanNorm,
                                 tf.logical_and(finiteNorm,
                                                positiveNorm))
        Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                     lambda: U[f])

        # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
        Uf.set_shape(UfShape)
        U[f] = Uf
        return(U) 
Developer: bethgelab, Project: decompose, Lines: 25, Source: postU.py

Example 6: _get_prediction_from_topk

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _get_prediction_from_topk(self, topk_predicted_words):
        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl, Project: code2vec, Lines: 21, Source: keras_words_subtoken_metrics.py

Example 7: _filter_input_rows

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _filter_input_rows(self, *row_parts) -> tf.bool:
        row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

        #assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
        #           {row_parts.path_source_token_indices, row_parts.path_indices,
        #            row_parts.path_target_token_indices, row_parts.context_valid_mask})

        # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
        any_word_valid_mask_per_context_part = [
            tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                         self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
        any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

        if self.estimator_action.is_evaluate:
            cond = any_contexts_is_valid  # scalar
        else:  # training
            word_is_valid = tf.greater(
                row_parts.target_index, self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
            cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

        return cond  # scalar 
Developer: tech-srl, Project: code2vec, Lines: 27, Source: path_context_reader.py

Example 8: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def call(self, y_pred, **kwargs):
        y_pred.shape.assert_has_rank(2)
        top_k_pred_indices = tf.cast(tf.nn.top_k(y_pred, k=self.top_k).indices,
                                     dtype=self.index_to_word_table.key_dtype)
        predicted_target_words_strings = self.index_to_word_table.lookup(top_k_pred_indices)

        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(top_k_pred_indices, predicted_target_words_strings) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(top_k_pred_indices), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(predicted_target_words_strings,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl, Project: code2vec, Lines: 26, Source: keras_word_prediction_layer.py

Example 9: filter_out_of_bound_boxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def filter_out_of_bound_boxes(boxes, feature_shape, stride):
    """
    过滤图像边框外的anchor
    :param boxes: [n,y1,x1,y2,x2]
    :param feature_shape: 特征图的长宽 [h,w]
    :param stride: 网络步长
    :return:
    """
    # 图像原始长宽为特征图长宽*步长
    h, w = feature_shape[0], feature_shape[1]
    h = tf.cast(h * stride, tf.float32)
    w = tf.cast(w * stride, tf.float32)

    valid_boxes_tag = tf.logical_and(tf.logical_and(tf.logical_and(boxes[:, 0] >= 0,
                                                                   boxes[:, 1] >= 0),
                                                    boxes[:, 2] <= h),
                                     boxes[:, 3] <= w)
    boxes = tf.boolean_mask(boxes, valid_boxes_tag)
    valid_boxes_indices = tf.where(valid_boxes_tag)[:, 0]
    return boxes, valid_boxes_indices 
Developer: yizt, Project: keras-ctpn, Lines: 22, Source: anchor.py

Example 10: get_acceptance_rate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate 
Developer: thu-ml, Project: zhusuan, Lines: 18, Source: hmc.py

Example 11: _leapfrog

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p 
Developer: thu-ml, Project: zhusuan, Lines: 27, Source: hmc.py

Example 12: _crop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape) 
Developer: CharlesShang, Project: FastMaskRCNN, Lines: 26, Source: utils.py

Example 13: truncate_example

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):
    max_training_sentences = self.config["max_training_sentences"]
    num_sentences = context_word_emb.shape[0]
    assert num_sentences > max_training_sentences

    sentence_offset = random.randint(0, num_sentences - max_training_sentences)
    word_offset = text_len[:sentence_offset].sum()
    num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
    tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]
    context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
    char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
    text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]

    speaker_ids = speaker_ids[word_offset: word_offset + num_words]
    gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
    gold_starts = gold_starts[gold_spans] - word_offset
    gold_ends = gold_ends[gold_spans] - word_offset
    cluster_ids = cluster_ids[gold_spans]

    return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids 
Developer: sattree, Project: gap, Lines: 24, Source: coref_model.py

Example 14: get_batch_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def get_batch_dataset(record_file, parser, config):
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]

        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id

        def reduce_func(key, elements):
            return elements.batch(config.batch_size)

        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset 
Developer: HKUST-KnowComp, Project: R-Net, Lines: 27, Source: util.py

Example 15: chk_idx_out_of_bounds_along_axis

# Required import: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def chk_idx_out_of_bounds_along_axis(cls, data, axis, indices):
    """ Check indices out of bounds for ScatterElement
    In Tensorflow GPU version, if an out of bound index is found,
    the index is ignored for ScatterND/TensorScatterNDUpdate.
    But ONNX spec state that it is an error if any index values
    are out of bounds. Therefore the converter need to run this
    function to verify all the indices are in bounds along the
    axis before send it to Tensoflow. If out of bound is detected
    then the caller of this function need to throw
    InvalidArgumentError exception.
    """
    data_shape = tf.cast(tf_shape(data), indices.dtype)
    limit = data_shape[axis]
    cond1 = tf.greater_equal(indices, tf.negative(limit))
    cond2 = tf.less(indices, limit)
    return tf.logical_and(cond1, cond2) 
Developer: onnx, Project: onnx-tensorflow, Lines: 18, Source: gather_and_scatter_mixin.py


Note: The tensorflow.logical_and examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow the corresponding projects' licenses when distributing or using the code; do not reproduce this article without permission.