

Python tensorflow.logical_and Method Code Examples

This article collects typical code examples of the tensorflow.logical_and method in Python. If you are wondering what exactly tensorflow.logical_and does, how to call it, or what real usages look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module that provides this method.


The following shows 15 code examples of the tensorflow.logical_and method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
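Before diving into the examples, here is a minimal standalone sketch of what tf.logical_and does: an element-wise logical AND over boolean tensors, with broadcasting. This sketch is not taken from any of the projects below and assumes TensorFlow 2.x eager execution.

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])

# Element-wise AND of two boolean tensors.
print(tf.logical_and(a, b).numpy())                   # [ True False False False]

# Broadcasting against a scalar also works.
print(tf.logical_and(a, tf.constant(True)).numpy())   # [ True  True False False]

# For boolean tensors, the & operator is equivalent.
print((a & b).numpy())                                # [ True False False False]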

Example 1: check_tensor_shape

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
Developer: deezer, Project: spleeter, Lines: 18, Source: tensor.py
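As a quick, hypothetical usage sketch of check_tensor_shape above (the tensor and shapes are made up for illustration; assuming TensorFlow 2.x eager execution, where the returned boolean evaluates immediately):

import tensorflow as tf

x = tf.zeros([5, 2, 3])
print(check_tensor_shape(x, (None, 2, 3)))  # tf.Tensor(True, ...): non-None dims match
print(check_tensor_shape(x, (None, 2, 4)))  # tf.Tensor(False, ...): last dim differs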

Example 2: prune_small_boxes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) 
Developer: ringringyi, Project: DOTA_models, Lines: 18, Source: box_list_ops.py

Example 3: assert_box_normalized

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized; defaults to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 23, Source: shape_utils.py

Example 4: init

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def init(self, data: Tensor) -> None:
        tau = self.__tauInit
        dtype = self.__dtype
        properties = self.__properties
        noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                      properties=properties)
        self.__noiseDistribution = noiseDistribution
        observedMask = tf.logical_not(tf.is_nan(data))
        trainMask = tf.logical_not(self.cv.mask(X=data))
        trainMask = tf.get_variable("trainMask",
                                    dtype=trainMask.dtype,
                                    initializer=trainMask)
        trainMask = tf.logical_and(trainMask, observedMask)
        testMask = tf.logical_and(observedMask,
                                  tf.logical_not(trainMask))
        self.__observedMask = observedMask
        self.__trainMask = trainMask
        self.__testMask = testMask 
Developer: bethgelab, Project: decompose, Lines: 20, Source: cvNormalNdLikelihood.py

Example 5: updateK

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def updateK(self, k, prepVars, U):
        f = self.__f
        UfShape = U[f].get_shape()

        lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
        postfk = lhUfk*self.prior[k].cond()
        Ufk = postfk.draw()
        Ufk = tf.expand_dims(Ufk, 0)

        normUfk = tf.norm(Ufk)
        notNanNorm = tf.logical_not(tf.is_nan(normUfk))
        finiteNorm = tf.is_finite(normUfk)
        positiveNorm = normUfk > 0.
        isValid = tf.logical_and(notNanNorm,
                                 tf.logical_and(finiteNorm,
                                                positiveNorm))
        Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                     lambda: U[f])

        # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
        Uf.set_shape(UfShape)
        U[f] = Uf
        return U
Developer: bethgelab, Project: decompose, Lines: 25, Source: postU.py

Example 6: _get_prediction_from_topk

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _get_prediction_from_topk(self, topk_predicted_words):
        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl, Project: code2vec, Lines: 21, Source: keras_words_subtoken_metrics.py
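Example 6 above (and Example 8 below) folds a list of boolean masks into a single mask with reduce(tf.logical_and, masks). A minimal standalone sketch of that idiom follows; the mask values are made up for illustration and TensorFlow 2.x eager execution is assumed.

from functools import reduce

import tensorflow as tf

# Hypothetical per-filter masks; in the example above they come from
# self.predicted_words_filters applied to the top-k predicted words.
masks = [
    tf.constant([True, True, False, True]),
    tf.constant([True, False, True, True]),
    tf.constant([True, True, True, False]),
]

# Element-wise conjunction of all masks: a position stays True only if
# every filter accepted it.
combined_mask = reduce(tf.logical_and, masks)
print(combined_mask.numpy())  # [ True False False False]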

Example 7: _filter_input_rows

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _filter_input_rows(self, *row_parts) -> tf.bool:
        row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

        #assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
        #           {row_parts.path_source_token_indices, row_parts.path_indices,
        #            row_parts.path_target_token_indices, row_parts.context_valid_mask})

        # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
        any_word_valid_mask_per_context_part = [
            tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                         self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
        any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

        if self.estimator_action.is_evaluate:
            cond = any_contexts_is_valid  # scalar
        else:  # training
            word_is_valid = tf.greater(
                row_parts.target_index, self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
            cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

        return cond  # scalar 
Developer: tech-srl, Project: code2vec, Lines: 27, Source: path_context_reader.py

Example 8: call

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def call(self, y_pred, **kwargs):
        y_pred.shape.assert_has_rank(2)
        top_k_pred_indices = tf.cast(tf.nn.top_k(y_pred, k=self.top_k).indices,
                                     dtype=self.index_to_word_table.key_dtype)
        predicted_target_words_strings = self.index_to_word_table.lookup(top_k_pred_indices)

        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(top_k_pred_indices, predicted_target_words_strings) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(top_k_pred_indices), dtype=tf.bool)

        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(predicted_target_words_strings,
                                                         first_legal_predicted_target_word_idx)

        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction 
Developer: tech-srl, Project: code2vec, Lines: 26, Source: keras_word_prediction_layer.py

Example 9: filter_out_of_bound_boxes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def filter_out_of_bound_boxes(boxes, feature_shape, stride):
    """
    過濾圖像邊框外的anchor
    :param boxes: [n,y1,x1,y2,x2]
    :param feature_shape: 特征圖的長寬 [h,w]
    :param stride: 網絡步長
    :return:
    """
    # 圖像原始長寬為特征圖長寬*步長
    h, w = feature_shape[0], feature_shape[1]
    h = tf.cast(h * stride, tf.float32)
    w = tf.cast(w * stride, tf.float32)

    valid_boxes_tag = tf.logical_and(tf.logical_and(tf.logical_and(boxes[:, 0] >= 0,
                                                                   boxes[:, 1] >= 0),
                                                    boxes[:, 2] <= h),
                                     boxes[:, 3] <= w)
    boxes = tf.boolean_mask(boxes, valid_boxes_tag)
    valid_boxes_indices = tf.where(valid_boxes_tag)[:, 0]
    return boxes, valid_boxes_indices 
Developer: yizt, Project: keras-ctpn, Lines: 22, Source: anchor.py

Example 10: get_acceptance_rate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate 
Developer: thu-ml, Project: zhusuan, Lines: 18, Source: hmc.py

Example 11: _leapfrog

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p 
Developer: thu-ml, Project: zhusuan, Lines: 27, Source: hmc.py

Example 12: _crop

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape) 
Developer: CharlesShang, Project: FastMaskRCNN, Lines: 26, Source: utils.py

Example 13: truncate_example

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):
    max_training_sentences = self.config["max_training_sentences"]
    num_sentences = context_word_emb.shape[0]
    assert num_sentences > max_training_sentences

    sentence_offset = random.randint(0, num_sentences - max_training_sentences)
    word_offset = text_len[:sentence_offset].sum()
    num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
    tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]
    context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
    char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
    text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]

    speaker_ids = speaker_ids[word_offset: word_offset + num_words]
    gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
    gold_starts = gold_starts[gold_spans] - word_offset
    gold_ends = gold_ends[gold_spans] - word_offset
    cluster_ids = cluster_ids[gold_spans]

    return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids 
Developer: sattree, Project: gap, Lines: 24, Source: coref_model.py

Example 14: get_batch_dataset

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def get_batch_dataset(record_file, parser, config):
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]

        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id

        def reduce_func(key, elements):
            return elements.batch(config.batch_size)

        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset 
Developer: HKUST-KnowComp, Project: R-Net, Lines: 27, Source: util.py

Example 15: chk_idx_out_of_bounds_along_axis

# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_and [as alias]
def chk_idx_out_of_bounds_along_axis(cls, data, axis, indices):
    """ Check indices out of bounds for ScatterElement
    In Tensorflow GPU version, if an out of bound index is found,
    the index is ignored for ScatterND/TensorScatterNDUpdate.
    But ONNX spec state that it is an error if any index values
    are out of bounds. Therefore the converter need to run this
    function to verify all the indices are in bounds along the
    axis before send it to Tensoflow. If out of bound is detected
    then the caller of this function need to throw
    InvalidArgumentError exception.
    """
    data_shape = tf.cast(tf_shape(data), indices.dtype)
    limit = data_shape[axis]
    cond1 = tf.greater_equal(indices, tf.negative(limit))
    cond2 = tf.less(indices, limit)
    return tf.logical_and(cond1, cond2) 
Developer: onnx, Project: onnx-tensorflow, Lines: 18, Source: gather_and_scatter_mixin.py


Note: The tensorflow.logical_and examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.