

Python tensorflow.shape Method Code Examples

This article collects typical usage examples of the tensorflow.shape method in Python. If you are wondering what tensorflow.shape does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The following presents 15 code examples of the tensorflow.shape method, sorted by popularity by default.
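Before diving into the examples, here is a minimal sketch (TensorFlow 1.x graph mode, matching the snippets below) contrasting the dynamic tf.shape op with the static .shape attribute; the placeholder dimensions are illustrative only.

import tensorflow as tf

# Placeholder whose batch dimension is unknown at graph-construction time.
x = tf.placeholder(tf.float32, shape=[None, 128])

static_shape = x.shape        # TensorShape([Dimension(None), Dimension(128)]), known statically
dynamic_shape = tf.shape(x)   # int32 tensor, resolved only when the graph runs

with tf.Session() as sess:
    batch = [[0.0] * 128] * 4  # 4 rows of 128 features
    print(sess.run(dynamic_shape, feed_dict={x: batch}))  # -> [  4 128]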

Example 1: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def build(self, input_shape):
        if not self.supports_masking:
            if not isinstance(input_shape, list) or len(input_shape) != 3:
                raise ValueError('A `AttentionSequencePoolingLayer` layer should be called '
                                 'on a list of 3 inputs')

            if len(input_shape[0]) != 3 or len(input_shape[1]) != 3 or len(input_shape[2]) != 2:
                raise ValueError("Unexpected inputs dimensions,the 3 tensor dimensions are %d,%d and %d , expect to be 3,3 and 2" % (
                    len(input_shape[0]), len(input_shape[1]), len(input_shape[2])))

            if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1 or input_shape[2][1] != 1:
                raise ValueError('A `AttentionSequencePoolingLayer` layer requires '
                                 'inputs of a list of 3 tensors with shape (None,1,embedding_size),(None,T,embedding_size) and (None,1). '
                                 'Got different shapes: %s,%s and %s' % tuple(input_shape))
        else:
            pass
        super(AttentionSequencePoolingLayer, self).build(
            input_shape)  # Be sure to call this somewhere! 
Author: ShenDezhou, Project: icme2019, Lines: 20, Source: sequence.py

Example 2: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def call(self, x):
        if (self.size == None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])

        # Inverse frequencies: 1 / 10000^(2j / size) for j = 0 .. size/2 - 1.
        position_j = 1. / \
            K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)

        # Position indices 0 .. T-1 for every timestep in the batch.
        position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        # Outer product gives the angle matrix; encode as [cos | sin] along the last axis.
        position_ij = K.dot(position_i, position_j)
        outputs = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)

        if self.mode == 'sum':
            if self.scale:
                outputs = outputs * outputs ** 0.5
            return x + outputs
        elif self.mode == 'concat':
            return K.concatenate([outputs, x], 2) 
Author: ShenDezhou, Project: icme2019, Lines: 22, Source: sequence.py
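To make the computation in call above easier to follow, here is a minimal NumPy sketch of the same sinusoidal encoding (cosine half concatenated with sine half, matching the concatenation order in the snippet); size and seq_len are illustrative values.

import numpy as np

size, seq_len = 8, 5
# Inverse frequencies 1 / 10000^(2j / size), one per encoding pair.
inv_freq = 1.0 / np.power(10000.0, 2 * np.arange(size // 2) / size)
positions = np.arange(seq_len, dtype=np.float32)[:, None]             # (seq_len, 1)
angles = positions * inv_freq[None, :]                                 # (seq_len, size // 2)
encoding = np.concatenate([np.cos(angles), np.sin(angles)], axis=-1)   # (seq_len, size)
print(encoding.shape)  # (5, 8)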

Example 3: _mapper

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def _mapper(example_proto):
  features = {
      'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
      'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
  }
  example = tf.parse_single_example(example_proto, features)

  wav = example['samples'][:, 0]

  wav = wav[:16384]
  wav_len = tf.shape(wav)[0]
  wav = tf.pad(wav, [[0, 16384 - wav_len]])

  label = tf.reduce_join(example['label'], 0)

  return wav, label 
Author: acheketa, Project: cwavegan, Lines: 18, Source: dump_tfrecord.py

Example 4: _create_autosummary_var

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims is 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims is 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines: 24, Source: tfutil.py

Example 5: print_layers

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def print_layers(self, title=None, hide_layers_with_no_params=False):
        if title is None: title = self.name
        print()
        print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))

        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
            num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
            total_params += num_params
            if hide_layers_with_no_params and num_params == 0:
                continue

            print('%-28s%-12s%-24s%-24s' % (
                layer_name,
                num_params if num_params else '-',
                layer_output.shape,
                weights[0].shape if len(weights) == 1 else '-'))

        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
        print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
        print()

    # Construct summary ops to include histograms of all trainable parameters in TensorBoard. 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines: 27, Source: tfutil.py

Example 6: fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def fprop(self, x, y, **kwargs):
        if self.attack is not None:
            x = x, self.attack(x)
        else:
            x = x,

        # Catching RuntimeError: Variable -= value not supported by tf.eager.
        try:
            y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], tf.float32))
        except RuntimeError:
            y.assign_sub(self.smoothing * (y - 1. / tf.cast(y.shape[-1],
                                                            tf.float32)))

        logits = [self.model.get_logits(x, **kwargs) for x in x]
        loss = sum(
            softmax_cross_entropy_with_logits(labels=y,
                                              logits=logit)
            for logit in logits)
        warnings.warn("LossCrossEntropy is deprecated, switch to "
                      "CrossEntropy. LossCrossEntropy may be removed on "
                      "or after 2019-03-06.")
        return loss 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 24, Source: loss.py

Example 7: model_argmax

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)

    if samples.shape[0] == 1:
        return np.argmax(probabilities)
    else:
        return np.argmax(probabilities, axis=1) 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 23, Source: utils_tf.py
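A hedged usage sketch for model_argmax, assuming TF1 graph mode and a toy softmax model; the 784-dimensional placeholder and the dense layer are illustrative stand-ins, not part of the original project.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 784])     # illustrative input placeholder
predictions = tf.nn.softmax(tf.layers.dense(x, 10))   # toy 10-class model

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    samples = np.random.rand(3, 784).astype(np.float32)
    classes = model_argmax(sess, x, predictions, samples)
    print(classes)  # e.g. [7 2 7] -- one predicted class per sample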

Example 8: _inv_preemphasis

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def _inv_preemphasis(x):
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])

    y = final[1]

    return y 
Author: candlewill, Project: Griffin_lim, Lines: 22, Source: griffin_lim.py
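For reference, a short NumPy sketch of the recurrence this while_loop unrolls: pre-emphasis computes y[n] = x[n] - a*x[n-1], so the inverse accumulates y[n] = x[n] + a*y[n-1]; the coefficient below is a typical value, not necessarily the one stored in hparams.

import numpy as np

def inv_preemphasis_np(x, coef=0.97):
    # Invert y[n] = x[n] - coef * x[n-1] via the running sum y[n] = x[n] + coef * y[n-1].
    out = np.zeros_like(x)
    prev = 0.0
    for n, sample in enumerate(x):
        prev = sample + coef * prev
        out[n] = prev
    return out

print(inv_preemphasis_np(np.array([1.0, 0.5, -0.25], dtype=np.float32)))
# -> approximately [1.0, 1.47, 1.1759]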

Example 9: pad_and_reshape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec:
    :param frame_length:
    :param F:
    :returns:
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = (frame_length) // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([
        [old_shape[0] * old_shape[1]],
        old_shape[2:]],
        axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec 
Author: deezer, Project: spleeter, Lines: 21, Source: tensor.py

Example 10: check_tensor_shape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
Author: deezer, Project: spleeter, Lines: 18, Source: tensor.py
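A small usage sketch, assuming the same TF1 graph mode as the rest of these snippets; the spectrogram tensor and the target shapes are illustrative.

import tensorflow as tf

spectrogram = tf.zeros([2, 512, 1024, 2])

# None entries act as wildcards: only the specified dimensions are compared.
ok = check_tensor_shape(spectrogram, (None, 512, 1024, None))
bad = check_tensor_shape(spectrogram, (None, 256, 1024, None))

with tf.Session() as sess:
    print(sess.run([ok, bad]))  # [True, False]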

Example 11: _inverse_stft

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def _inverse_stft(self, stft_t, time_crop=None):
        """ Inverse and reshape the given STFT

        :param stft_t: input STFT
        :returns: inverse STFT (waveform)
        """
        inversed = inverse_stft(
            tf.transpose(stft_t, perm=[2, 0, 1]),
            self._frame_length,
            self._frame_step,
            window_fn=lambda frame_length, dtype: (
                hann_window(frame_length, periodic=True, dtype=dtype))
        ) * self.WINDOW_COMPENSATION_FACTOR
        reshaped = tf.transpose(inversed)
        if time_crop is None:
            time_crop = tf.shape(self._features['waveform'])[0]
        return reshaped[:time_crop, :] 
Author: deezer, Project: spleeter, Lines: 19, Source: __init__.py

Example 12: get_logits

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Author: rtaori, Project: Black-Box-Audio, Lines: 42, Source: tf_logits.py

Example 13: get_sequence_output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def get_sequence_output(self):
        """Gets final hidden layer of encoder.

        Returns:
          float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
          to the final hidden of the transformer encoder.
        """
        return self.sequence_output 
Author: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines: 10, Source: modeling.py

Example 14: get_embedding_output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def get_embedding_output(self):
        """Gets output of the embedding lookup (i.e., input to the transformer).

        Returns:
          float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
          to the output of the embedding layer, after summing the word
          embeddings with the positional embeddings and the token type embeddings,
          then performing layer normalization. This is the input to the transformer.
        """
        return self.embedding_output 
Author: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines: 12, Source: modeling.py

Example 15: create_attention_mask_from_input_mask

# Required import: import tensorflow [as alias]
# Or: from tensorflow import shape [as alias]
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
      to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
      float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]

    to_shape = get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]

    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to* padding)
    # tokens so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask

    return mask 
Author: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines: 34, Source: modeling.py
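A small NumPy sketch of the broadcast at the heart of this function, with illustrative sizes: ones of shape [batch, from_len, 1] multiplied by the mask reshaped to [batch, 1, to_len] give the [batch, from_len, to_len] attention mask.

import numpy as np

batch_size, from_seq_length, to_seq_length = 1, 2, 3
to_mask = np.array([[1, 1, 0]], dtype=np.float32)  # last position is padding

broadcast_ones = np.ones((batch_size, from_seq_length, 1), dtype=np.float32)
mask = broadcast_ones * to_mask.reshape(batch_size, 1, to_seq_length)
print(mask)
# [[[1. 1. 0.]
#   [1. 1. 0.]]]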


Note: The tensorflow.shape examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not republish without permission.