

Python audio.spectrogram Method Code Examples

This article collects typical usage examples of the Python method util.audio.spectrogram, gathered from open-source projects. If you are unsure what audio.spectrogram does, how to call it, or where to find working examples, the curated code below may help. You can also explore the other functions of the util.audio module it belongs to.


The following presents 5 code examples of the audio.spectrogram method, ordered by popularity.
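
Before the full examples, here is a minimal usage sketch. It assumes the util/audio.py module that ships with the keithito-style Tacotron repositories these examples are drawn from (load_wav, spectrogram, melspectrogram, inv_spectrogram, save_wav); the file names are illustrative.

import numpy as np
from util import audio

wav = audio.load_wav('sample.wav')                  # float waveform, 1-D numpy array
spec = audio.spectrogram(wav).astype(np.float32)    # linear spectrogram, shape [num_freq, n_frames]
mel = audio.melspectrogram(wav).astype(np.float32)  # mel spectrogram, shape [num_mels, n_frames]
print(spec.shape, mel.shape)

# The linear spectrogram can be inverted back to audio via Griffin-Lim:
audio.save_wav(audio.inv_spectrogram(spec), 'reconstructed.wav')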

Example 1: _process_utterance

# Required import: from util import audio [as alias]
# Or: from util.audio import spectrogram [as alias]
# (These snippets also assume the surrounding module imports os, numpy as np,
#  and the project's hparams.)
def _process_utterance(out_dir, index, wav_path, labels_path, text):
  # Load the wav file and trim silence from the ends:
  wav = audio.load_wav(wav_path)
  start_offset, end_offset = _parse_labels(labels_path)
  start = int(start_offset * hparams.sample_rate)
  end = int(end_offset * hparams.sample_rate) if end_offset is not None else None
  wav = wav[start:end]  # end=None slices through the final sample (end=-1 would drop it)
  max_samples = _max_out_length * hparams.frame_shift_ms / 1000 * hparams.sample_rate
  if len(wav) > max_samples:
    return None
  spectrogram = audio.spectrogram(wav).astype(np.float32)
  n_frames = spectrogram.shape[1]
  mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
  spectrogram_filename = 'blizzard-spec-%05d.npy' % index
  mel_filename = 'blizzard-mel-%05d.npy' % index
  np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
  np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
  return (spectrogram_filename, mel_filename, n_frames, text) 
Developer: yanggeng1995 | Project: vae_tacotron | Lines: 20 | Source: blizzard.py
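
The max_samples expression converts a frame budget into a sample budget: frame_shift_ms / 1000 * sample_rate is the hop size in samples. A quick sanity check with illustrative values (the real ones live in the project's hparams.py):

frame_shift_ms = 12.5   # hypothetical hop size in milliseconds
sample_rate = 16000     # hypothetical sampling rate in Hz
_max_out_length = 700   # hypothetical cap on output frames

samples_per_frame = frame_shift_ms / 1000 * sample_rate  # 200.0 samples
max_samples = _max_out_length * samples_per_frame        # 140000.0 samples, i.e. 8.75 s

Utterances longer than this are dropped (the function returns None) so that decoder targets stay within a fixed length budget.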

Example 2: _process_utterance

# Required import: from util import audio [as alias]
# Or: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):

  # Load the audio to a numpy array:
  wav = audio.load_wav(wav_path)

  # Compute the linear-scale spectrogram from the wav:
  spectrogram = audio.spectrogram(wav).astype(np.float32)
  n_frames = spectrogram.shape[1]

  # Compute a mel-scale spectrogram from the wav:
  mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)

  # Write the spectrograms to disk:
  spectrogram_filename = 'nawar-spec-%05d.npy' % index
  mel_filename = 'nawar-mel-%05d.npy' % index
  np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
  np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)

  # Return a tuple describing this training example:
  return (spectrogram_filename, mel_filename, n_frames, text) 
Developer: youssefsharief | Project: arabic-tacotron-tts | Lines: 22 | Source: nawar.py
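
For context, these _process_utterance functions are normally driven by a small preprocessing script that fans the work out over a process pool and writes the returned tuples to train.txt. The sketch below follows the pattern used in these repositories; the pipe-delimited metadata.csv layout is an assumption borrowed from LJ Speech, not this dataset's exact format.

import os
from concurrent.futures import ProcessPoolExecutor
from functools import partial

def build_from_path(in_dir, out_dir, num_workers=1):
  executor = ProcessPoolExecutor(max_workers=num_workers)
  futures = []
  with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
    for index, line in enumerate(f, start=1):
      parts = line.strip().split('|')
      wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
      text = parts[2]
      futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text)))
  # Drop utterances that _process_utterance filtered out (returned None):
  return [future.result() for future in futures if future.result() is not None]

def write_metadata(metadata, out_dir):
  with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
    for m in metadata:
      f.write('|'.join([str(x) for x in m]) + '\n')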

Example 3: _process_utterance

# Required import: from util import audio [as alias]
# Or: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):
  '''Preprocesses a single utterance audio/text pair.
  This writes the mel and linear scale spectrograms to disk and returns a tuple to write
  to the train.txt file.
  Args:
    out_dir: The directory to write the spectrograms into
    index: The numeric index to use in the spectrogram filenames.
    wav_path: Path to the audio file containing the speech input
    text: The text spoken in the input audio file
  Returns:
    A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt
  '''

  # Load the audio to a numpy array:
  wav = audio.load_wav(wav_path)
  # Skip over-long utterances. Check _max_out_length before using it, so a None
  # value cannot raise a TypeError in the multiplication:
  if _max_out_length is not None:
    max_samples = _max_out_length * hparams.frame_shift_ms / 1000 * hparams.sample_rate
    if len(wav) > max_samples:
      return None

  # Compute the linear-scale spectrogram from the wav:
  spectrogram = audio.spectrogram(wav).astype(np.float32)
  n_frames = spectrogram.shape[1]

  # Compute a mel-scale spectrogram from the wav:
  mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)

  # Write the spectrograms to disk:
  spectrogram_filename = 'blizzard2013-spec-%05d.npy' % index
  mel_filename = 'blizzard2013-mel-%05d.npy' % index
  np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
  np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)

  # Return a tuple describing this training example:
  return (spectrogram_filename, mel_filename, n_frames, text) 
Developer: yanggeng1995 | Project: vae_tacotron | Lines: 36 | Source: blizzard2013.py

Example 4: _process_utterance

# Required import: from util import audio [as alias]
# Or: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):
  '''Preprocesses a single utterance audio/text pair.

  This writes the mel and linear scale spectrograms to disk and returns a tuple to write
  to the train.txt file.

  Args:
    out_dir: The directory to write the spectrograms into
    index: The numeric index to use in the spectrogram filenames.
    wav_path: Path to the audio file containing the speech input
    text: The text spoken in the input audio file

  Returns:
    A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt
  '''

  # Load the audio to a numpy array:
  wav = audio.load_wav(wav_path)

  # Compute the linear-scale spectrogram from the wav:
  spectrogram = audio.spectrogram(wav).astype(np.float32)
  n_frames = spectrogram.shape[1]

  # Compute a mel-scale spectrogram from the wav:
  mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)

  # Write the spectrograms to disk:
  spectrogram_filename = 'ljspeech-spec-%05d.npy' % index
  mel_filename = 'ljspeech-mel-%05d.npy' % index
  np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
  np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)

  # Return a tuple describing this training example:
  return (spectrogram_filename, mel_filename, n_frames, text) 
Developer: yanggeng1995 | Project: vae_tacotron | Lines: 36 | Source: ljspeech.py
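
Because each array is saved transposed (spectrogram.T), the files on disk are time-major. A minimal sketch of reading one example back during training (paths and file names illustrative, matching the pattern above):

import os
import numpy as np

out_dir = 'training'  # wherever the preprocessing step wrote its output
spec = np.load(os.path.join(out_dir, 'ljspeech-spec-00001.npy'))
mel = np.load(os.path.join(out_dir, 'ljspeech-mel-00001.npy'))
print(spec.shape, mel.shape)  # (n_frames, num_freq) and (n_frames, num_mels)

The n_frames value in the returned tuple therefore matches the first axis of the saved arrays.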

Example 5: loss

# Required import: from util import audio [as alias]
# Or: from util.audio import spectrogram [as alias]
def loss(self, x, y):
    '''
    `loss` needs rewriting because the whole procedure has changed (we now use
    `exe`, the quantized codes); the VQ term still has to be folded into `loss`.
    `x`: [b, T] (unpadded, `int`)
    `y`: [b,] (`int`)
    '''
    tf.summary.histogram('y', y)

    T = tf.shape(x)[1]
    x_analog = self._D2A(x)
    z_enc = self._Enc(x_analog)

    # Passing tiled embeddings might seem inefficient, but it's necessary for TF.
    z_ids = self._C(z_enc)
    z_exe = tf.nn.embedding_lookup(self.z_emb, z_ids)
    z_exe_up = self._uptile(z_exe)
    z_exe_up = z_exe_up[:, -T:]
    x_ar = self._Dec(x_analog, z_exe_up, y)

    with tf.name_scope('Loss'):
      P = self.n_padding()
      x_answer_ar = x[:, P + 1:]
      x_pred_ar_logits = x_ar[:, :-1]  # no answer for the last prediction

      xh = tf.argmax(x_pred_ar_logits, axis=-1)

      loss_ar = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=x_pred_ar_logits,
          labels=x_answer_ar,
        )
      )
      accuracy_of_minibatch(x_answer_ar, xh)

      # loss = self._Wasserstein_objective(z_enc)
      loss = {}
      loss['reconst'] = loss_ar

      loss['vq'] = tf.reduce_mean(tf.reduce_sum(tf.square(z_enc - z_exe), -1))
      tf.summary.scalar('vq', loss['vq'])

      loss_reg = tf.reduce_sum(tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
      )
      tf.summary.scalar('loss_reg', loss_reg)
      loss['reg'] = loss_reg

      # tf.summary.audio('x', mu_law_decode(x), self.arch['fs'])
      # tf.summary.audio('xh', mu_law_decode(xh), self.arch['fs'])
      tf.summary.scalar('xent_ar', loss_ar)
      tf.summary.histogram('log_z_enc_norm', tf.log(tf.norm(z_enc, axis=-1)))
      # TODO: only applicable to audio input with dim_symbol_emb=1.
      # spectrogram(x_analog, 2 ** self.arch['n_downsample_stack'])

      loss.update({'z_enc': z_enc, 'z_exe': z_exe})
      return loss 
Developer: JeremyCCHsu | Project: vqvae-speech | Lines: 60 | Source: vqvae.py
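
The loss['vq'] term above is the codebook loss from the VQ-VAE objective (van den Oord et al., 2017). The standard formulation also includes a commitment term and a straight-through estimator so gradients can flow through the non-differentiable nearest-neighbour lookup; the sketch below shows that textbook version in TF1 style, not this repository's exact code.

import tensorflow as tf

def vq_layer(z_enc, z_emb, beta=0.25):
  '''Standard VQ-VAE quantization (sketch).
  z_enc: [..., D] encoder outputs; z_emb: [K, D] codebook.'''
  # Nearest codebook entry for each encoder output:
  distances = tf.reduce_sum(
    tf.square(tf.expand_dims(z_enc, -2) - z_emb), axis=-1)  # [..., K]
  z_ids = tf.argmin(distances, axis=-1)
  z_q = tf.nn.embedding_lookup(z_emb, z_ids)

  # Codebook loss pulls embeddings toward (frozen) encoder outputs;
  # commitment loss keeps encoder outputs close to their (frozen) codes:
  loss_vq = tf.reduce_mean(tf.reduce_sum(tf.square(tf.stop_gradient(z_enc) - z_q), -1))
  loss_commit = beta * tf.reduce_mean(tf.reduce_sum(tf.square(z_enc - tf.stop_gradient(z_q)), -1))

  # Straight-through estimator: the forward pass uses z_q, while the backward
  # pass copies the decoder's gradient onto z_enc unchanged.
  z_st = z_enc + tf.stop_gradient(z_q - z_enc)
  return z_st, z_ids, loss_vq + loss_commit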

