This article collects typical usage examples of the Python method util.audio.spectrogram. If you are wondering what exactly audio.spectrogram does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore the util.audio module the method belongs to.
Below are 5 code examples of audio.spectrogram, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
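Before the full examples, the basic call pattern: audio.spectrogram takes a 1-D waveform (as returned by audio.load_wav) and produces a 2-D array of shape (num_freq, n_frames); the examples below read the frame count from shape[1] and save the array transposed. A minimal sketch, assuming the util.audio module from the Tacotron-style repositories these examples come from is importable ('example.wav' is a placeholder; the examples below additionally rely on import os, import numpy as np, and the repository's hparams object):

import numpy as np
from util import audio

wav = audio.load_wav('example.wav')                 # 1-D float waveform
spec = audio.spectrogram(wav).astype(np.float32)    # 2-D: (num_freq, n_frames)
mel = audio.melspectrogram(wav).astype(np.float32)  # 2-D: (num_mels, n_frames)
print(spec.shape[1], 'frames')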
Example 1: _process_utterance
# Required import: from util import audio [as alias]
# Alternatively: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, labels_path, text):
    # Load the wav file and trim silence from the ends:
    wav = audio.load_wav(wav_path)
    start_offset, end_offset = _parse_labels(labels_path)
    start = int(start_offset * hparams.sample_rate)
    end = int(end_offset * hparams.sample_rate) if end_offset is not None else -1
    wav = wav[start:end]
    # Skip utterances longer than the model's output budget:
    max_samples = _max_out_length * hparams.frame_shift_ms / 1000 * hparams.sample_rate
    if len(wav) > max_samples:
        return None
    # Compute linear- and mel-scale spectrograms:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
    # Write the spectrograms to disk, transposed to frames-first:
    spectrogram_filename = 'blizzard-spec-%05d.npy' % index
    mel_filename = 'blizzard-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, text)
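Example 1 calls a helper _parse_labels that is not shown on this page. A hypothetical sketch of what it could look like, assuming each labels file holds lines whose first field is an end time in seconds and whose last field is a phone label, with leading/trailing 'sil' entries marking silence to trim (the actual parser in the source may differ):

def _parse_labels(labels_path):
    # Hypothetical parser: returns (start_offset, end_offset) in seconds.
    labels = []
    with open(labels_path) as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) >= 2:
                labels.append((float(parts[0]), parts[-1]))  # (end_time_sec, label)
    start_offset = 0.0
    end_offset = None
    if labels and labels[0][1] == 'sil':
        start_offset = labels[0][0]    # speech starts where leading silence ends
    if len(labels) >= 2 and labels[-1][1] == 'sil':
        end_offset = labels[-2][0]     # speech ends where trailing silence begins
    return (start_offset, end_offset)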
Example 2: _process_utterance
# Required import: from util import audio [as alias]
# Alternatively: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)
    # Compute the linear-scale spectrogram from the wav:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]
    # Compute a mel-scale spectrogram from the wav:
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
    # Write the spectrograms to disk:
    spectrogram_filename = 'nawar-spec-%05d.npy' % index
    mel_filename = 'nawar-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, text)
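These _process_utterance variants are typically driven by a small preprocessing loop that walks a metadata file and collects the returned tuples into train.txt. A minimal single-process sketch, assuming a metadata.csv of `wav_id|text` lines (the file layout here is an assumption; the source repositories do the same thing in parallel with a ProcessPoolExecutor):

import os

def build_from_path(in_dir, out_dir):
    metadata = []
    with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
        for index, line in enumerate(f, 1):
            wav_id, text = line.strip().split('|', 1)
            wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % wav_id)
            result = _process_utterance(out_dir, index, wav_path, text)
            if result is not None:  # None means the clip was filtered out (e.g. too long)
                metadata.append(result)
    with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
        for row in metadata:
            f.write('|'.join(str(x) for x in row) + '\n')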
Example 3: _process_utterance
# Required import: from util import audio [as alias]
# Alternatively: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):
    '''Preprocesses a single utterance audio/text pair.

    This writes the mel and linear scale spectrograms to disk and returns a tuple to write
    to the train.txt file.

    Args:
        out_dir: The directory to write the spectrograms into
        index: The numeric index to use in the spectrogram filenames.
        wav_path: Path to the audio file containing the speech input
        text: The text spoken in the input audio file

    Returns:
        A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt
    '''
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)
    # Skip clips longer than the configured output budget, when one is set.
    # The None check must come first, before max_samples is computed from it:
    if _max_out_length is not None:
        max_samples = _max_out_length * hparams.frame_shift_ms / 1000 * hparams.sample_rate
        if len(wav) > max_samples:
            return None
    # Compute the linear-scale spectrogram from the wav:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]
    # Compute a mel-scale spectrogram from the wav:
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
    # Write the spectrograms to disk:
    spectrogram_filename = 'blizzard2013-spec-%05d.npy' % index
    mel_filename = 'blizzard2013-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, text)
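The length filter above converts a frame budget into a sample budget: frames × frame shift (ms) → seconds → samples. A worked example with assumed values (the repository's actual hparams may differ):

frame_shift_ms = 12.5   # assumed hop size in milliseconds
sample_rate = 16000     # assumed sampling rate in Hz
_max_out_length = 700   # assumed maximum decoder output length, in frames

max_samples = _max_out_length * frame_shift_ms / 1000 * sample_rate
print(max_samples)      # 140000.0 samples, i.e. 8.75 seconds of audio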
Example 4: _process_utterance
# Required import: from util import audio [as alias]
# Alternatively: from util.audio import spectrogram [as alias]
def _process_utterance(out_dir, index, wav_path, text):
    '''Preprocesses a single utterance audio/text pair.

    This writes the mel and linear scale spectrograms to disk and returns a tuple to write
    to the train.txt file.

    Args:
        out_dir: The directory to write the spectrograms into
        index: The numeric index to use in the spectrogram filenames.
        wav_path: Path to the audio file containing the speech input
        text: The text spoken in the input audio file

    Returns:
        A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt
    '''
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)
    # Compute the linear-scale spectrogram from the wav:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]
    # Compute a mel-scale spectrogram from the wav:
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
    # Write the spectrograms to disk:
    spectrogram_filename = 'ljspeech-spec-%05d.npy' % index
    mel_filename = 'ljspeech-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, text)
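At training time the saved .npy files are read back with np.load; because the arrays were saved transposed, each row is one frame. A minimal sketch (the directory and filename are placeholders based on Example 4):

import os
import numpy as np

out_dir = 'training'  # placeholder output directory
spec = np.load(os.path.join(out_dir, 'ljspeech-spec-00001.npy'))  # (n_frames, num_freq)
mel = np.load(os.path.join(out_dir, 'ljspeech-mel-00001.npy'))    # (n_frames, num_mels)
assert spec.shape[0] == mel.shape[0]  # both spectrograms share the frame axis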
Example 5: loss
# Required import: from util import audio [as alias]
# Alternatively: from util.audio import spectrogram [as alias]
def loss(self, x, y):
    '''
    `loss` needs rewriting because the whole procedure changes (we're using `exe` now).
    Need to add the VQ part into `loss`.
    `x`: [b, T] (unpadded, `int`)
    `y`: [b,] (`int`)
    '''
    tf.summary.histogram('y', y)
    T = tf.shape(x)[1]
    x_analog = self._D2A(x)                  # discrete symbols -> continuous input
    z_enc = self._Enc(x_analog)              # encoder output
    # Passing tiled embeddings might seem inefficient, but it's necessary for TF.
    z_ids = self._C(z_enc)                   # codebook indices for each encoder frame
    z_exe = tf.nn.embedding_lookup(self.z_emb, z_ids)
    z_exe_up = self._uptile(z_exe)           # upsample codes back to the input rate
    z_exe_up = z_exe_up[:, -T:]
    x_ar = self._Dec(x_analog, z_exe_up, y)  # autoregressive decoder logits
    with tf.name_scope('Loss'):
        P = self.n_padding()
        x_answer_ar = x[:, P + 1:]           # targets: inputs shifted past the padding
        x_pred_ar_logits = x_ar[:, :-1]      # no answer for the last prediction
        xh = tf.argmax(x_pred_ar_logits, axis=-1)
        loss_ar = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=x_pred_ar_logits,
                labels=x_answer_ar,
            )
        )
        accuracy_of_minibatch(x_answer_ar, xh)
        # loss = self._Wasserstein_objective(z_enc)
        loss = {}
        loss['reconst'] = loss_ar
        loss['vq'] = tf.reduce_mean(tf.reduce_sum(tf.square(z_enc - z_exe), -1))
        tf.summary.scalar('vq', loss['vq'])
        loss_reg = tf.reduce_sum(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        )
        tf.summary.scalar('loss_reg', loss_reg)
        loss['reg'] = loss_reg
        # tf.summary.audio('x', mu_law_decode(x), self.arch['fs'])
        # tf.summary.audio('xh', mu_law_decode(xh), self.arch['fs'])
        tf.summary.scalar('xent_ar', loss_ar)
        tf.summary.histogram('log_z_enc_norm', tf.log(tf.norm(z_enc, axis=-1)))
        # TODO: only applicable to audio input with dim_symbol_emb=1.
        # spectrogram(x_analog, 2 ** self.arch['n_downsample_stack'])
        loss.update({'z_enc': z_enc, 'z_exe': z_exe})
    return loss
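For reference, the quantization term loss['vq'] above is usually split in VQ-VAE into a codebook loss and a commitment loss using stop-gradients, with a straight-through estimator carrying decoder gradients past the discrete lookup. A hedged sketch in the same TF1 style (beta and the placement are assumptions; the example above may handle gradients elsewhere):

beta = 0.25  # commitment weight (assumed)
# Codebook loss: pull the selected embeddings toward the (frozen) encoder outputs.
codebook_loss = tf.reduce_mean(
    tf.reduce_sum(tf.square(tf.stop_gradient(z_enc) - z_exe), -1))
# Commitment loss: keep the encoder outputs close to the (frozen) selected codes.
commitment_loss = tf.reduce_mean(
    tf.reduce_sum(tf.square(z_enc - tf.stop_gradient(z_exe)), -1))
loss['vq'] = codebook_loss + beta * commitment_loss
# Straight-through estimator: forward pass uses z_exe, backward pass flows into z_enc.
z_exe_st = z_enc + tf.stop_gradient(z_exe - z_enc)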