本文整理匯總了Python中hparams.hparams.num_mels方法的典型用法代碼示例。如果您正苦於以下問題:Python hparams.num_mels方法的具體用法?Python hparams.num_mels怎麽用?Python hparams.num_mels使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類hparams.hparams
的用法示例。
在下文中一共展示了hparams.num_mels方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: build_model
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def build_model():
    """Construct a WaveRNN-style Model configured from the global hparams (hp).

    Prints a short description of the selected output mode before building.

    Returns:
        Model: a freshly constructed (untrained) model instance.

    Raises:
        ValueError: if hp.input_type is not a supported mode.
    """
    # Map each supported input_type to its announcement message.
    mode_messages = {
        'raw': 'building model with Beta distribution output',
        'mixture': "building model with mixture of logistic output",
        'bits': "building model with quantized bit audio",
        'mulaw': "building model with quantized mulaw encoding",
    }
    if hp.input_type not in mode_messages:
        raise ValueError('input_type provided not supported')
    print(mode_messages[hp.input_type])
    return Model(hp.rnn_dims, hp.fc_dims, hp.bits,
                 hp.pad, hp.upsample_factors, hp.num_mels,
                 hp.compute_dims, hp.res_out_dims, hp.res_blocks)
示例2: get_alignment_energies
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def get_alignment_energies(self, query, processed_memory,
                           attention_weights_cat):
    '''Compute unnormalized attention energies (location-sensitive attention).

    PARAMS
    ------
    query: decoder output (batch, num_mels * n_frames_per_step)
    processed_memory: processed encoder outputs (B, T_in, attention_dim)
    attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)

    RETURNS
    -------
    alignment (batch, max_time)
    '''
    # Project the query to a (B, 1, attention_dim) tensor so it broadcasts
    # across the time axis of the memory.
    projected_query = self.query_layer(query.unsqueeze(1))
    location_features = self.location_layer(attention_weights_cat)
    # Additive attention: combine all three terms, then score each timestep.
    combined = torch.tanh(projected_query + location_features + processed_memory)
    return self.v(combined).squeeze(-1)
示例3: create_network
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def create_network(hp, batch_size, num_speakers, is_training):
    """Build a WaveNetModel from hyper-parameters.

    Args:
        hp: hyper-parameter object (dilations, channel sizes, etc.).
        batch_size: training/eval batch size.
        num_speakers: cardinality of the global (speaker) conditioning.
        is_training: whether dropout/training-mode behavior is enabled.

    Returns:
        A configured WaveNetModel instance.
    """
    # Collect the full configuration first; every value comes straight
    # from hp except the three call-time arguments.
    config = dict(
        batch_size=batch_size,
        dilations=hp.dilations,
        filter_width=hp.filter_width,
        residual_channels=hp.residual_channels,
        dilation_channels=hp.dilation_channels,
        quantization_channels=hp.quantization_channels,
        out_channels=hp.out_channels,
        skip_channels=hp.skip_channels,
        use_biases=hp.use_biases,  # True
        scalar_input=hp.scalar_input,
        global_condition_channels=hp.gc_channels,
        global_condition_cardinality=num_speakers,
        local_condition_channels=hp.num_mels,  # mel spectrogram conditioning
        upsample_factor=hp.upsample_factor,
        legacy=hp.legacy,
        residual_legacy=hp.residual_legacy,
        drop_rate=hp.wavenet_dropout,
        train_mode=is_training,
    )
    return WaveNetModel(**config)
示例4: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Return a mel filterbank matrix of shape (num_mels, n_fft // 2 + 1).

    The FFT size is derived from the number of linear-frequency bins:
    num_freq = n_fft // 2 + 1, hence n_fft = 2 * (num_freq - 1).
    """
    fft_size = 2 * (hparams.num_freq - 1)
    return librosa.filters.mel(hparams.sample_rate, fft_size,
                               n_mels=hparams.num_mels)
示例5: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Build the linear-to-mel projection matrix used for mel spectrograms.

    num_freq counts the positive-frequency bins (n_fft // 2 + 1), so the
    underlying FFT size is recovered as 2 * (num_freq - 1).
    """
    fft_size = 2 * (hparams.num_freq - 1)
    return librosa.filters.mel(hparams.sample_rate, fft_size,
                               n_mels=hparams.num_mels)
示例6: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Return a mel filterbank limited to [fmin, fmax] Hz.

    fmax must not exceed the Nyquist frequency, otherwise the top mel
    filters would cover frequencies the signal cannot contain.
    """
    nyquist = hparams.sample_rate // 2
    assert hparams.fmax <= nyquist
    return librosa.filters.mel(
        hparams.sample_rate,
        hparams.fft_size,
        n_mels=hparams.num_mels,
        fmin=hparams.fmin,
        fmax=hparams.fmax,
    )
示例7: __init__
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def __init__(self, coordinator, metadata_filename, hparams):
    """Set up the data feeder: load metadata and build the TF input queue.

    Args:
        coordinator: tf.train.Coordinator used to stop feeding threads.
        metadata_filename: path to a pipe-delimited metadata file; mel and
            linear spectrogram dirs are siblings of this file.
        hparams: hyper-parameter object (hop_size, sample_rate, num_mels,
            num_freq, cleaners, ...).
    """
    super(Feeder, self).__init__()
    self._coord = coordinator
    self._hparams = hparams
    # Text cleaner pipeline names, e.g. "english_cleaners".
    self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
    # Cursor into self._metadata for sequential batching.
    self._offset = 0
    # Load metadata
    self._mel_dir = os.path.join(os.path.dirname(metadata_filename), 'mels')
    self._linear_dir = os.path.join(os.path.dirname(metadata_filename), 'linear')
    with open(metadata_filename, encoding='utf-8') as f:
        self._metadata = [line.strip().split('|') for line in f]
    # Seconds of audio per spectrogram frame.
    frame_shift_ms = hparams.hop_size / hparams.sample_rate
    # NOTE(review): assumes column 4 of the metadata holds the frame count
    # for each example — confirm against the preprocessing script.
    hours = sum([int(x[4]) for x in self._metadata]) * frame_shift_ms / (3600)
    log('Loaded metadata for {} examples ({:.2f} hours)'.format(len(self._metadata), hours))
    # Create placeholders for inputs and targets. Don't specify batch size because we want
    # to be able to feed different batch sizes at eval time.
    self._placeholders = [
        tf.placeholder(tf.int32, shape=(None, None), name='inputs'),
        tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
        tf.placeholder(tf.float32, shape=(None, None, hparams.num_mels), name='mel_targets'),
        tf.placeholder(tf.int32,[None],'mel_lengths'),
        tf.placeholder(tf.float32, shape=(None, None), name='token_targets'),
        tf.placeholder(tf.float32, shape=(None, None, hparams.num_freq), name='linear_targets'),
    ]
    # Create queue for buffering data
    # dtypes must stay in the same order as self._placeholders above.
    queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.int32, tf.float32, tf.float32], name='input_queue')
    self._enqueue_op = queue.enqueue(self._placeholders)
    self.inputs, self.input_lengths, self.mel_targets, self.mel_lengths, self.token_targets, self.linear_targets = queue.dequeue()
    # dequeue() loses static shape info; restore it from the placeholders
    # so downstream graph construction can rely on known ranks.
    self.inputs.set_shape(self._placeholders[0].shape)
    self.input_lengths.set_shape(self._placeholders[1].shape)
    self.mel_targets.set_shape(self._placeholders[2].shape)
    self.mel_lengths.set_shape(self._placeholders[3].shape)
    self.token_targets.set_shape(self._placeholders[4].shape)
    self.linear_targets.set_shape(self._placeholders[5].shape)
示例8: load
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def load(self, checkpoint_path, gta=False, model_name='Tacotron'):
    """Build the synthesis graph and restore weights from a checkpoint.

    Args:
        checkpoint_path: path to the TF checkpoint to restore.
        gta: if True, build in ground-truth-aligned mode, which requires a
            mel_targets placeholder to teacher-force the decoder.
        model_name: name passed to create_model to select the architecture.
    """
    print('Constructing model: %s' % model_name)
    # Batch size is fixed to 1 for synthesis; sequence length stays dynamic.
    inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
    input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
    with tf.variable_scope('model') as scope:
        self.model = create_model(model_name, hparams)
        if hparams.use_vae:
            # Reference mel for the VAE prosody/style encoder.
            ref_targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'ref_targets')
        if gta:
            targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'mel_targets')
            if hparams.use_vae:
                self.model.initialize(inputs, input_lengths, targets, gta=gta, reference_mel=ref_targets)
            else:
                self.model.initialize(inputs, input_lengths, targets, gta=gta)
        else:
            if hparams.use_vae:
                self.model.initialize(inputs, input_lengths, reference_mel=ref_targets)
            else:
                self.model.initialize(inputs, input_lengths)
        self.mel_outputs = self.model.mel_outputs
        # First (and only) alignment matrix of the batch, for plotting.
        self.alignment = self.model.alignments[0]
    self.gta = gta
    print('Loading checkpoint: %s' % checkpoint_path)
    self.session = tf.Session()
    self.session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(self.session, checkpoint_path)
示例9: __init__
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def __init__(self,coord,data_dirs,batch_size,receptive_field, gc_enable=False, queue_size=8):
    """Set up the WaveNet data feeder and its TF input queue.

    Args:
        coord: tf.train.Coordinator used to stop feeding threads.
        data_dirs: list of per-speaker data directories.
        batch_size: number of examples per dequeued batch.
        receptive_field: WaveNet receptive field in samples; clips shorter
            than this are filtered out.
        gc_enable: if True, also feed speaker ids for global conditioning.
        queue_size: capacity of the FIFO buffer queue.
    """
    super(DataFeederWavenet, self).__init__()
    self.data_dirs = data_dirs
    self.coord = coord
    self.batch_size = batch_size
    self.receptive_field = receptive_field
    self.hop_size = audio.get_hop_size(hparams)
    # Round sample_size up/down so it divides evenly by hop_size.
    self.sample_size = ensure_divisible(hparams.sample_size,self.hop_size, True)
    self.max_frames = self.sample_size // self.hop_size  # number of mel frames needed to cover sample_size samples
    self.queue_size = queue_size
    self.gc_enable = gc_enable
    self.skip_path_filter = hparams.skip_path_filter
    # Fixed seed for reproducible shuffling across runs.
    self.rng = np.random.RandomState(123)
    self._offset = defaultdict(lambda: 2)  # keys not yet seen default to offset 2
    self.data_dir_to_id = {data_dir: idx for idx, data_dir in enumerate(self.data_dirs)}  # maps data_dir <-> speaker_id
    # Drops clips shorter than max(sample_size, receptive_field) and returns the rest.
    self.path_dict = get_path_dict(self.data_dirs,np.max([self.sample_size,receptive_field]))
    self._placeholders = [
        tf.placeholder(tf.float32, shape=[None,None,1],name='input_wav'),
        tf.placeholder(tf.float32, shape=[None,None,hparams.num_mels],name='local_condition'),
    ]
    # dtypes must stay in the same order as self._placeholders.
    dtypes = [tf.float32, tf.float32]
    if self.gc_enable:
        self._placeholders.append(tf.placeholder(tf.int32, shape=[None],name='speaker_id'))
        dtypes.append(tf.int32)
    queue = tf.FIFOQueue(self.queue_size, dtypes, name='input_queue')
    self.enqueue = queue.enqueue(self._placeholders)
    if self.gc_enable:
        self.inputs_wav, self.local_condition, self.speaker_id = queue.dequeue()
    else:
        self.inputs_wav, self.local_condition = queue.dequeue()
    # dequeue() loses static shapes; restore them from the placeholders.
    self.inputs_wav.set_shape(self._placeholders[0].shape)
    self.local_condition.set_shape(self._placeholders[1].shape)
    if self.gc_enable:
        self.speaker_id.set_shape(self._placeholders[2].shape)
示例10: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Return a mel filterbank restricted to [fmin, fmax] Hz.

    fmax must not exceed the Nyquist frequency (sample_rate // 2).
    """
    assert hparams.fmax <= hparams.sample_rate // 2
    # NOTE(review): n_fft is not defined in this function — presumably a
    # module-level constant in the original file; sibling variants compute
    # it as (hparams.num_freq - 1) * 2 or use hparams.fft_size. Confirm.
    return librosa.filters.mel(hparams.sample_rate, n_fft,
                               fmin=hparams.fmin, fmax=hparams.fmax,
                               n_mels=hparams.num_mels)
示例11: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Build the linear-to-mel filterbank matrix.

    When fmax is configured it is validated against the Nyquist frequency;
    a None fmax lets librosa pick its default (sample_rate / 2).
    """
    fmax = hparams.fmax
    if fmax is not None:
        assert fmax <= hparams.sample_rate // 2
    return librosa.filters.mel(
        hparams.sample_rate,
        hparams.fft_size,
        fmin=hparams.fmin,
        fmax=fmax,
        n_mels=hparams.num_mels,
    )
示例12: no_test_build_model
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def no_test_build_model():
    """Smoke check (disabled via the no_ prefix): build a Model on the GPU
    and dump its attributes."""
    # Constructor arguments, in the positional order Model expects.
    model_args = (hp.rnn_dims, hp.fc_dims, hp.bits,
                  hp.pad, hp.upsample_factors, hp.num_mels,
                  hp.compute_dims, hp.res_out_dims, hp.res_blocks)
    model = Model(*model_args).cuda()
    print(vars(model))
示例13: test_batch_generate
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def test_batch_generate():
    """Smoke test: run batch_generate on random mel input and print shapes."""
    # Constructor arguments, in the positional order Model expects.
    model_args = (hp.rnn_dims, hp.fc_dims, hp.bits,
                  hp.pad, hp.upsample_factors, hp.num_mels,
                  hp.compute_dims, hp.res_out_dims, hp.res_blocks)
    model = Model(*model_args).cuda()
    print(vars(model))
    # Random conditioning: batch of 3, 80 mel bins, 100 frames.
    batch_mel = torch.rand(3, 80, 100)
    generated = model.batch_generate(batch_mel)
    print(generated.shape)
示例14: _build_mel_basis
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def _build_mel_basis():
    """Return the mel filterbank matrix for spectrogram conversion.

    num_freq is the count of positive-frequency bins (n_fft // 2 + 1),
    so the FFT size is 2 * (num_freq - 1).
    """
    fft_size = 2 * (hps.num_freq - 1)
    return librosa.filters.mel(hps.sample_rate, fft_size, n_mels=hps.num_mels)
示例15: __init__
# 需要導入模塊: from hparams import hparams [as 別名]
# 或者: from hparams.hparams import num_mels [as 別名]
def __init__(self):
    """Build the postnet: a stack of ConvNorm + BatchNorm1d blocks that
    refines predicted mel spectrograms.

    Layout: num_mels -> embedding_dim, then (n_convolutions - 2) blocks of
    embedding_dim -> embedding_dim (tanh init), and a final
    embedding_dim -> num_mels block with linear init.
    """
    super(Postnet, self).__init__()

    def _block(in_channels, out_channels, gain):
        # One conv + batch-norm unit; padding keeps the time axis length.
        return nn.Sequential(
            ConvNorm(in_channels, out_channels,
                     kernel_size=hps.postnet_kernel_size, stride=1,
                     padding=int((hps.postnet_kernel_size - 1) / 2),
                     dilation=1, w_init_gain=gain),
            nn.BatchNorm1d(out_channels))

    self.convolutions = nn.ModuleList()
    self.convolutions.append(
        _block(hps.num_mels, hps.postnet_embedding_dim, 'tanh'))
    for _ in range(1, hps.postnet_n_convolutions - 1):
        self.convolutions.append(
            _block(hps.postnet_embedding_dim, hps.postnet_embedding_dim,
                   'tanh'))
    self.convolutions.append(
        _block(hps.postnet_embedding_dim, hps.num_mels, 'linear'))