

Python text.text_to_sequence Method Code Examples

This article collects typical usage examples of the text.text_to_sequence method in Python. If you are wondering how to call text.text_to_sequence, what its concrete usage looks like, or want to see working examples, the hand-picked code samples below may help. You can also explore further usage examples of the text module that provides this method.


A total of 15 code examples of the text.text_to_sequence method are shown below, sorted by popularity by default.
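All of the examples share one basic pattern: text_to_sequence converts a raw string into a list of integer symbol IDs according to a list of cleaner names, and the result is wrapped in a NumPy array or tensor before being fed to a model. The following is a minimal sketch of that pattern; the cleaner setting and input string are illustrative, and a Tacotron-style text module on the import path is assumed.

import numpy as np
from text import text_to_sequence

# Illustrative cleaner setting; the projects below usually read this from an hparams object.
cleaners = 'english_cleaners'
cleaner_names = [name.strip() for name in cleaners.split(',')]

# Convert the raw string into a 1-D array of integer symbol IDs.
seq = np.asarray(text_to_sequence('Hello world.', cleaner_names), dtype=np.int32)

# Add a batch dimension before feeding the sequence to a model.
batch = np.expand_dims(seq, axis=0)  # shape: (1, sequence_length)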

Example 1: generate

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def generate(model, text):

    # Text to index sequence
    cleaner_names = [x.strip() for x in hp.cleaners.split(',')]
    seq = np.expand_dims(np.asarray(text_to_sequence(text, cleaner_names), dtype=np.int32), axis=0)

    # Provide [GO] Frame
    mel_input = np.zeros([seq.shape[0], hp.num_mels, 1], dtype=np.float32)

    # Variables
    characters = Variable(torch.from_numpy(seq).type(torch.cuda.LongTensor), volatile=True).cuda()
    mel_input = Variable(torch.from_numpy(mel_input).type(torch.cuda.FloatTensor), volatile=True).cuda()

    # Spectrogram to wav
    _, linear_output = model.forward(characters, mel_input)
    wav = inv_spectrogram(linear_output[0].data.cpu().numpy())
    wav = wav[:find_endpoint(wav)]
    out = io.BytesIO()
    save_wav(wav, out)

    return out.getvalue() 
Developer: soobinseo, Project: Tacotron-pytorch, Lines of code: 23, Source: synthesis.py
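Note that Example 1 relies on the old torch.autograd.Variable(..., volatile=True) API, which was deprecated in PyTorch 0.4; in current PyTorch the same inference-only behaviour is expressed with torch.no_grad(). A minimal sketch of the equivalent tensor setup follows; dummy arrays stand in for seq and mel_input, and a CUDA device is assumed, as in the example.

import numpy as np
import torch

# Dummy stand-ins for the seq and mel_input arrays built in Example 1.
seq = np.zeros((1, 10), dtype=np.int32)
mel_input = np.zeros((1, 80, 1), dtype=np.float32)

# torch.no_grad() replaces the deprecated volatile=True flag: no autograd
# graph is recorded, which is all that inference needs.
with torch.no_grad():
    characters = torch.from_numpy(seq).long().cuda()
    mel_frames = torch.from_numpy(mel_input).float().cuda()
    # _, linear_output = model(characters, mel_frames)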

Example 2: _get_next_example

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def _get_next_example(self):
    '''Loads a single example (input, mel_target, linear_target, cost) from disk'''
    if self._offset >= len(self._metadata):
      self._offset = 0
      random.shuffle(self._metadata)
    meta = self._metadata[self._offset]
    self._offset += 1

    text = meta[3]
    if self._cmudict and random.random() < _p_cmudict:
      text = ' '.join([self._maybe_get_arpabet(word) for word in text.split(' ')])

    input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
    linear_target = np.load(os.path.join(self._datadir, meta[0]))
    mel_target = np.load(os.path.join(self._datadir, meta[1]))

    return (input_data, mel_target, linear_target, len(linear_target)) 
Developer: yanggeng1995, Project: vae_tacotron, Lines of code: 19, Source: datafeeder.py

Example 3: _get_next_example

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def _get_next_example(self):
    '''Loads a single example (input, mel_target, linear_target, cost) from disk'''
    if self._offset >= len(self._metadata):
      self._offset = 0
      random.shuffle(self._metadata)
    meta = self._metadata[self._offset]
    self._offset += 1

    text = meta[3]
    if self._cmudict and random.random() < _p_cmudict:
      text = ' '.join([self._maybe_get_arpabet(word) for word in text.split(' ')])

    input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
    linear_target = np.load(os.path.join(self._datadir, meta[0]))
    mel_target = np.load(os.path.join(self._datadir, meta[1]))
    return (input_data, mel_target, linear_target, len(linear_target)) 
Developer: richmondu, Project: libfaceid, Lines of code: 18, Source: datafeeder.py

Example 4: _get_next_example

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def _get_next_example(self):
    '''Loads a single example (input, mel_target, linear_target, cost) from disk'''
    if self._offset >= len(self._metadata):
      self._offset = 0
      random.shuffle(self._metadata)
    meta = self._metadata[self._offset]
    self._offset += 1
    text = meta[3]
    arr = []
    for word in text.split(' '):
      if word in [" ", ""]:
        pass
      elif word in [",", '.', '-']:
        x = word
        arr.append(x)
      else:
        x = self._maybe_get_arpabet(word)
        arr.append(x)
    text = ' '.join(arr)

    input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
    linear_target = np.load(os.path.join(self._datadir, meta[0]))
    mel_target = np.load(os.path.join(self._datadir, meta[1]))
    return (input_data, mel_target, linear_target, len(linear_target)) 
Developer: youssefsharief, Project: arabic-tacotron-tts, Lines of code: 26, Source: datafeeder.py

Example 5: load_data

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def load_data(txt, mel, model):
    character = text.text_to_sequence(txt, hparams.text_cleaners)
    character = torch.from_numpy(np.stack([np.array(character)])).long().cuda()

    text_length = torch.Tensor([character.size(1)]).long().cuda()
    mel = torch.from_numpy(np.stack([mel.T])).float().cuda()
    max_len = mel.size(2)
    output_length = torch.Tensor([max_len]).long().cuda()

    inputs = character, text_length, mel, max_len, output_length

    with torch.no_grad():
        [_, mel_tacotron2, _, alignment], cemb = model.forward(inputs)

    alignment = alignment[0].cpu().numpy()
    cemb = cemb[0].cpu().numpy()

    D = get_D(alignment)
    D = np.array(D)

    mel_tacotron2 = mel_tacotron2[0].cpu().numpy()

    return mel_tacotron2, cemb, D 
Developer: xcmyz, Project: LightSpeech, Lines of code: 25, Source: utils.py

Example 6: load_data_from_tacotron2

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def load_data_from_tacotron2(txt, model):
    character = text.text_to_sequence(txt, hparams.text_cleaners)
    character = torch.from_numpy(np.stack([np.array(character)])).long().cuda()

    with torch.no_grad():
        [_, mel, _, alignment], cemb = model.inference(character)

    alignment = alignment[0].cpu().numpy()
    cemb = cemb[0].cpu().numpy()

    D = get_D(alignment)
    D = np.array(D)

    mel = mel[0].cpu().numpy()

    return mel, cemb, D 
Developer: xcmyz, Project: LightSpeech, Lines of code: 18, Source: utils.py

Example 7: synthesis

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def synthesis(model, text, alpha=1.0):
    text = np.array(text_to_sequence(text, hp.text_cleaners))
    text = np.stack([text])
    with torch.no_grad():
        sequence = torch.autograd.Variable(
            torch.from_numpy(text)).cuda().long()
        # mel, mel_postnet_1, mel_postnet_2 = model.module.inference(
        #     sequence, alpha)
        mel = model.module.inference(sequence, alpha)

        # out = mel[0].cpu().transpose(0, 1),\
        #     mel_postnet_1[0].cpu().transpose(0, 1),\
        #     mel_postnet_2[0].cpu().transpose(0, 1),\
        #     mel.transpose(1, 2),\
        #     mel_postnet_1.transpose(1, 2),\
        #     mel_postnet_2.transpose(1, 2)

        return mel[0].cpu().transpose(0, 1), mel.transpose(1, 2) 
Developer: xcmyz, Project: LightSpeech, Lines of code: 20, Source: inference.py

Example 8: __getitem__

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def __getitem__(self, idx):
        # mel_gt_name = os.path.join(
        #     hparams.mel_ground_truth, "ljspeech-mel-%05d.npy" % (idx+1))
        # mel_gt_target = np.load(mel_gt_name)
        mel_tac2_target = np.load(os.path.join(
            hparams.mel_tacotron2, str(idx)+".npy")).T

        cemb = np.load(os.path.join(hparams.cemb_path, str(idx)+".npy"))
        D = np.load(os.path.join(hparams.alignment_path, str(idx)+".npy"))

        character = self.text[idx][0:len(self.text[idx])-1]
        character = np.array(text_to_sequence(
            character, hparams.text_cleaners))

        sample = {"text": character,
                  "mel_tac2_target": mel_tac2_target,
                  "cemb": cemb,
                  "D": D}

        return sample 
Developer: xcmyz, Project: LightSpeech, Lines of code: 22, Source: dataset.py

Example 9: synthesis

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def synthesis(model, text, alpha=1.0):
    text = np.array(text_to_sequence(text, hp.text_cleaners))
    text = np.stack([text])

    src_pos = np.array([i+1 for i in range(text.shape[1])])
    src_pos = np.stack([src_pos])
    with torch.no_grad():
        sequence = torch.autograd.Variable(
            torch.from_numpy(text)).cuda().long()
        src_pos = torch.autograd.Variable(
            torch.from_numpy(src_pos)).cuda().long()

        mel, mel_postnet = model.module.forward(sequence, src_pos, alpha=alpha)

        return mel[0].cpu().transpose(0, 1), \
            mel_postnet[0].cpu().transpose(0, 1), \
            mel.transpose(1, 2), \
            mel_postnet.transpose(1, 2) 
Developer: xcmyz, Project: FastSpeech, Lines of code: 20, Source: synthesis.py

Example 10: infer

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def infer(wav_path, text, model):
	sequence = text_to_sequence(text, hps.text_cleaners)
	sequence = to_var(torch.IntTensor(sequence)[None, :]).long()
	mel = melspectrogram(load_wav(wav_path))
	mel_in = to_var(torch.Tensor([mel]))
	r = mel_in.shape[2]%hps.n_frames_per_step
	if r != 0:
		mel_in = mel_in[:, :, :-r]
	sequence = torch.cat([sequence, sequence], 0)
	mel_in = torch.cat([mel_in, mel_in], 0)
	_, mel_outputs_postnet, _, _ = model.teacher_infer(sequence, mel_in)
	ret = mel
	if r != 0:
		ret[:, :-r] = to_arr(mel_outputs_postnet[0])
	else:
		ret = to_arr(mel_outputs_postnet[0])
	return ret 
Developer: BogiHsu, Project: Tacotron2-PyTorch, Lines of code: 19, Source: mkgta.py

Example 11: synthesize

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def synthesize(self, text, reference_mel):
    cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
    seq = text_to_sequence(text, cleaner_names)
    feed_dict = {
      self.model.inputs: [np.asarray(seq, dtype=np.int32)],
      self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32),
      self.model.reference_mel: [np.asarray(reference_mel, dtype=np.float32)]
    }
    wav = self.session.run(self.wav_output, feed_dict=feed_dict)
    wav = audio.inv_preemphasis(wav)
    wav = wav[:audio.find_endpoint(wav)]
    out = io.BytesIO()
    audio.save_wav(wav, out)
    return out.getvalue() 
Developer: yanggeng1995, Project: vae_tacotron, Lines of code: 16, Source: synthesizer.py

Example 12: test_text_to_sequence

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def test_text_to_sequence():
  assert text_to_sequence('', []) == [1]
  assert text_to_sequence('Hi!', []) == [9, 36, 54, 1]
  assert text_to_sequence('"A"_B', []) == [2, 3, 1]
  assert text_to_sequence('A {AW1 S} B', []) == [2, 64, 83, 132, 64, 3, 1]
  assert text_to_sequence('Hi', ['lowercase']) == [35, 36, 1]
  assert text_to_sequence('A {AW1 S}  B', ['english_cleaners']) == [28, 64, 83, 132, 64, 29, 1] 
Developer: yanggeng1995, Project: vae_tacotron, Lines of code: 9, Source: text_test.py

Example 13: synthesis

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def synthesis(text, args):
    m = Model()
    m_post = ModelPostNet()

    m.load_state_dict(load_checkpoint(args.restore_step1, "transformer"))
    m_post.load_state_dict(load_checkpoint(args.restore_step2, "postnet"))

    text = np.asarray(text_to_sequence(text, [hp.cleaners]))
    text = t.LongTensor(text).unsqueeze(0)
    text = text.cuda()
    mel_input = t.zeros([1,1, 80]).cuda()
    pos_text = t.arange(1, text.size(1)+1).unsqueeze(0)
    pos_text = pos_text.cuda()

    m=m.cuda()
    m_post = m_post.cuda()
    m.train(False)
    m_post.train(False)
    
    pbar = tqdm(range(args.max_len))
    with t.no_grad():
        for i in pbar:
            pos_mel = t.arange(1,mel_input.size(1)+1).unsqueeze(0).cuda()
            mel_pred, postnet_pred, attn, stop_token, _, attn_dec = m.forward(text, mel_input, pos_text, pos_mel)
            mel_input = t.cat([mel_input, postnet_pred[:,-1:,:]], dim=1)

        mag_pred = m_post.forward(postnet_pred)
        
    wav = spectrogram2wav(mag_pred.squeeze(0).cpu().numpy())
    write(hp.sample_path + "/test.wav", hp.sr, wav) 
Developer: soobinseo, Project: Transformer-TTS, Lines of code: 32, Source: synthesis.py

Example 14: __getitem__

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def __getitem__(self, idx):
        wav_name = os.path.join(self.root_dir, self.landmarks_frame.ix[idx, 0]) + '.wav'
        text = self.landmarks_frame.ix[idx, 1]

        text = np.asarray(text_to_sequence(text, [hp.cleaners]), dtype=np.int32)
        mel = np.load(wav_name[:-4] + '.pt.npy')
        mel_input = np.concatenate([np.zeros([1,hp.num_mels], np.float32), mel[:-1,:]], axis=0)
        text_length = len(text)
        pos_text = np.arange(1, text_length + 1)
        pos_mel = np.arange(1, mel.shape[0] + 1)

        sample = {'text': text, 'mel': mel, 'text_length':text_length, 'mel_input':mel_input, 'pos_mel':pos_mel, 'pos_text':pos_text}

        return sample 
Developer: soobinseo, Project: Transformer-TTS, Lines of code: 16, Source: preprocess.py

Example 15: synthesize

# Required import: import text [as alias]
# Or: from text import text_to_sequence [as alias]
def synthesize(self, text):
    text = arpa.to_arpa(text)
    cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
    seq = text_to_sequence(text, cleaner_names)
    feed_dict = {
      self.model.inputs: [np.asarray(seq, dtype=np.int32)],
      self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)
    }
    wav = self.session.run(self.wav_output, feed_dict=feed_dict)
    wav = audio.inv_preemphasis(wav)
    wav = wav[:audio.find_endpoint(wav)]
    out = io.BytesIO()
    audio.save_wav(wav, out)
    return out.getvalue() 
Developer: youssefsharief, Project: arabic-tacotron-tts, Lines of code: 16, Source: synthesizer.py


Note: The text.text_to_sequence examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding projects' licenses; do not reproduce without permission.