This page collects typical usage examples of the Python method data_load.load_data. If you are wondering how to use data_load.load_data in Python, or what concrete calls look like, the curated examples below may help. You can also explore other uses of the data_load module that this method belongs to.

Below are 13 code examples of data_load.load_data, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
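For orientation, here is a minimal sketch of how load_data is typically called, based on the examples below. The mode names and return values are assumptions drawn from these snippets, not a fixed API: every project defines its own data_load module.

# A hedged sketch, not a definitive interface: each repository's data_load
# differs. The call patterns below mirror the examples on this page.
from data_load import load_data

texts = load_data(mode="synthesize")                  # e.g. a padded int32 text matrix
fpaths, text_lengths, texts = load_data(mode="eval")  # e.g. file paths plus encoded texts
mfccs, phns = load_data(mode="eval1")                 # e.g. features and phoneme labels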
Example 1: synthesize

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def synthesize():
    if not os.path.exists(hp.sampledir): os.mkdir(hp.sampledir)

    # Load graph
    g = Graph(mode="synthesize"); print("Graph loaded")

    # Load data
    texts = load_data(mode="synthesize")

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)); print("Restored!")

        # Feed Forward
        ## mel
        y_hat = np.zeros((texts.shape[0], 200, hp.n_mels*hp.r), np.float32)  # hp.n_mels*hp.r
        for j in tqdm.tqdm(range(200)):
            _y_hat = sess.run(g.y_hat, {g.x: texts, g.y: y_hat})
            y_hat[:, j, :] = _y_hat[:, j, :]

        ## mag
        mags = sess.run(g.z_hat, {g.y_hat: y_hat})
        for i, mag in enumerate(mags):
            print("File {}.wav is being generated ...".format(i+1))
            audio = spectrogram2wav(mag)
            write(os.path.join(hp.sampledir, '{}.wav'.format(i+1)), hp.sr, audio)
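The loop in Example 1 is greedy autoregressive decoding: at step j the model sees everything committed so far (later frames are still zeros), and only frame j of its output is kept. A stripped-down sketch of the same pattern, with a hypothetical predict_fn standing in for the sess.run call:

import numpy as np

def autoregressive_decode(predict_fn, batch_size, max_steps, dim):
    # predict_fn(y) is assumed to return a full (batch, max_steps, dim) prediction.
    y = np.zeros((batch_size, max_steps, dim), np.float32)
    for j in range(max_steps):
        y_hat = predict_fn(y)        # model attends to frames < j; the rest are zeros
        y[:, j, :] = y_hat[:, j, :]  # commit only the j-th frame
    return y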
Example 2: copy_synth_SSRN_GL

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def copy_synth_SSRN_GL(hp, outdir):
    safe_makedir(outdir)

    dataset = load_data(hp, mode="synthesis")
    fnames, texts = dataset['fpaths'], dataset['texts']
    bases = [basename(fname) for fname in fnames]
    mels = [np.load(os.path.join(hp.coarse_audio_dir, base + '.npy')) for base in bases]
    lengths = [a.shape[0] for a in mels]
    mels = list2batch(mels, 0)

    g = SSRNGraph(hp, mode="synthesize"); print("Graph (ssrn) loaded")

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ssrn_epoch = restore_latest_model_parameters(sess, hp, 'ssrn')

        print('Run SSRN...')
        Z = synth_mel2mag(hp, mels, g, sess)

        for i, mag in enumerate(Z):
            print("Working on %s" % (bases[i]))
            mag = mag[:lengths[i]*hp.r, :]  # trim to generated length
            wav = spectrogram2wav(hp, mag)
            soundfile.write(outdir + "/%s.wav" % (bases[i]), wav, hp.sr)  # bases[i]: `base` was undefined here
Example 3: test

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def test():
    x, y = load_data(type="test")
    g = Graph(is_training=False)
    with g.graph.as_default():
        sv = tf.train.Supervisor()
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
            print("Restored!")

            # Get model name
            mname = open(hp.logdir + '/checkpoint', 'r').read().split('"')[1]  # model name

            if not os.path.exists('results'): os.mkdir('results')
            fout = 'results/{}.txt'.format(mname)

            import copy
            _preds = copy.copy(x)
            while 1:
                istarget, probs, preds = sess.run([g.istarget, g.probs, g.preds], {g.x: _preds, g.y: y})
                probs = probs.astype(np.float32)
                preds = preds.astype(np.float32)

                probs *= istarget  # (N, 9, 9)
                preds *= istarget  # (N, 9, 9)

                probs = np.reshape(probs, (-1, 9*9))  # (N, 9*9)
                preds = np.reshape(preds, (-1, 9*9))  # (N, 9*9)

                _preds = np.reshape(_preds, (-1, 9*9))
                maxprob_ids = np.argmax(probs, axis=1)  # (N,) <- blanks of the most probable prediction
                maxprobs = np.max(probs, axis=1, keepdims=False)
                for j, (maxprob_id, maxprob) in enumerate(zip(maxprob_ids, maxprobs)):
                    if maxprob != 0:
                        _preds[j, maxprob_id] = preds[j, maxprob_id]
                _preds = np.reshape(_preds, (-1, 9, 9))
                _preds = np.where(x == 0, _preds, y)  # fill in the non-blanks with the given numbers
                if np.count_nonzero(_preds) == _preds.size: break

            write_to_file(x.astype(np.int32), y, _preds.astype(np.int32), fout)
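The while-loop in Example 3 fills a Sudoku grid one cell per pass: among the remaining blanks, only the single most confident prediction is committed before the model is re-run. A minimal numpy sketch of one such step (function and argument names are hypothetical):

import numpy as np

def fill_most_confident(grid, probs, preds):
    # grid: (9, 9) ints with 0 for blanks; probs/preds: (9, 9) model outputs.
    masked = np.where(grid == 0, probs, 0.0)               # consider blank cells only
    idx = np.unravel_index(np.argmax(masked), masked.shape)
    if masked[idx] > 0:                                    # at least one blank remains
        grid = grid.copy()
        grid[idx] = preds[idx]                             # commit the surest prediction
    return grid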
Example 4: test

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def test():
    # Load data: two samples
    files, speaker_ids = load_data(mode="test")
    speaker_ids = speaker_ids[::-1]  # swap

    # Parse
    x = np.zeros((2, 63488, 1), np.int32)
    for i, f in enumerate(files):
        f = np.load(f)
        length = min(63488, len(f))
        x[i, :length, :] = f[:length]

    # Graph
    g = Graph("test"); print("Test Graph loaded")
    with tf.Session() as sess:
        saver = tf.train.Saver()

        # Restore saved variables
        ckpt = tf.train.latest_checkpoint(hp.logdir)
        if ckpt is not None: saver.restore(sess, ckpt)

        # Feed Forward
        y_hat = np.zeros((2, 63488, 1), np.int32)
        for j in tqdm(range(63488)):
            _y_hat = sess.run(g.y_hat, {g.x: x, g.y: y_hat, g.speaker_ids: speaker_ids})
            _y_hat = np.expand_dims(_y_hat, -1)
            y_hat[:, j, :] = _y_hat[:, j, :]

        for i, y in tqdm(enumerate(y_hat)):
            audio = mu_law_decode(y)
            write(os.path.join(hp.sampledir, '{}.wav'.format(i + 1)), hp.sr, audio)
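mu_law_decode in Example 4 maps integer codes back to waveform samples. The repository's own implementation is not shown here; the standard mu-law expansion (assuming mu = 255 and codes in [0, 255]) looks like this:

import numpy as np

def mu_law_decode(y, mu=255):
    # Invert mu-law companding: integer codes -> floats in [-1, 1].
    y = 2.0 * y.astype(np.float32) / mu - 1.0  # rescale codes to [-1, 1]
    return np.sign(y) * ((1.0 + mu) ** np.abs(y) - 1.0) / mu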
Example 5: eval

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def eval():
    # Load graph
    g = Graph(mode="eval"); print("Evaluation Graph loaded")

    # Load data
    fpaths, text_lengths, texts = load_data(mode="eval")

    # Parse
    text = np.fromstring(texts[0], np.int32)  # (None,)
    fname, mel, mag = load_spectrograms(fpaths[0])

    x = np.expand_dims(text, 0)  # (1, None)
    y = np.expand_dims(mel, 0)   # (1, None, n_mels*r)
    z = np.expand_dims(mag, 0)   # (1, None, 1+n_fft//2)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)); print("Restored!")
        writer = tf.summary.FileWriter(hp.logdir, sess.graph)

        # Feed Forward
        ## mel
        y_hat = np.zeros((1, y.shape[1], y.shape[2]), np.float32)  # hp.n_mels*hp.r
        for j in range(y.shape[1]):
            _y_hat = sess.run(g.y_hat, {g.x: x, g.y: y_hat})
            y_hat[:, j, :] = _y_hat[:, j, :]

        ## mag
        merged, gs = sess.run([g.merged, g.global_step], {g.x: x, g.y: y, g.y_hat: y_hat, g.z: z})
        writer.add_summary(merged, global_step=gs)
        writer.close()
Example 6: copy_synth_GL

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def copy_synth_GL(hp, outdir):
    safe_makedir(outdir)

    dataset = load_data(hp, mode="synthesis")
    fnames, texts = dataset['fpaths'], dataset['texts']
    bases = [basename(fname) for fname in fnames]

    for base in bases:
        print("Working on file %s" % (base))
        mag = np.load(os.path.join(hp.full_audio_dir, base + '.npy'))
        wav = spectrogram2wav(hp, mag)
        soundfile.write(outdir + "/%s.wav" % (base), wav, hp.sr)
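spectrogram2wav in Examples 2 and 6 reconstructs audio from a magnitude spectrogram, typically via Griffin-Lim. A minimal sketch using librosa (the hop and window parameters here are placeholders; real projects usually also apply power scaling, pre-emphasis inversion, and trimming):

import librosa

def spectrogram2wav_sketch(mag, hop_length=256, win_length=1024, n_iter=50):
    # mag: (frames, bins) magnitude spectrogram; librosa expects (bins, frames).
    return librosa.griffinlim(mag.T, n_iter=n_iter,
                              hop_length=hop_length, win_length=win_length)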
Example 7: main_work

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def main_work():
    #################################################
    # ============= Process command line ============
    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-ncores', default=1, type=int, help='Number of cores for parallel processing')
    opts = a.parse_args()
    # ===============================================

    hp = load_config(opts.config)
    assert hp.attention_guide_dir

    dataset = load_data(hp)
    fpaths, text_lengths = dataset['fpaths'], dataset['text_lengths']

    assert os.path.exists(hp.coarse_audio_dir)
    safe_makedir(hp.attention_guide_dir)

    executor = ProcessPoolExecutor(max_workers=opts.ncores)
    futures = []
    for (fpath, text_length) in zip(fpaths, text_lengths):
        futures.append(executor.submit(proc, fpath, text_length, hp))
    proc_list = [future.result() for future in tqdm.tqdm(futures)]
Example 8: convert

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def convert():
    g = Graph("convert"); print("Training Graph loaded")
    mfccs = load_data("convert")

    with tf.Session() as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        # Restore net1
        logdir = hp.logdir + "/train1"
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net1')
        saver = tf.train.Saver(var_list=var_list)
        ckpt = tf.train.latest_checkpoint(logdir)
        if ckpt is not None: saver.restore(sess, ckpt)

        # Restore net2
        logdir = hp.logdir + "/train2"
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net2') + \
                   tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'training')
        saver2 = tf.train.Saver(var_list=var_list)
        ckpt = tf.train.latest_checkpoint(logdir)
        if ckpt is not None: saver2.restore(sess, ckpt)

        # Synthesize
        if not os.path.exists('50lang-output'): os.mkdir('50lang-output')
        mag_hats = sess.run(g.mag_hats, {g.mfccs: mfccs})
        for i, mag_hat in enumerate(mag_hats):
            wav = spectrogram2wav(mag_hat)
            write('50lang-output/{}.wav'.format(i+1), hp.sr, wav)
Example 9: eval1

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def eval1():
    # Load data
    mfccs, phns = load_data(mode="eval1")

    # Graph
    g = Graph("eval1"); print("Evaluation Graph loaded")
    logdir = hp.logdir + "/train1"

    # Session
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Restore saved variables
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net1') + \
                   tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'training')
        saver = tf.train.Saver(var_list=var_list)
        ckpt = tf.train.latest_checkpoint(logdir)
        if ckpt is not None: saver.restore(sess, ckpt)

        # Writer
        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Evaluation
        merged, gs = sess.run([g.merged, g.global_step], {g.mfccs: mfccs, g.phones: phns})

        # Write summaries
        writer.add_summary(merged, global_step=gs)
        writer.close()
Example 10: synthesize

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def synthesize():
    # Load data
    L = load_data("synthesize")

    # Load graph
    g = Graph(mode="synthesize"); print("Graph loaded")

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Restore parameters
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Text2Mel')
        saver1 = tf.train.Saver(var_list=var_list)
        saver1.restore(sess, tf.train.latest_checkpoint(hp.logdir + "-1"))
        print("Text2Mel Restored!")

        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN') + \
                   tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gs')
        saver2 = tf.train.Saver(var_list=var_list)
        saver2.restore(sess, tf.train.latest_checkpoint(hp.logdir + "-2"))
        print("SSRN Restored!")

        # Feed Forward
        ## mel
        Y = np.zeros((len(L), hp.max_T, hp.n_mels), np.float32)
        prev_max_attentions = np.zeros((len(L),), np.int32)
        for j in tqdm(range(hp.max_T)):
            _gs, _Y, _max_attentions, _alignments = \
                sess.run([g.global_step, g.Y, g.max_attentions, g.alignments],
                         {g.L: L,
                          g.mels: Y,
                          g.prev_max_attentions: prev_max_attentions})
            Y[:, j, :] = _Y[:, j, :]
            prev_max_attentions = _max_attentions[:, j]

        # Get magnitude
        Z = sess.run(g.Z, {g.Y: Y})

        # Generate wav files
        if not os.path.exists(hp.sampledir): os.makedirs(hp.sampledir)
        for i, mag in enumerate(Z):
            print("Working on file", i+1)
            wav = spectrogram2wav(mag)
            write(hp.sampledir + "/{}.wav".format(i+1), hp.sr, wav)
Example 11: eval

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def eval():
    # Load graph
    g = Graph(mode="test")
    print("Graph loaded")

    # Load batch
    _Y = load_data(mode="test")
    X = np.zeros((len(_Y), hp.maxlen))
    Y = np.zeros((len(_Y), hp.maxlen))
    for i, y in enumerate(_Y):
        y = np.fromstring(y, np.int32)
        Y[i][:len(y)] = y
        np.random.shuffle(y)
        X[i][:len(y)] = y

    word2idx, idx2word = g.word2idx, g.idx2word

    # Start session
    with g.graph.as_default():
        sv = tf.train.Supervisor()
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))

            # Get model name
            mname = open(hp.logdir + '/checkpoint', 'r').read().split('"')[1]  # model name

            # Inference
            if not os.path.exists('results'): os.mkdir('results')
            with codecs.open("results/" + mname, "w", "utf-8") as fout:
                num_words, total_edit_distance = 0, 0
                for i in range(0, len(Y), hp.batch_size):
                    # Get mini-batches
                    x = X[i:i+hp.batch_size]
                    y = Y[i:i+hp.batch_size]

                    # Autoregressive inference
                    preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)
                    for j in range(hp.maxlen):
                        _preds = sess.run(g.preds, {g.x: x, g.y: preds})
                        preds[:, j] = _preds[:, j]

                    for xx, yy, pred in zip(x, y, preds):  # sentence-wise
                        inputs = " ".join(idx2word[idx] for idx in xx).replace("_", "").strip()
                        expected = " ".join(idx2word[idx] for idx in yy).replace("_", "").strip()
                        got = " ".join(idx2word[idx] for idx in pred[:len(inputs.split())])

                        edit_distance = distance.levenshtein(expected.split(), got.split())
                        total_edit_distance += edit_distance
                        num_words += len(expected.split())

                        fout.write(u"Inputs  : {}\n".format(inputs))
                        fout.write(u"Expected: {}\n".format(expected))
                        fout.write(u"Got     : {}\n".format(got))
                        fout.write(u"WER     : {}\n\n".format(edit_distance))
                fout.write(u"Total WER: {}/{}={}\n".format(total_edit_distance,
                                                           num_words,
                                                           round(float(total_edit_distance) / num_words, 2)))
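distance.levenshtein in Example 11 computes word-level edit distance for the WER figure. If the distance package is unavailable, a plain dynamic-programming version behaves the same on token lists (a sketch, not that package's code):

def levenshtein(a, b):
    # Word-level edit distance between two token lists, O(len(a) * len(b)).
    prev = list(range(len(b) + 1))
    for i, x in enumerate(a, 1):
        curr = [i]
        for j, y in enumerate(b, 1):
            curr.append(min(prev[j] + 1,              # deletion
                            curr[j - 1] + 1,          # insertion
                            prev[j - 1] + (x != y)))  # substitution
        prev = curr
    return prev[-1]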
Example 12: evaluate

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def evaluate():
    # Load graph
    g = Graph(mode="evaluate"); print("Graph loaded")

    # Load data
    fpaths, _, texts = load_data(mode="evaluate")
    lengths = [len(t) for t in texts]
    maxlen = sorted(lengths, reverse=True)[0]
    new_texts = np.zeros((len(texts), maxlen), np.int32)
    for i, text in enumerate(texts):
        new_texts[i, :len(text)] = [idx for idx in text]
    # new_texts = np.split(new_texts, 2)
    new_texts = new_texts[:evaluate_wav_num]  # evaluate_wav_num: assumed defined elsewhere in the script
    half_size = int(len(fpaths)/2)
    print(half_size)
    # new_fpaths = [fpaths[:half_size], fpaths[half_size:]]
    fpaths = fpaths[:evaluate_wav_num]

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)); print("Evaluate Model Restored!")
        """
        err = 0.0
        for i, t_split in enumerate(new_texts):
            y_hat = np.zeros((t_split.shape[0], 200, hp.n_mels*hp.r), np.float32)  # hp.n_mels*hp.r
            for j in tqdm.tqdm(range(200)):
                _y_hat = sess.run(g.y_hat, {g.x: t_split, g.y: y_hat})
                y_hat[:, j, :] = _y_hat[:, j, :]
            mags = sess.run(g.z_hat, {g.y_hat: y_hat})
            for k, mag in enumerate(mags):
                fname, mel_ans, mag_ans = load_spectrograms(new_fpaths[i][k])
                print("File {} is being evaluated ...".format(fname))
                audio = spectrogram2wav(mag)
                audio_ans = spectrogram2wav(mag_ans)
                err += calculate_mse(audio, audio_ans)
        err = err/float(len(fpaths))
        print(err)
        """
        # Feed Forward
        ## mel
        y_hat = np.zeros((new_texts.shape[0], 200, hp.n_mels*hp.r), np.float32)  # hp.n_mels*hp.r
        for j in tqdm.tqdm(range(200)):
            _y_hat = sess.run(g.y_hat, {g.x: new_texts, g.y: y_hat})
            y_hat[:, j, :] = _y_hat[:, j, :]

        ## mag
        mags = sess.run(g.z_hat, {g.y_hat: y_hat})
        err = 0.0
        for i, mag in enumerate(mags):
            fname, mel_ans, mag_ans = load_spectrograms(fpaths[i])
            print("File {} is being evaluated ...".format(fname))
            # audio = spectrogram2wav(mag)
            # audio_ans = spectrogram2wav(mag_ans)
            # err += calculate_mse(audio, audio_ans)
            err += calculate_mse(mag, mag_ans)
        err = err/float(len(fpaths))
        print(err)
        opf.write(hp.logdir + " spectrogram mse: " + str(err) + "\n")  # opf: results file, assumed opened elsewhere
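calculate_mse is not defined in the snippet; it is presumably a plain mean squared error between the predicted and reference spectrograms, along these lines (lengths may differ, so cropping to the overlap is assumed):

import numpy as np

def calculate_mse(a, b):
    # Mean squared error over the overlapping prefix of two arrays.
    n = min(len(a), len(b))
    a = np.asarray(a[:n], np.float32)
    b = np.asarray(b[:n], np.float32)
    return np.mean((a - b) ** 2)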
Example 13: eval

# Required module import: import data_load [as alias]
# Or: from data_load import load_data [as alias]
def eval():
    # Load graph
    g = Graph(is_training=False)
    print("Graph loaded")

    # Load data
    X, Y = load_data(mode="test")  # texts
    char2idx, idx2char = load_vocab()

    with g.graph.as_default():
        sv = tf.train.Supervisor()
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
            print("Restored!")

            # Get model name
            mname = open(hp.logdir + '/checkpoint', 'r').read().split('"')[1]  # model name

            # Inference
            if not os.path.exists(hp.savedir): os.mkdir(hp.savedir)
            with open("{}/{}".format(hp.savedir, mname), 'w') as fout:
                results = []
                baseline_results = []
                for step in range(len(X) // hp.batch_size):
                    x = X[step * hp.batch_size: (step + 1) * hp.batch_size]
                    y = Y[step * hp.batch_size: (step + 1) * hp.batch_size]

                    # Predict characters
                    preds = sess.run(g.preds, {g.x: x})
                    for xx, yy, pp in zip(x, y, preds):  # sentence-wise
                        expected = ''
                        got = ''
                        for xxx, yyy, ppp in zip(xx, yy, pp):  # character-wise
                            if xxx == 0:
                                break
                            else:
                                got += idx2char.get(xxx, "*")
                                expected += idx2char.get(xxx, "*")
                            if ppp == 1: got += " "
                            if yyy == 1: expected += " "

                            # Prediction results
                            if ppp == yyy:
                                results.append(1)
                            else:
                                results.append(0)

                            # Baseline results
                            if yyy == 0:  # no space
                                baseline_results.append(1)
                            else:
                                baseline_results.append(0)

                        fout.write("▌Expected: " + expected + "\n")
                        fout.write("▌Got: " + got + "\n\n")
                fout.write(
                    "Final Accuracy = %d/%d=%.4f\n" % (sum(results), len(results), float(sum(results)) / len(results)))
                fout.write(
                    "Baseline Accuracy = %d/%d=%.4f" % (sum(baseline_results), len(baseline_results), float(sum(baseline_results)) / len(baseline_results)))