This article collects typical usage examples of the Python method mir_eval.separation.bss_eval_sources. If you are wondering what separation.bss_eval_sources does or how to call it, the curated examples below may help; you can also explore the containing module mir_eval.separation further.
The following presents 7 code examples of separation.bss_eval_sources, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
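Before the examples, a minimal self-contained sketch of the call itself may help (the signals below are synthetic, purely for illustration): bss_eval_sources takes reference and estimated sources of shape (nsrc, nsampl) and returns per-source SDR/SIR/SAR arrays plus the best matching permutation.

import numpy as np
from mir_eval.separation import bss_eval_sources

rng = np.random.default_rng(0)
ref = rng.standard_normal((2, 16000))               # 2 references, 1 s at 16 kHz
est = ref + 0.1 * rng.standard_normal((2, 16000))   # slightly noisy estimates

# sdr, sir, sar each have shape (nsrc,); perm[j] is the index of the
# estimated source that best matches reference source j.
sdr, sir, sar, perm = bss_eval_sources(ref, est)
print(sdr, perm)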
Example 1: cal_SDRi
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
import numpy as np
from mir_eval.separation import bss_eval_sources


def cal_SDRi(src_ref, src_est, mix):
    """Calculate Source-to-Distortion Ratio improvement (SDRi).

    NOTE: bss_eval_sources is very slow.

    Args:
        src_ref: numpy.ndarray, [C, T]
        src_est: numpy.ndarray, [C, T], reordered by best PIT permutation
        mix: numpy.ndarray, [T]
    Returns:
        average_SDRi
    """
    # Use the mixture itself as the anchor estimate for both sources
    # (this hard-codes the two-speaker case).
    src_anchor = np.stack([mix, mix], axis=0)
    sdr, sir, sar, popt = bss_eval_sources(src_ref, src_est)
    sdr0, sir0, sar0, popt0 = bss_eval_sources(src_ref, src_anchor)
    avg_SDRi = ((sdr[0] - sdr0[0]) + (sdr[1] - sdr0[1])) / 2
    # print("SDRi1: {0:.2f}, SDRi2: {1:.2f}".format(sdr[0]-sdr0[0], sdr[1]-sdr0[1]))
    return avg_SDRi
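A hypothetical driver for cal_SDRi with synthetic two-speaker data (the arrays below are made up; in practice they come from a separation model and its PIT-reordered outputs):

rng = np.random.default_rng(1)
src_ref = rng.standard_normal((2, 8000))                    # [C, T] clean sources
mix = src_ref.sum(axis=0)                                   # [T] mixture
src_est = src_ref + 0.05 * rng.standard_normal((2, 8000))   # estimates, PIT-ordered

print("SDRi: {:.2f} dB".format(cal_SDRi(src_ref, src_est, mix)))

Note that cal_SDRi as written assumes exactly two sources, since the anchor stacks the mixture twice.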
Example 2: convergence_callback
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
import numpy as np
import pyroomacoustics as pra


def convergence_callback(Y, **kwargs):
    # Tracks separation quality per iteration; SDR, SIR, ref and the STFT
    # parameters (framesize, win_s, args, n_sources_target) are module-level
    # globals.
    global SDR, SIR, ref
    from mir_eval.separation import bss_eval_sources

    if Y.shape[2] == 1:
        y = pra.transform.synthesis(
            Y[:, :, 0], framesize, framesize // 2, win=win_s
        )[:, None]
    else:
        y = pra.transform.synthesis(Y, framesize, framesize // 2, win=win_s)

    if args.algo != "blinkiva":
        # Order output channels by decreasing power.
        new_ord = np.argsort(np.std(y, axis=0))[::-1]
        y = y[:, new_ord]

    # Trim reference and estimate to a common length before evaluation.
    m = np.minimum(y.shape[0] - framesize // 2, ref.shape[1])
    sdr, sir, sar, perm = bss_eval_sources(
        ref[:n_sources_target, :m, 0],
        y[framesize // 2 : m + framesize // 2, :n_sources_target].T,
    )
    SDR.append(sdr)
    SIR.append(sir)
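In pyroomacoustics, a callback like this is typically handed to an iterative separation routine, which invokes it with the current STFT-domain estimate as the algorithm runs. A sketch, assuming pra.bss.auxiva and an STFT tensor X of shape (n_frames, n_freq, n_channels) prepared beforehand:

SDR, SIR = [], []  # histories appended to by convergence_callback
# The callback fires during the iterations, so SDR/SIR trace how
# separation quality evolves as the algorithm converges.
Y = pra.bss.auxiva(X, n_iter=30, callback=convergence_callback)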
Example 3: val
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
def val(epoch):
    # Relies on module-level globals: model, loadval, criterion, device,
    # sampleSize, USEBOARD, writer, iteration.
    model.eval()
    start_time = time.time()
    cnt, aveloss, avesdr = 0, 0, 0
    with torch.no_grad():
        for iloader, xtrain, ytrain in loadval:
            for ind in range(0, xtrain.shape[-1], sampleSize):
                # Skip the trailing chunk if it is shorter than sampleSize.
                if xtrain[0, 0, ind:ind + sampleSize].shape[0] < sampleSize:
                    break
                output = model(xtrain[:, :, ind:ind + sampleSize].to(device))
                loss = criterion(output, ytrain[:, :, ind:ind + sampleSize].to(device))
                # if epoch % 5 == 0:
                #     if (np.sum(output.cpu().numpy() == 0) == sampleSize
                #             or np.sum(ytrain[:, 0, ind:ind + sampleSize].numpy()) == sampleSize):
                #         print("all silence")
                #     else:
                #         avesdr += bss_eval_sources(output[:, 0, :].cpu().numpy(),
                #                                    ytrain[:, 0, ind:ind + sampleSize].numpy())[0].mean()
                cnt += 1
                aveloss += float(loss)
    aveloss /= cnt
    # avesdr /= cnt
    # if epoch % 5 == 0: print('sdr:', avesdr)
    print('loss for validation:{:.5f}, epoch {}, valtime {}'.format(
        aveloss, epoch, np.round(time.time() - start_time)))
    if USEBOARD:
        writer.add_scalar('waveunet val loss', aveloss, iteration)
Example 4: bss_eval_global
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
import numpy as np
from mir_eval.separation import bss_eval_sources


def bss_eval_global(wavs_mono, wavs_src1, wavs_src2, wavs_src1_pred, wavs_src2_pred):
    assert len(wavs_mono) == len(wavs_src1) == len(wavs_src2) \
        == len(wavs_src1_pred) == len(wavs_src2_pred)

    gnsdr = np.zeros(2)
    gsir = np.zeros(2)
    gsar = np.zeros(2)
    frames_total = 0
    for wav_mono, wav_src1, wav_src2, wav_src1_pred, wav_src2_pred in zip(
            wavs_mono, wavs_src1, wavs_src2, wavs_src1_pred, wavs_src2_pred):
        # Crop every signal to the length of the prediction.
        len_cropped = wav_src1_pred.shape[-1]
        wav_mono_cropped = wav_mono[:len_cropped]
        wav_src1_cropped = wav_src1[:len_cropped]
        wav_src2_cropped = wav_src2[:len_cropped]
        sdr, sir, sar, _ = bss_eval_sources(
            reference_sources=np.asarray([wav_src1_cropped, wav_src2_cropped]),
            estimated_sources=np.asarray([wav_src1_pred, wav_src2_pred]),
            compute_permutation=False,
        )
        sdr_mono, _, _, _ = bss_eval_sources(
            reference_sources=np.asarray([wav_src1_cropped, wav_src2_cropped]),
            estimated_sources=np.asarray([wav_mono_cropped, wav_mono_cropped]),
            compute_permutation=False,
        )
        # Normalized SDR: improvement over using the mixture as the estimate.
        nsdr = sdr - sdr_mono
        # Accumulate length-weighted scores.
        gnsdr += len_cropped * nsdr
        gsir += len_cropped * sir
        gsar += len_cropped * sar
        frames_total += len_cropped
    gnsdr /= frames_total
    gsir /= frames_total
    gsar /= frames_total
    return gnsdr, gsir, gsar
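A hypothetical driver for bss_eval_global with three synthetic one-second clips (in practice the lists would come from a test-set loader):

import numpy as np

rng = np.random.default_rng(2)
wavs_src1 = [rng.standard_normal(16000) for _ in range(3)]
wavs_src2 = [rng.standard_normal(16000) for _ in range(3)]
wavs_mono = [a + b for a, b in zip(wavs_src1, wavs_src2)]
# Pretend the model recovered each source with a little residual noise.
wavs_src1_pred = [w + 0.1 * rng.standard_normal(16000) for w in wavs_src1]
wavs_src2_pred = [w + 0.1 * rng.standard_normal(16000) for w in wavs_src2]

gnsdr, gsir, gsar = bss_eval_global(
    wavs_mono, wavs_src1, wavs_src2, wavs_src1_pred, wavs_src2_pred)
print("GNSDR:", gnsdr, "GSIR:", gsir, "GSAR:", gsar)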
Example 5: convergence_callback
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
import numpy as np
import pyroomacoustics as pra


def convergence_callback(Y):
    # SDR/SIR histories and the STFT parameters (L, hop, win_s) as well as
    # separate_recordings are module-level globals.
    global SDR, SIR
    from mir_eval.separation import bss_eval_sources

    ref = np.moveaxis(separate_recordings, 1, 2)
    y = pra.transform.stft.synthesis(Y, L, hop, win=win_s)
    # Drop the synthesis delay, then trim both signals to a common length.
    y = y[L - hop:, :].T
    m = np.minimum(y.shape[1], ref.shape[1])
    sdr, sir, sar, perm = bss_eval_sources(ref[:, :m, 0], y[:, :m])
    SDR.append(sdr)
    SIR.append(sir)
Example 6: run
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
import numpy as np
from mir_eval.separation import bss_eval_sources


def run(args):
    # AudioReader and Report are project-specific helpers.
    sep_reader = AudioReader(args.sep_scp)
    ref_reader = AudioReader(args.ref_scp)
    utt_snr = open(args.per_utt, "w") if args.per_utt else None
    utt_ali = open(args.utt_ali, "w") if args.utt_ali else None
    reporter = Report(args.spk2class)
    # sep: N x S
    for key, sep in sep_reader:
        # ref: N x S
        ref = ref_reader[key]
        # Trim both signals to the same number of samples.
        nsamps = min(sep.shape[-1], ref.shape[-1])
        sdr, _, _, ali = bss_eval_sources(ref[:, :nsamps], sep[:, :nsamps])
        sdr = np.mean(sdr)
        reporter.add(key, sdr)
        if utt_snr:
            utt_snr.write("{}\t{:.2f}\n".format(key, sdr))
        if utt_ali:
            ali_str = " ".join(map(str, ali))
            utt_ali.write(f"{key}\t{ali_str}\n")
    reporter.report()
    if utt_snr:
        utt_snr.close()
    if utt_ali:
        utt_ali.close()
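Example 6 also writes out the permutation ali returned by bss_eval_sources. A small illustration, with made-up two-source data, of what that array encodes:

import numpy as np
from mir_eval.separation import bss_eval_sources

rng = np.random.default_rng(3)
ref = rng.standard_normal((2, 8000))
est = ref[::-1] + 0.1 * rng.standard_normal((2, 8000))  # estimates swapped

sdr, _, _, ali = bss_eval_sources(ref, est)
print(ali)  # [1 0]: estimated source ali[j] best matches reference j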
Example 7: test
# Required imports: from mir_eval import separation [as alias]
# Or: from mir_eval.separation import bss_eval_sources [as alias]
def test(epoch):  # testing data
    # Relies on module-level globals: model, loadtest, criterion, device,
    # sampleSize, mu_law_decode, savemusic, sample_rate.
    model.eval()
    start_time = time.time()
    with torch.no_grad():
        avesdr = 0
        numSongs = 0
        sdrmedian = np.zeros(50)
        for iloader, xtrain, ytrain in loadtest:
            iloader = iloader.item()
            listofpred0 = []
            cnt, aveloss = 0, 0
            for ind in range(0, xtrain.shape[-1] - sampleSize, sampleSize):
                if xtrain[0, 0, ind:ind + sampleSize].shape[0] < sampleSize:
                    break
                output = model(xtrain[:, :, ind:ind + sampleSize].to(device))
                listofpred0.append(output.reshape(-1).cpu().numpy())
                loss = criterion(output, ytrain[:, :, ind:ind + sampleSize].to(device))
                cnt += 1
                aveloss += float(loss)
            aveloss /= cnt
            print('loss for test:{}, num {}, epoch {}'.format(aveloss, iloader, epoch))
            ans0 = mu_law_decode(np.concatenate(listofpred0))
            if iloader >= 150:
                # sdr is the full (sdr, sir, sar, perm) tuple, so sdr[0][0]
                # is the first source's SDR.
                sdr = bss_eval_sources(
                    mu_law_decode(ytrain[0, 0, :ans0.shape[0]].cpu().numpy()), ans0)
                avesdr += sdr[0][0]
                sdrmedian[iloader - 150] = sdr[0][0]
                numSongs += 1
            if iloader > 160:
                continue
            if not os.path.exists('vsCorpus/'):
                os.makedirs('vsCorpus/')
            sf.write(savemusic.format(iloader), ans0, sample_rate)
            print('test stored done', np.round(time.time() - start_time))
        print('sdr mean:', avesdr / numSongs)
        print('sdr median:', np.median(sdrmedian))