This page collects typical usage examples of the Python method matplotlib.pyplot.specgram. If you are wondering what pyplot.specgram does, how to call it, or how it is used in real projects, the curated code samples below may help. You can also browse further usage examples for the module it belongs to, matplotlib.pyplot.
Below are 9 code examples of pyplot.specgram, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
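Before the collected examples, here is a minimal, self-contained sketch of the plt.specgram call itself. The sine-wave signal and all parameter values are illustrative, not taken from any of the examples below.

import numpy as np
import matplotlib.pyplot as plt

fs = 8000                                    # sampling rate in Hz
t = np.arange(0, 2, 1 / fs)                  # 2 seconds of samples
x = np.sin(2 * np.pi * 440 * t)              # a 440 Hz sine
# specgram returns the spectrum, the frequency bins, the time bins,
# and the AxesImage used to draw the plot.
Pxx, freqs, bins, im = plt.specgram(x, NFFT=256, Fs=fs, noverlap=128)
plt.xlabel("Time [s]")
plt.ylabel("Frequency [Hz]")
plt.show()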
Example 1: run_phase_reconstruction_example
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example 2: harmonics
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def harmonics():
    synth = WaveSynth()
    freq = 1500
    num_harmonics = 6
    h_all = synth.harmonics(freq, 1, [(n, 1/n) for n in range(1, num_harmonics+1)])
    even_harmonics = [(1, 1)]  # always include fundamental tone harmonic
    even_harmonics.extend([(n, 1/n) for n in range(2, num_harmonics*2, 2)])
    h_even = synth.harmonics(freq, 1, even_harmonics)
    h_odd = synth.harmonics(freq, 1, [(n, 1/n) for n in range(1, num_harmonics*2, 2)])
    h_all.join(h_even).join(h_odd)
    import matplotlib.pyplot as plot
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(h_all.get_frame_array(), Fs=synth.samplerate, noverlap=90, cmap=plot.cm.gist_heat)
    plot.show()
Example 3: implot
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def implot(arr, scale=None, title="", cmap="gray"):
    import matplotlib.pyplot as plt
    if scale == "specgram":
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
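A hypothetical call to implot, using random data in place of a real STFT magnitude matrix; the array shape and values here are illustrative only:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
fake_magnitudes = rng.rand(200, 257) + 1e-6   # 200 time frames x 257 frequency bins
implot(fake_magnitudes, scale="specgram", title="Log-magnitude spectrogram")
plt.show()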
Example 4: plotSpectrogram
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def plotSpectrogram(data, fftWindow, fftSize, Fs):
    if fftSize is None:
        N = len(data)
    else:
        N = fftSize

    if Fs is None:
        Fs = 2

    if fftWindow == "rectangular":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data*np.ones(len(data)), noverlap=int(N/10))
    elif fftWindow == "bartlett":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data*np.bartlett(len(data)), noverlap=int(N/10))
    elif fftWindow == "blackman":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data*np.blackman(len(data)), noverlap=int(N/10))
    elif fftWindow == "hamming":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data*np.hamming(len(data)), noverlap=int(N/10))
    elif fftWindow == "hanning":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data*np.hanning(len(data)), noverlap=int(N/10))

    plt.show()
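The if/elif chain above can be collapsed into a lookup table of window functions. A sketch of that variant; the function and variable names here are chosen for illustration and do not come from the original project:

import numpy as np
import matplotlib.pyplot as plt

WINDOWS = {
    "rectangular": np.ones,
    "bartlett": np.bartlett,
    "blackman": np.blackman,
    "hamming": np.hamming,
    "hanning": np.hanning,
}

def plot_spectrogram_compact(data, fft_window, fft_size=None, fs=None):
    n = len(data) if fft_size is None else fft_size
    fs = 2 if fs is None else fs
    make_window = WINDOWS[fft_window]
    # Apply the selected window to each segment before the FFT.
    plt.specgram(data, NFFT=n, Fs=fs,
                 window=lambda seg: seg * make_window(len(seg)),
                 noverlap=int(n / 10))
    plt.show()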
Example 5: plot
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def plot(self, NFFT=512, noverlap=384, **kwargs):
    '''
    Plot the spectrogram of the audio sample.
    It takes the same keyword arguments as
    `matplotlib.pyplot.specgram <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.specgram.html>`_.
    '''
    import numpy as np
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('Warning: matplotlib is required for plotting')
        return

    # Handle single channel case
    if self.data.ndim == 1:
        data = self.data[:, None]
    else:
        data = self.data

    nchannels = data.shape[1]

    # Try to have a square looking plot
    pcols = int(np.ceil(np.sqrt(nchannels)))
    prows = int(np.ceil(nchannels / pcols))

    for c in range(nchannels):
        plt.specgram(data[:, c], NFFT=NFFT, Fs=self.fs, noverlap=noverlap, **kwargs)
        plt.xlabel('Time [s]')
        plt.ylabel('Frequency [Hz]')
        plt.title('Channel {}'.format(c+1))
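The excerpt above computes a roughly square grid shape (prows x pcols) but never places the per-channel plots onto it. A standalone sketch of that layout, assuming data is an (nsamples, nchannels) array with sampling rate fs; the names here are chosen for illustration:

import numpy as np
import matplotlib.pyplot as plt

def plot_channel_spectrograms(data, fs, NFFT=512, noverlap=384):
    if data.ndim == 1:
        data = data[:, None]
    nchannels = data.shape[1]
    # Arrange the channels on a roughly square grid of subplots.
    pcols = int(np.ceil(np.sqrt(nchannels)))
    prows = int(np.ceil(nchannels / pcols))
    for c in range(nchannels):
        plt.subplot(prows, pcols, c + 1)
        plt.specgram(data[:, c], NFFT=NFFT, Fs=fs, noverlap=noverlap)
        plt.title('Channel {}'.format(c + 1))
    plt.tight_layout()
    plt.show()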
Example 6: graph_spectrogram
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def graph_spectrogram(wav_file):
    rate, data = get_wav_info(wav_file)
    print(type(data), len(data))
    nfft = 256  # Length of the windowing segments
    fs = 256    # Sampling frequency
    pxx, freqs, bins, im = plt.specgram(data, NFFT=nfft, Fs=fs)
    print("pxx : ", len(pxx))
    print("freqs : ", len(freqs))
    print("bins : ", len(bins))
    # plt.axis('on')
    # plt.show()
    plt.axis('off')
    print(wav_file.split('.wav')[0])
    plt.savefig(wav_file.split('.wav')[0] + '.png',
                dpi=100,           # Dots per inch
                frameon=False,     # note: frameon/aspect may be rejected by newer Matplotlib's savefig
                aspect='normal',
                bbox_inches='tight',
                pad_inches=0)      # Spectrogram saved as a .png
    try:
        im = Image.open(wav_file.split('.wav')[0] + '.png')
        rgb_im = im.convert('RGB')
        rgb_im.save(wav_file.split('.wav')[0] + '.jpg')
    except Exception as e:
        print(e)
        if os.path.exists(wav_file.split('.wav')[0] + '.png'):
            os.system('convert ' + (wav_file.split('.wav')[0] + '.png') + ' ' + (wav_file.split('.wav')[0] + '.jpg'))
    os.remove(wav_file.split('.wav')[0] + '.png')
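If Pillow is installed, recent Matplotlib can usually write JPEG directly, so the PNG round-trip and the external ImageMagick `convert` call can often be avoided. A minimal sketch of that variant; get_wav_info is assumed to be the same helper used above, and the nfft and fs values are illustrative:

import matplotlib.pyplot as plt

def graph_spectrogram_jpg(wav_file, nfft=256, fs=256):
    rate, data = get_wav_info(wav_file)
    plt.specgram(data, NFFT=nfft, Fs=fs)
    plt.axis('off')
    # Save straight to JPEG instead of PNG followed by a conversion step.
    out_path = wav_file.rsplit('.wav', 1)[0] + '.jpg'
    plt.savefig(out_path, dpi=100, bbox_inches='tight', pad_inches=0)
    plt.close()
    return out_path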
Example 7: run_phase_vq_example
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def run_phase_vq_example():
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)
    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]
    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)
    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))
    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """
    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
Example 8: fm
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def fm():
    synth = WaveSynth(samplerate=8000)
    from matplotlib import pyplot as plot
    freq = 2000
    lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
    s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(s1.get_frame_array(), Fs=synth.samplerate, noverlap=90, cmap=plot.cm.gist_heat)
    plot.show()
    with Output(nchannels=1, mixing="sequential") as out:
        synth = WaveSynth()
        freq = 440
        lfo1 = Linear(5, samplerate=synth.samplerate)
        lfo1 = EnvelopeFilter(lfo1, 1, 0.5, 0.5, 0.5, 1)
        s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
        s_all = s1.copy()
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.2, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq/17, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq/6, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        freq = 440*2
        lfo1 = Sine(freq/80, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        # s_all.write_wav("fmtestall.wav")
        out.wait_all_played()
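For readers without the WaveSynth dependency, a numpy-only sketch that produces and plots a roughly comparable FM sine. The 2000 Hz carrier, 1 Hz LFO, and 0.4 modulation depth echo the first part of the example, but the exact modulation law of WaveSynth's fm_lfo is not assumed here; this is simply one common way to synthesize FM:

import numpy as np
import matplotlib.pyplot as plt

fs = 8000
t = np.arange(0, 3, 1 / fs)
carrier = 2000                                # carrier frequency in Hz
lfo = 0.4 * np.sin(2 * np.pi * 1.0 * t)       # 1 Hz LFO, 0.4 modulation depth
f_inst = carrier * (1 + lfo)                  # instantaneous frequency
phase = 2 * np.pi * np.cumsum(f_inst) / fs    # integrate frequency to get phase
signal = np.sin(phase)

plt.title("Spectrogram")
plt.ylabel("Freq")
plt.xlabel("Time")
plt.specgram(signal, Fs=fs, noverlap=90, cmap=plt.cm.gist_heat)
plt.show()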
Example 9: run_phase_vq_example
# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import specgram [as alias]
def run_phase_vq_example():
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, fftsize=n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)
    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]
    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)
    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))
    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """
    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))