This article collects typical usage examples of the Python function scikits.samplerate.resample. If you are wondering what resample does, how to call it, or what real-world uses look like, the curated examples below may help.
The following shows 15 code examples of the resample function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
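Before the examples, here is a minimal sketch of the basic call pattern. The 440 Hz test tone and the rates are made up for illustration; only the resample(data, ratio, converter) signature comes from scikits.samplerate itself:

import numpy as np
from scikits.samplerate import resample

orig_sr, target_sr = 44100, 16000
t = np.arange(orig_sr) / float(orig_sr)        # one second of sample times
signal = np.sin(2 * np.pi * 440.0 * t)         # 440 Hz test tone

# The second argument is the conversion ratio (target rate / source rate);
# the third selects the converter: 'sinc_best', 'sinc_medium', 'sinc_fastest',
# 'zero_order_hold' or 'linear'. The output is float, about ratio * len(input) long.
resampled = resample(signal, target_sr / float(orig_sr), 'sinc_best')
print("%d -> %d samples" % (len(signal), len(resampled)))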
Example 1: play
def play(self):
    if not self.sound:
        self.sound = pygame.mixer.Sound(self.file)
        self.freq = self.info.rate
    if resampleOk:
        (freq, format, channels) = pygame.mixer.get_init()
        if freq != self.freq:
            print "reload sound ", self.file
            self.freq = self.info.rate
            self.sound = pygame.mixer.Sound(self.file)
            if self.freq != freq:
                snd_array = pygame.sndarray.array(self.sound)
                samples = len(snd_array) / 2
                samples = int(samples * freq * 1.0 / self.info.rate)
                print "start resampling ", self.file, " from ", self.info.rate, " to ", freq, " len ", len(snd_array) / 2, " target ", samples
                # if samples != len(snd_array):
                #     snd_array = np.resize(snd_array, (samples, 2))
                snd_array = resample(snd_array, freq * 1.0 / self.info.rate, "sinc_fastest").astype(snd_array.dtype)
                # datal = signal.resample(snd_array[0::2], samples).astype(snd_array.dtype)
                # datar = signal.resample(snd_array[1::2], samples).astype(snd_array.dtype)
                # snd_array = np.resize(snd_array, (len(datal) * 2, 2))
                # snd_array[0::2] = datal
                # snd_array[1::2] = datar
                # print "end resampling ", snd_array
                self.sound = pygame.sndarray.make_sound(snd_array)
                self.freq = freq
    if self.volume:
        self.sound.set_volume(self.volume)
    self.channel = self.sound.play()
    self.playTime = time.time()
    if self.event:
        self.channel.set_endevent(self.event)
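The resampling core of this example can be reduced to a short helper. The sketch below is hypothetical (it assumes the mixer is already initialized and that snd is a loaded pygame.mixer.Sound recorded at src_rate); the .astype(...) cast matters because resample returns floats while pygame.sndarray.make_sound expects the mixer's integer sample format:

import pygame
import pygame.sndarray
from scikits.samplerate import resample

def match_mixer_rate(snd, src_rate):
    # Resample an existing Sound so it plays correctly at the current mixer frequency.
    mixer_rate = pygame.mixer.get_init()[0]
    if mixer_rate == src_rate:
        return snd
    arr = pygame.sndarray.array(snd)                    # integer samples, one column per channel
    arr = resample(arr, mixer_rate / float(src_rate),   # ratio = target / source
                   "sinc_fastest").astype(arr.dtype)    # cast back from float
    return pygame.sndarray.make_sound(arr)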
Example 2: strfpak_resample
def strfpak_resample(sound, slice_rate, resample_type):
    '''
    Resample a sound object using scikits.samplerate.resample.
    The sound is upsampled to the nearest multiple of slice_rate;
    if sound.samplerate is already a multiple of slice_rate, no resampling is done.
    The sound is also converted to float.
    Currently the error is below 10 dB at worst, and mostly very small.
    '''
    from scikits.samplerate import resample
    from copy import deepcopy
    # Calculate the resampling frequency for STRFPAK 5.3:
    # round up to the nearest multiple of 'slice_rate'
    input_freq = floor(sound.samplerate)
    output_freq = ceil(input_freq / slice_rate) * slice_rate
    # Copy so that we don't alter the original object
    sound2 = deepcopy(sound)
    # Scale factor -- MATLAB makes .wav data range -1:1, scipy uses integers
    sound2.data = sound2.data / float(2 ** 15)
    # Resample if necessary
    if output_freq > input_freq:
        sound2.data = resample(sound2.data, output_freq / input_freq,
                               resample_type)
        sound2.samplerate = output_freq
    # Pad because MATLAB resampling works differently
    sound2.data = sound2._zeropad(0, 2)
    return sound2
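As a quick check of the rounding rule, with hypothetical numbers (a 32000 Hz sound and a slice_rate of 1500 Hz):

from math import ceil, floor

input_freq = floor(32000)                           # hypothetical sound.samplerate
slice_rate = 1500
output_freq = ceil(input_freq / float(slice_rate)) * slice_rate
print("%d -> %d (ratio %.5f)" % (input_freq, output_freq, output_freq / float(input_freq)))
# 32000 -> 33000 (ratio 1.03125): the next multiple of 1500, and the ratio passed to resample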
Example 3: convert_wav
def convert_wav(File, ofile):
    import scikits.audiolab as audiolab
    from scikits.samplerate import resample
    # The latest scikits.audiolab includes a sound-recording lib based on python-alsaaudio.
    # If you want to downsample using scipy.signal instead:
    # import scipy.signal
    # Use audiolab to read the wav file
    Signal, fs = audiolab.wavread(File)[:2]
    # Target sample rate (16000 Hz, per the original comment)
    fr = 16000
    # Change the original sample rate to 16000 Hz (fast mode)
    Signal = resample(Signal, fr / float(fs), 'sinc_best')
    # Changing the sample rate with scipy is a bit slower:
    # Signal = scipy.signal.resample(Signal, int(round(len(Signal) * fr) / float(fs)), window=None)
    # File format
    fmt = audiolab.Format('flac', 'pcm16')
    nchannels = 1
    # Write the resampled signal into the .flac file
    ofile = audiolab.Sndfile(ofile, 'w', fmt, nchannels, fr)
    ofile.write_frames(Signal)
    return ofile
Example 4: wav_convert
def wav_convert(data, SR, tar_freq):
    # If the input signal is stereo, make it mono.
    if ndim(data) == 2:
        # Mix the stereo signal down to a mono signal
        buff01 = 0.49 * (data[:, 0] + data[:, 1])
        wave_ts = array(buff01)
    else:
        wave_ts = array(data[:])
    wave_ts = array(wave_ts)
    # Assumed input sampling rate
    up_SR = 44100
    # Compute the ratio to feed into the resample function
    ratio = float(float(tar_freq) / float(up_SR))
    # Resample the file.
    wave_ts = resample(wave_ts, ratio, 'linear')
    # Transpose the data list.
    wave_ts = transpose(wave_ts)
    # Return the wave_ts signal
    return wave_ts
Example 5: record
def record(self, signals, Fs):
    '''
    This simulates the recording of the signals by the microphones.
    In particular, if the microphones and the room simulation
    do not use the same sampling frequency, down/up-sampling
    is done here.

    Arguments:
    ----------
    signals: An ndarray with as many rows as there are microphones.
    Fs: the sampling frequency of the signals.
    '''
    if signals.shape[0] != self.M:
        raise NameError('The signals array should have as many rows as there are microphones.')
    if signals.ndim != 2:
        raise NameError('The signals should be a 2D array.')

    if Fs != self.Fs:
        from scikits.samplerate import resample
        Fs_ratio = self.Fs / float(Fs)
        newL = int(Fs_ratio * signals.shape[1]) - 1
        self.signals = np.zeros((self.M, newL))
        # scikits.samplerate's resample treats columns as channels,
        # so each microphone row is resampled separately here
        # (truncated to the preallocated length newL).
        for m in range(self.M):
            self.signals[m] = resample(signals[m], Fs_ratio, 'sinc_best')[:newL]
    else:
        self.signals = signals
def resample(y, orig_sr, target_sr, res_type='sinc_fastest'):
"""Resample a signal from orig_sr to target_sr
Arguments:
y -- (ndarray) audio time series
orig_sr -- (int) original sample rate of y
target_sr -- (int) target sample rate
res_type -- (str) resample type (see below)
Returns y_hat:
y_hat -- (ndarray) y resampled from orig_sr to target_sr
Notes:
if scikits.samplerate is installed, resample will use res_type
otherwise, it will fall back on scip.signal.resample
"""
if orig_sr == target_sr:
return y
if _HAS_SAMPLERATE:
y_hat = samplerate.resample(y,
float(target_sr) / orig_sr,
res_type)
else:
n_samples = len(y) * target_sr / orig_sr
y_hat = scipy.signal.resample(y, n_samples, axis=-1)
return y_hat
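A hypothetical call to this wrapper (the random array stands in for a real recording at 44.1 kHz):

import numpy as np

y = np.random.randn(44100).astype(np.float32)        # 1 s of fake audio at 44.1 kHz
y_22k = resample(y, 44100, 22050)                     # uses scikits.samplerate if available
y_8k = resample(y, 44100, 8000, res_type='linear')    # cheaper converter, lower quality
print("%d %d %d" % (len(y), len(y_22k), len(y_8k)))   # ~44100, ~22050, ~8000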
Example 7: analyze
def analyze(sig, samplerate=8000, resample_to=8000):
    e_min_scale = 0.3
    e_min_distance = 1
    e_a_scale = 0.5
    e_W_ms = 2000
    min_len = 30

    if samplerate != resample_to:
        print("resampling")
        sig = srate.resample(sig, resample_to / float(samplerate), 'sinc_best')

    frames, frame_size = ad.frames(sig, samplerate, 64)
    #ac_peaks = [ad.ac_peaks(frame) for frame in frames]
    #energy = ad.energies(frames, ...)
    #energy = log_energy

    print("getting normalized spectra")
    normalized_spectrum = ad.normalized_spectrum(frames, samplerate)

    print("spectral entropy")
    spectral_entropy = np.fromiter((ad.entropy(frame) for frame in normalized_spectrum),
                                   dtype=np.float64)
    entropy_t = np.percentile(spectral_entropy, 80)
    se_segments = ad.segments_to_seconds(ad.entropy_segment_indexes(spectral_entropy, entropy_t))

    print("energy based computations")
    energy, smooth, en_a, en_t, en_lmin = ad.energy_thresholds(
        sig, noise_dist=e_min_distance, a_scale=e_a_scale,
        min_scale=e_min_scale, W_ms=e_W_ms)
    energy_segments = ad.get_voice_segments(
        smooth, en_t, ad.get_segment_indexes(smooth, en_t, min_len=10))

    #energy_t = ad.thresholds(frames)
    #entropy_t = ad.entropy_t(frames)
    #energy_indexes = ad.get_segment_indexes(x, t, min_len=30)
    #entropy_indexes = ad.get_entropy_indexes(x, t, min_len=30)
    #combination_indexes = ad.get_combined_indexes(x, t, min_len=30)

    return spectral_entropy, entropy_t, se_segments, energy, en_t, energy_segments
Example 8: calculate_attributes
def calculate_attributes(self):
    source = self.source
    freq = self.frequency
    sampling_rate = float(source.sampling_rate)
    fft_sampling_rate = sampling_rate / float(source.fft_step_size)
    window_length = float(source.fft_window_size) / sampling_rate
    # FIXME real time should be passed in as an extra field
    self.start = window_length
    if self.first_frame > 0:
        self.start += (self.first_frame - 1) / fft_sampling_rate
    self.length = window_length
    if len(freq) > 1:
        # if 'length' not in self.__dict__:
        #     print self.__dict__
        #     print self.__dict__['length']
        self.length += (len(freq) - 1) / fft_sampling_rate
        # print self.length
    self.end = self.start + self.length
    for k in ('frequency', 'amplitude'):
        a = getattr(self, k)
        setattr(self, k + '_min', min(a))
        setattr(self, k + '_max', max(a))
        setattr(self, k + '_mean', sum(a) / len(a))
    freq_window = 2  # seconds
    freq_fft_size = 128
    resampled_freq = resample(freq, freq_window * freq_fft_size / fft_sampling_rate, 'sinc_fastest')  # FIXME truncate array?
    self.freq_fft = abs(rfft(resampled_freq, n=freq_fft_size, overwrite_x=True))[1:]
Example 9: generate_scale
def generate_scale(self):
    """
    Given the initial note, middle C, create the rest of the musical scale by
    resampling.

    Returns: Dictionary of the musical scale, with the key being the name of the
    note and the value being the corresponding Sound object.
    """
    pygame.mixer.init()
    wav = util.get_app_path() + "res/piano-c.wav"
    sound = pygame.mixer.Sound(wav)
    pygame.mixer.set_num_channels(32)
    sndarray = pygame.sndarray.array(sound)
    ratio_dict = {'low_c': 1, 'c_sharp': .944, 'd': .891, 'd_sharp': .841, 'e': .794,
                  'f': .749, 'f_sharp': .707, 'g': .667, 'g_sharp': .63, 'a': .594,
                  'a_sharp': .561, 'b': .53, 'high_c': .5}
    # Generate the Sound objects from the dictionary.
    scale = {}
    for key, value in ratio_dict.iteritems():
        smp = resample(sndarray, value, "sinc_fastest").astype(sndarray.dtype)
        # Use the key (the note name) to index the resulting Sound in the scale dict
        scale[key] = pygame.sndarray.make_sound(smp)
    self.scale_dict = scale
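The ratios in ratio_dict are just rounded equal-temperament factors: raising the pitch by n semitones corresponds to resampling by 2^(-n/12), so 'c_sharp' gets 2^(-1/12) ≈ 0.944 and 'high_c', a full octave up, gets 0.5. A quick check of where the numbers come from:

# Reproduce the ratio table from the equal-temperament rule 2**(-n/12).
notes = ['low_c', 'c_sharp', 'd', 'd_sharp', 'e', 'f', 'f_sharp',
         'g', 'g_sharp', 'a', 'a_sharp', 'b', 'high_c']
for n, name in enumerate(notes):
    print("%-8s %.3f" % (name, 2 ** (-n / 12.0)))   # 1.000, 0.944, 0.891, ..., 0.500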
Example 10: listen
def listen(stream, queue):
    try:
        while not exit:
            stream.start_stream()
            print('Listening...')
            for i in range(0, size):
                data = stream.read(frame)
                ar = numpy.fromstring(data, dtype=numpy.int16)
                # Downsample from 48 kHz to 16 kHz before queueing
                data2 = resample(ar, (16000. / 48000.), 'linear')
                queue.put(data2)
                # samps = numpy.fromstring(data, dtype=numpy.int16)
                # print (samps, queue.qsize())
                rms = audioop.rms(data, 2)
                print rms
            stream.stop_stream()
            if exit:
                sys.exit()
            queue.join()
    except IOError:
        print('ERROR!!!!')
        pass
    stream.stop_stream()
    stream.close()
    pyaud.terminate()
    print "----------------------------------------------------------------------------------------------------------------"
Example 11: wav_convert
def wav_convert(data, SR, tar_freq):
    # If the input signal is stereo, make it mono.
    if ndim(data) == 2:
        buff01 = 0.49 * (data[:, 0] + data[:, 1])
        wave_ts = array(buff01)
    else:
        wave_ts = array(data[:])
    wave_ts = array(wave_ts)
    # print "Shape of wave_ts", shape(wave_ts)
    up_SR = 44100
    ratio = float(float(tar_freq) / float(up_SR))
    print tar_freq
    print float(float(tar_freq) / float(up_SR))
    wave_ts = resample(wave_ts, ratio, 'linear')
    # SR_div = int(floor(up_SR / tar_freq))
    # wave_ts = sig.decimate(wave_ts, SR_div)
    # Transpose the data list.
    wave_ts = transpose(wave_ts)
    print 'length wave_ts', shape(wave_ts)
    return wave_ts
Example 12: __stream_audio_realtime
def __stream_audio_realtime(filepath, rate=44100):
    total_chunks = 0
    format = pyaudio.paInt16
    channels = 1 if sys.platform == 'darwin' else 2
    record_cap = 10  # seconds
    p = pyaudio.PyAudio()
    stream = p.open(format=format, channels=channels, rate=rate, input=True,
                    frames_per_buffer=ASR.chunk_size)
    print "o\t recording\t\t(Ctrl+C to stop)"
    try:
        # desired_sample_rate is an int; convert to float for the division.
        desired_rate = float(desired_sample_rate) / rate
        for i in range(0, rate / ASR.chunk_size * record_cap):
            data = stream.read(ASR.chunk_size)
            _raw_data = numpy.fromstring(data, dtype=numpy.int16)
            _resampled_data = resample(_raw_data, desired_rate, "sinc_best").astype(numpy.int16).tostring()
            total_chunks += len(_resampled_data)
            stdout.write("\r bytes sent: \t%d" % total_chunks)
            stdout.flush()
            yield _resampled_data
        stdout.write("\n\n")
    except KeyboardInterrupt:
        pass
    finally:
        print "x\t done recording"
        stream.stop_stream()
        stream.close()
        p.terminate()
def train_codebook(basedirectory,
spectral,
desired_fs,
clfs,
n_samples):
"""Train the codebooks.
Arguments:
:param basedirectory: root directory of the audio corpus
:param spectral:
Spectral feature extraction.
Object should be picklable and implement the
\c Spectral abc; i.e. provide a \c transform method.
:param clfs:
list of clusterers. valid clusterers have a \c fit method
and a \c predict method. optionally, for soft vq, also implement
a \c predict_proba method.
:param n_samples:
number of spectral frames to sample from the audio corpus.
:returns:
a list of Codebook objects, of same length as the output of spectral_func
"""
wavs = list(rglob(basedirectory, '*.wav'))
np.random.shuffle(wavs)
inds = None
idx = 0
X = None
for i, wav in enumerate(wavs):
if i % 10 == 0 and i > 0:
print 'samples: {3}/{4}; loading file: {0} ({1}/{2})'.format(
wavs[i],
i+1,
len(wavs),
X.shape[0],
n_samples
)
sig, fs, _ = audiolab.wavread(wav)
start, stop = trim_silence(sig, fs)
specs = spectral.transform(samplerate.resample(sig[start:stop],
desired_fs/fs,
'sinc_best'))
if inds is None:
inds = [0] + list(np.cumsum([spec.shape[1] for spec in specs]))
spec = np.hstack(specs)
if idx + spec.shape[0] >= n_samples:
spec = spec[:n_samples - idx, :]
if X is None:
X = spec
else:
X = np.vstack((X, spec))
idx += spec.shape[0]
if idx >= n_samples:
break
cdbs = [Codebook(clf) for clf in clfs]
for i, cdb in enumerate(cdbs):
cdb.train(X[:, inds[i]:inds[i+1]])
return cdbs
Example 14: resample
def resample(self, samplerate, resample_type="sinc_best"):
    """
    Returns a resampled version of the sound.
    """
    if not have_scikits_samplerate:
        raise ImportError("Need scikits.samplerate package for resampling")
    y = array(resample(self, float(samplerate / self.samplerate), resample_type),
              dtype=float64)
    return Sound(y, samplerate=samplerate)
Example 15: TestGBPDN2
def TestGBPDN2():
    # ________________________________________
    print 'Test: generalized basis pursuit decomposition'
    fs = 8000.
    btmp, fstmp, fmt = audiolab.wavread('glockenspiel.wav')
    b = samplerate.resample(btmp, fs / fstmp, 'sinc_best')
    L = len(b)
    A = GaborBlock(L, 1024)
    B = GaborBlock(A.M, 64)
    C = DictionaryUnion(A, B)
    b = np.hstack((b, np.zeros(C.M - L)))  # pad to block boundary
    spow = 10 * np.log10(b.conj().dot(b))
    # additive white noise
    snr = 15
    nvar = np.sum(np.abs(b) ** 2) / (10 ** (snr / 10.))  # ~1e-2
    n = np.sqrt(nvar) * np.random.randn(C.M) / np.sqrt(C.M)
    tonemap = np.reshape(range(A.N), (A.N / A.fftLen, A.fftLen)).transpose()
    transmap = np.reshape(range(B.N), (B.N / B.fftLen, B.fftLen)).transpose()
    f, fgrad = BP_factory()
    #f, fgrad = TT_factory(tonemap, transmap)
    xe = GBPDN_momentum(C, b + n, f, fgrad, maxerr=nvar, maxits=200, stoptol=1e-3,
                        muinit=1e-1, momentum=0.9, smoothinit=1e-5, anneal=0.96)
    ye = np.real(C.dot(xe))
    r = b - ye
    rpow = 10 * np.log10(r.conj().dot(r))
    print 'SNR = %f' % (spow - rpow)
    ynoise = np.array(samplerate.resample(b + n, fstmp / fs, 'sinc_best'), dtype='float64')
    ydenoise = np.array(samplerate.resample(ye, fstmp / fs, 'sinc_best'), dtype='float64')
    yetone = np.real(A.dot(xe[:tonemap.size]))
    yetone = np.array(samplerate.resample(yetone, fstmp / fs, 'sinc_best'), dtype='float64')
    yetrans = np.real(B.dot(xe[tonemap.size:]))
    yetrans = np.array(samplerate.resample(yetrans, fstmp / fs, 'sinc_best'), dtype='float64')
    print 'Error (should be <= %f): %f' % (nvar, np.sum(r ** 2))
    print '----------------------------------------'