本文整理汇总了Python中scikits.audiolab.wavread函数的典型用法代码示例。如果您正苦于以下问题:Python wavread函数的具体用法?Python wavread怎么用?Python wavread使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了wavread函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
def main():
# import soundfile
snd = wavread('trumpet.wav')[0]
kick = wavread('kick.wav')[0]
amb = wavread('amb.wav')[0]
amb = amb * 0.8 # reduce gain of this soundfile a little bit
print len(amb)
#low_demo(snd, 10., 500.)
#high_demo(snd, 10000., 10.)
#allpass_demo(snd, 1000, -find_c(1000., fs), find_c(1000., fs), 1.0)
#iir_comb_demo(kick, 100, 0.5, -0.5)
t = len(amb) / fs
period = 1.0 / fs
t_v = arange(0.0, t, period)
delayTime = 2.0
width = 1.0
freq = 1
breakPoint = (sin(2. * pi * freq * t_v))
#breakPoint = linspace(1, -1, len(amb))
#var_allpass_demo(snd, delayTime / 1000., width / 1000., -find_c(8000, fs), find_c(8000, fs), 1.0, breakPoint)
#var_allpass_demo(amb, delayTime / 1000., width / 1000., 0.5, -0.5, 0.0, breakPoint)
# flanger
var_allpass_demo(amb, delayTime, width, 0.7, 0.7, 0.7, breakPoint)
示例2: noise_reduce_test
def noise_reduce_test():
sample = wavread('../../sounds/single-bloop-trimmed.wav')[0]
noise = wavread('../../sounds/single-bloop-noise.wav')[0]
sample = bandpass(sample,30000,50000)
t0 = time.time()
sample = noise_reduce(sample,noise,NoiseReduceSettings())
print 'noise filter in time:', round(time.time() - t0,2)
'''
示例3: sound_wav
def sound_wav():
    """Plot the left-channel waveform of temp.wav and save it as wave_form.png.

    Long recordings (>= 500k samples) are downsampled to roughly 30k points so
    the plot stays responsive.
    """
    clf()
    (snd, sampFreq, nBits) = audiolab.wavread('temp.wav')
    signal = snd[:, 0]  # left channel only
    if len(signal) < 500000:
        wave_form = signal
        timeArray = arange(0, float(len(signal)), 1)
        timeArray = timeArray / sampFreq  # sample index -> seconds
    else:
        # '//' keeps integer semantics under both Python 2 and 3.
        downsample_factor = len(signal) // 30000
        # FIX: O(n) strided slice replaces the original O(n^2) repeated
        # list-concatenation loop (wave_form = wave_form + [signal[i]]).
        wave_form = signal[::downsample_factor]
        timeArray = arange(0, float(len(wave_form)), 1)
        timeArray = timeArray * downsample_factor / sampFreq
    timeArray = timeArray * 1000  # seconds -> milliseconds
    plot(timeArray, wave_form, color='k')
    ylabel('Amplitude')
    xlabel('Time (ms)')
    savefig('wave_form.png', bbox_inches=0)
    # show()
# setup('skream.wav')
# sound_wav()
# teardown()
示例4: __init__
def __init__(self, filepath):
    """Load the WAV file at *filepath* and record its basic properties."""
    self.filepath = filepath
    # wavread returns (samples, sample rate, encoding format).
    self.audio_array, self.sample_rate, self.format = wavread(filepath)
    self.name = os.path.basename(filepath)
    # Duration in seconds = frame count / sample rate.
    frame_count = len(self.audio_array)
    self.length = float(frame_count) / float(self.sample_rate)
示例5: wavread
def wavread(path):
    """
    Wrapper around scikits functions.
    Returns: wavdata, sample rate, encoding type
    See pyaudiolab or scikits.audiolab for more information
    """
    # Delegate directly to whichever audiolab backend the module bound.
    result = AUDIOLAB.wavread(path)
    return result
示例6: test_bad_wavread
def test_bad_wavread(self):
    """ Check wavread on bad file"""
    # Create a tmp audio file with non wav format, write some random data into it,
    # and check it can not be opened by wavread
    rfd, fd, cfilename = open_tmp_file('pysndfiletest.wav')
    try:
        nbuff = 22050
        noise = 0.1 * N.random.randn(nbuff)
        # Open the copy file for writing
        format = audio_format('aiff', 'pcm16')
        b = Sndfile(cfilename, 'w', format, 1, nbuff)
        b.write_frames(noise)
        b.close()
        # Round-trip the AIFF file to confirm it was written and is readable.
        b = Sndfile(cfilename, 'r')
        rcnoise = b.read_frames(nbuff)
        b.close()
        try:
            # wavread must reject the AIFF file; reaching the raise below
            # means it (wrongly) succeeded.
            rnoise = wavread(cfilename)[0]
            raise Exception("wavread on non wav file succeded, expected to fail")
        except ValueError, e:  # Python 2 except syntax; expected failure path
            pass
            #print str(e) + ", as expected"
    finally:
        # Always remove the temp file, even if an assertion above fired.
        close_tmp_file(rfd, cfilename)
示例7: computeFeaturesForFullSong
def computeFeaturesForFullSong(file_path, feature_list, pack_size):
    """
    Computes each of the features (must be full_song features) for the song recording.
    This method is used for one shot computation of a songs features.
    :param file_path: path of the WAV file to analyse
    :param feature_list: names of feature classes looked up on the `features` module
    :param pack_size: chunk size passed to chunks()
    :return: a list of values with length = len(feature_list). Each item is the
        resulting feature value corresponding to feature_list[].
    :raises TypeError: if any requested feature is not a full-song feature
    """
    # will hold the evaluated feature values
    feature_values = []
    raw_data, fs, enc = wavread(file_path)
    raw_chunks = chunks(raw_data, pack_size)
    for feature_name in feature_list:
        class_ = getattr(features, feature_name)
        if class_.requireFullSong is False:  # ensure full song
            # FIX: the original did `raise "..."`; raising a bare string is
            # itself a TypeError in Python >= 2.6, so raise a real exception.
            raise TypeError("Every feature must be a full song feature")
        feature = class_(raw_chunks)
        feature_values.append(feature.value)
    return feature_values
示例8: convert_wav
def convert_wav(File, ofile):
    """Convert a WAV file to mono 16-bit FLAC, resampled to the module rate `fr`.

    :param File: path of the source WAV file
    :param ofile: path of the FLAC file to create
    :return: the open audiolab.Sndfile the frames were written to
    """
    import scikits.audiolab as audiolab
    from scikits.samplerate import resample
    # latest scikits.audiolab includes sound-record support based on python-alsaaudio.
    # To downsample with scipy.signal instead:
    #import scipy.signal
    # Read the wav file; keep only (data, sample rate) of the returned triple.
    Signal, fs = audiolab.wavread(File)[:2]
    # Resample to the target rate `fr` (module global -- TODO confirm) in fast mode.
    Signal = resample(Signal, fr / float(fs), 'sinc_best')
    #Signal=scipy.signal.resample(Signal,int(round(len(Getsignal)*fr)/float(fs)),window=None)
    # file Format type
    fmt = audiolab.Format('flac', 'pcm16')
    nchannels = 1
    # FIX: the original opened an undefined global `FileNameTmp`, ignoring the
    # `ofile` parameter entirely; write to the caller-supplied path instead.
    out = audiolab.Sndfile(ofile, 'w', fmt, nchannels, fr)
    out.write_frames(Signal)
    return out
示例9: gather_training_data
def gather_training_data(path=SAMPLE_PATH):
    """Load labelled instrument samples from `path`.

    Expects one sub-directory per instrument containing .wav files whose names
    embed the pitch, e.g. "As4" or "C3".

    :param path: root sample directory (defaults to SAMPLE_PATH)
    :return: dict mapping instrument name -> list of (data, chroma, octave)
    """
    instr_names = os.walk(path).next()[1]
    samples = dict()
    # Raw string: chroma letter with optional sharp/flat, then octave digits.
    pitch_pattern = re.compile(r"([A-G][sb]?)(\d+)")
    # NOTE: Could potentially make subdirs for different qualities
    for instr in instr_names:
        #if instr not in ('guitar', 'trumpet'): continue
        instr_samples = []
        # FIX: honour the `path` argument (the original hard-coded SAMPLE_PATH)
        # and build paths portably instead of with a literal backslash.
        instr_sample_dir = os.path.join(path, instr)
        wav_files = [f for f in os.listdir(instr_sample_dir)
                     if os.path.isfile(os.path.join(instr_sample_dir, f))
                     and os.path.splitext(f)[1].lower() == ".wav"]
        for samp in wav_files:
            data, fs, enc = skal.wavread(os.path.join(instr_sample_dir, samp))
            matches = pitch_pattern.search(samp)
            assert matches is not None
            chroma, octave = matches.groups()
            chroma = canonical_chroma[chroma]
            # NOTE: It's quite possible that using a dictionary
            #       instead of a list will be helpful, but we'll
            #       cross that bridge when we get to it
            instr_samples.append((data, chroma, octave))
        samples[instr] = instr_samples
    return samples
示例10: main
def main():
    """
    Command-line entry point: parse reverb parameters, run the input soundfile
    through the reverb, and write the wet/dry-mixed result to the output path.
    """
    parser = argparse.ArgumentParser(description='Artificial Reverb')
    # Positional arguments: input soundfile first, output path second.
    parser.add_argument('soundfile', help='audio file to process', type=validInput)
    parser.add_argument('outfile', help='path to output file', type=validInput)
    # Optional reverb parameters, all floats with defaults.
    parser.add_argument('-w', '--wetdry', default=0.2, type=float, help='amount of wet signal in the mix')
    parser.add_argument('-da', '--damping', default=0.25, type=float, help='amount of high frequency damping')
    parser.add_argument('-de', '--decay', default=0.4, type=float, help='amount of attentuation applied to signal to make it decay')
    parser.add_argument('-pd', '--predelay', default=30, type=float, help='amount of time before starting reverb')
    parser.add_argument('-b', '--bandwidth', default=0.6, type=float, help='amount of high frequency attentuation on input')
    parser.add_argument('-t', '--tankoffset', default=0, type=float, help='amount of time (ms) to increase the last tank delay time')
    args = parser.parse_args()
    # Resolve the soundfile relative to the current working directory.
    inputPath = os.path.join(os.getcwd(), args.soundfile)
    # From here on, x is the input signal; keep an untouched copy for the mix.
    x, sampleRate, wavType = wavread(inputPath)
    dry = x.copy()
    wet = reverbTest(x, sampleRate, args.damping, args.decay, args.predelay,
                     args.bandwidth, args.tankoffset)
    # Blend dry and wet signals, then write the result.
    mixed = dryWet(dry, wet, args.wetdry)
    wavwrite(transpose(mixed), args.outfile, sampleRate)
示例11: _analyse
def _analyse(self, filepath):
    """Compute per-frame onset-detection features for a WAV file.

    Returns (pool, audio): an essentia Pool holding the 'hfc', 'complex',
    'rms' and 'flux' series, plus the mono float32 audio that was analysed.
    Frame/hop sizes come from self.frame_size / self.hop_size.
    """
    audio = to_mono(wavread(filepath)[0])
    audio = audio.astype('float32')  # essentia algorithms expect float32 input
    w = Windowing(type = 'hann')
    fft = FFT() # this gives us a complex FFT
    c2p = CartesianToPolar() # and this turns it into a pair (magnitude, phase)
    hfc_detect = OnsetDetection(method = 'hfc')
    complex_detect = OnsetDetection(method = 'complex')
    rms_detect = RMS()
    spec = Spectrum()
    #pd = PitchDetection()
    flux = Flux()
    pool = Pool()
    #wap = WarpedAutoCorrelation()
    # let's get down to business
    print 'Computing onset detection functions...'
    for frame in FrameGenerator(audio, frameSize = self.frame_size,\
                                hopSize = self.hop_size):
        # Magnitude/phase of the windowed frame feed both onset detectors.
        mag, phase, = c2p(fft(w(frame)))
        spectrum = spec(w(frame))
        f = flux(spectrum)
        #pitch = pd(spectrum)
        pool.add('hfc', hfc_detect(mag, phase))
        pool.add('complex', complex_detect(mag, phase))
        pool.add('rms', rms_detect(frame))
        pool.add('flux', f)
        #pool.add('pitch', pitch[0])
        #print pool['pitch']
        #pool.add('autoc', wap(pool['pitch']))
    return pool, audio
示例12: estimate_f0s
def estimate_f0s(self, audio_path):
    """Estimate per-frame fundamental frequencies and their note names.

    :param audio_path: path of the WAV file to analyse
    :return: (f0_estimations, notes) where notes[i] are the note names for
        the frequency estimates of frame i
    :raises ValueError: if audio_path does not exist
    """
    if not os.path.exists(audio_path):
        raise ValueError('Invalid audio path')
    samples, fs, _ = wavread(audio_path)
    # Down-mix multi-channel audio to mono by averaging the channels.
    if samples.ndim > 1:
        _, n_channels = samples.shape
        samples = samples.sum(axis=1) / n_channels
    spectrogram = self._stft(samples, fs)
    # Section 2.1: spectral whitening suppresses timbral information.
    whitened = self._spectral_whitening(spectrogram, fs)
    # Iterative estimation of the fundamental periods in the audio file.
    f0_estimations = self._iterative_est(whitened, fs)
    # Translate each frame's frequency estimates into note names.
    notes = [[self._freq_to_note(f) for f in frame_ests]
             for frame_ests in f0_estimations]
    return f0_estimations, notes
示例13: loadFiles
def loadFiles(path):
"""reads wave files from path and returns dictionary with fields:
- "name" - name of file
- "nameGender" - a sex readed from filename
- "signal" - numpy array with sound signal readed from file
- "sampleRate" - sample rate of the file
and dictionary that contains numbers of male and female voices
"""
print "reading files..."
files = [ f for f in listdir(path) if isfile(join(path,f)) and splitext(f)[1] == ".wav" ]
samples = []
maleCount = 0
femaleCount = 0
for f in files:
p = path + '/' + f
print "...", f
data,rate,encoding=wavread(p)
sig=[mean(d) for d in data]
samples.append({'name': f, 'nameGender': f[-5:-4], 'signal': sig, 'sampleRate': rate})
if f[-5:-4] == "M":
maleCount += 1
else:
femaleCount += 1
counters = {"maleCount":maleCount, "femaleCount":femaleCount}
return samples, counters
示例14: open_wav_audiolab
def open_wav_audiolab(self, filename):
#http://scikits.appspot.com/audiolab
from scikits.audiolab import wavread
results, sample_frequency,encoding = wavread(filename)
self.sample_rate = sample_frequency
print 'Sample Rate is ', sample_frequency
return results, self.sample_rate
示例15: normalize_target_audio
def normalize_target_audio(input_file='moviehires_endpos_beta02.imatsh.wav',
        sources_expr='/home/mkc/Music/GoldbergVariations/*48_1.wav', write_me=False, amp_factor=0.5, proc_audio=True):
    """
    Per-variation normalization of concatenated imatsh file using individual sources as locators
    Assumes that the input_file and the source_dir have the same sample rate
    inputs:
        input_file - the file to be processed (locally normalized)
        sources_expr- regular expression for input files
        write_me - write output files when true [False]
        amp_factor - amplitude change factor (proportion of full scale normalization) [0.5]
        proc_audio - whether to process target audio using source audio info [1]
    outputs:
        sample_locators - sample locators for each variation
        audio_summaries - min, max, rms values for each variation
    output files:
        output_file = {input_file_stem}+'norm.'+{input_ext}
    """
    # Compute min, max, rms per source file
    # NOTE(review): despite the docstring, each summary entry is [peak, rms]
    # (no min is stored) -- confirm against downstream consumers.
    flist = glob.glob(sources_expr)
    flist.sort()
    sample_locators = [0]  # running per-variation sample counts, cumsum'd on return
    audio_summaries = []
    ext_pos = input_file.rindex('.')
    outfile_stem, ext = input_file[:ext_pos], input_file[ext_pos+1:]
    for i,f in enumerate(flist):
        x,sr,fmt = skaud.wavread(f)
        print f, sr, fmt
        if(len(x.shape)>1):
            x = x[:,0] # Take left-channel only
        sample_locators.extend([len(x)])
        # Source summary: [peak, rms] for this variation.
        audio_summaries.append([max(abs(x)), np.sqrt(np.mean(x**2))])
        if proc_audio:
            # Read only this variation's slice of the concatenated target,
            # using the running sample counts as start/length locators.
            y,sr_y,fmt_y = skaud.wavread(input_file, first=np.cumsum(sample_locators)[-2], last=sample_locators[-1])
            if sr != sr_y:
                raise ValueError("input and source sample rates don't match: %d,%d"%(sr,sr_y))
            # Target-slice summary is appended AFTER the source entry, so the
            # [-1] lookups below refer to the target slice, not the source.
            audio_summaries.append([max(abs(y[:,0])), np.sqrt(np.mean(y[:,0]**2))])
            max_val = audio_summaries[-1][0]
            rms_val = audio_summaries[-1][1]
            # Blend full-scale normalization with unity gain by amp_factor.
            norm_cf = amp_factor / max_val + (1 - amp_factor)
            outfile = outfile_stem+'_%02d.%s'%(i+1,ext)
            max_amp_val = norm_cf * max_val
            rms_amp_val = norm_cf * rms_val
            print '%s: nrm=%05.2fdB, peak=%05.2fdB, *peak=%05.2fdB, rms=%05.2fdB, *rms=%05.2fdB'%(
                outfile, dB(norm_cf), dB(max_val), dB(max_amp_val), dB(rms_val), dB(rms_amp_val))
            if(write_me):
                skaud.wavwrite(norm_cf*y, outfile, sr, fmt)
    return np.cumsum(sample_locators), np.array(audio_summaries)