This article collects and summarizes typical usage examples of the audioop.rms method in Python. If you have been wondering what audioop.rms does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples of the audioop module that this method belongs to.
The following lists 15 code examples of the audioop.rms method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
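Before the examples, a quick orientation on the call itself: audioop.rms(fragment, width) computes the root-mean-square of a buffer of signed integer samples, where width is the sample size in bytes (1, 2, or 4; the examples below all pass 2 for 16-bit audio). Note that the audioop module is deprecated since Python 3.11 and removed in Python 3.13, so these examples assume an older interpreter. A minimal self-contained sketch; the sine-wave buffer is made up for illustration and is not taken from any of the examples:

import audioop
import math
import struct

# Build one second of a 440 Hz sine wave as 16-bit signed samples.
rate = 8000
samples = [int(10000 * math.sin(2 * math.pi * 440 * t / rate)) for t in range(rate)]
frames = struct.pack('<%dh' % len(samples), *samples)

# width=2 because every sample occupies 2 bytes (16-bit audio).
print(audioop.rms(frames, 2))   # about 10000 / sqrt(2), i.e. roughly 7071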
Example 1: btn_detect
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def btn_detect():
    global btn_status
    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            GPIO.output(31, GPIO.HIGH)
            rc = ktkws.detect(content)
            rms = audioop.rms(content,2)
            #print('audio rms = %d' % (rms))
            GPIO.output(31, GPIO.LOW)
            if (btn_status == True):
                rc = 1
                btn_status = False
            if (rc == 1):
                GPIO.output(31, GPIO.HIGH)
                play_file("../data/sample_sound.wav")
                return 200
Example 2: detect
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def detect():
    global button
    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            #import binascii
            #print ("INBYTE: %s" % (binascii.hexlify(bytearray(content))))
            GPIO.output(31, GPIO.HIGH)
            rc = ktkws.detect(content)
            rms = audioop.rms(content,2)
            #print('audio rms = %d' % (rms))
            GPIO.output(31, GPIO.LOW)
            if (button == True):
                rc = 1
                button = False
            if (rc == 1):
                GPIO.output(31, GPIO.HIGH)
                play_file("../data/sample_sound.wav")
                return 200
Example 3: detect
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def detect():
    global button
    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            #import binascii
            #print ("INBYTE: %s" % (binascii.hexlify(bytearray(content))))
            GPIO.output(31, GPIO.HIGH)
            rc = ktkws.detect(content)
            rms = audioop.rms(content,2)
            #print('audio rms = %d' % (rms))
            GPIO.output(31, GPIO.LOW)
            if (button == True):
                rc = 1
                button = False
            if (rc == 1):
                GPIO.output(31, GPIO.HIGH)
                gt2vt.play_file("../data/sample_sound.wav")
                return 200
Example 4: btn_detect
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def btn_detect():
    global btn_status
    with MS.MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            GPIO.output(31, GPIO.HIGH)
            rc = ktkws.detect(content)
            rms = audioop.rms(content,2)
            #print('audio rms = %d' % (rms))
            GPIO.output(31, GPIO.LOW)
            if (btn_status == True):
                rc = 1
                btn_status = False
            if (rc == 1):
                GPIO.output(31, GPIO.HIGH)
                MS.play_file("../data/sample_sound.wav")
                return 200
Example 5: test_issue7673
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def test_issue7673(self):
    state = None
    for data, size in INVALID_DATA:
        size2 = size
        self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
        self.assertRaises(audioop.error, audioop.max, data, size)
        self.assertRaises(audioop.error, audioop.minmax, data, size)
        self.assertRaises(audioop.error, audioop.avg, data, size)
        self.assertRaises(audioop.error, audioop.rms, data, size)
        self.assertRaises(audioop.error, audioop.avgpp, data, size)
        self.assertRaises(audioop.error, audioop.maxpp, data, size)
        self.assertRaises(audioop.error, audioop.cross, data, size)
        self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
        self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.add, data, data, size)
        self.assertRaises(audioop.error, audioop.bias, data, size, 0)
        self.assertRaises(audioop.error, audioop.reverse, data, size)
        self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
        self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
        self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
Example 6: test_string
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def test_string(self):
    data = 'abcd'
    size = 2
    self.assertRaises(TypeError, audioop.getsample, data, size, 0)
    self.assertRaises(TypeError, audioop.max, data, size)
    self.assertRaises(TypeError, audioop.minmax, data, size)
    self.assertRaises(TypeError, audioop.avg, data, size)
    self.assertRaises(TypeError, audioop.rms, data, size)
    self.assertRaises(TypeError, audioop.avgpp, data, size)
    self.assertRaises(TypeError, audioop.maxpp, data, size)
    self.assertRaises(TypeError, audioop.cross, data, size)
    self.assertRaises(TypeError, audioop.mul, data, size, 1.0)
    self.assertRaises(TypeError, audioop.tomono, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.tostereo, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.add, data, data, size)
    self.assertRaises(TypeError, audioop.bias, data, size, 0)
    self.assertRaises(TypeError, audioop.reverse, data, size)
    self.assertRaises(TypeError, audioop.lin2lin, data, size, size)
    self.assertRaises(TypeError, audioop.ratecv, data, size, 1, 1, 1, None)
    self.assertRaises(TypeError, audioop.lin2ulaw, data, size)
    self.assertRaises(TypeError, audioop.lin2alaw, data, size)
    self.assertRaises(TypeError, audioop.lin2adpcm, data, size, None)
Example 7: __db_level
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def __db_level(self, rms_mode: bool = False) -> Tuple[float, float]:
    """
    Returns the average audio volume level measured in dB (range -60 dB to 0 dB)
    If the sample is stereo, you get back a tuple: (left_level, right_level)
    If the sample is mono, you still get a tuple but both values will be the same.
    This method is probably only useful if processed on very short sample fragments in sequence,
    so the dB levels could be used to show a level meter for the duration of the sample.
    """
    maxvalue = 2**(8*self.__samplewidth-1)
    if self.nchannels == 1:
        if rms_mode:
            peak_left = peak_right = (audioop.rms(self.__frames, self.__samplewidth)+1)/maxvalue
        else:
            peak_left = peak_right = (audioop.max(self.__frames, self.__samplewidth)+1)/maxvalue
    else:
        left_frames = audioop.tomono(self.__frames, self.__samplewidth, 1, 0)
        right_frames = audioop.tomono(self.__frames, self.__samplewidth, 0, 1)
        if rms_mode:
            peak_left = (audioop.rms(left_frames, self.__samplewidth)+1)/maxvalue
            peak_right = (audioop.rms(right_frames, self.__samplewidth)+1)/maxvalue
        else:
            peak_left = (audioop.max(left_frames, self.__samplewidth)+1)/maxvalue
            peak_right = (audioop.max(right_frames, self.__samplewidth)+1)/maxvalue
    # cut off at the bottom at -60 instead of all the way down to -infinity
    return max(20.0*math.log(peak_left, 10), -60.0), max(20.0*math.log(peak_right, 10), -60.0)
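For context on the math in this example: the peak (or RMS) value is first normalized to the 0..1 range by dividing by the largest possible sample value for the given sample width, and then converted to decibels with 20*log10(x), clamped at a floor of -60 dB. A minimal standalone sketch of that conversion, with a hand-made frames buffer that is purely illustrative:

import audioop
import math

samplewidth = 2
frames = b'\x00\x00\xff\x3f\x00\x00\x01\xc0'   # four 16-bit samples, made up for illustration
maxvalue = 2 ** (8 * samplewidth - 1)          # 32768 for 16-bit audio
peak = (audioop.rms(frames, samplewidth) + 1) / maxvalue
level_db = max(20.0 * math.log(peak, 10), -60.0)   # 0 dB = full scale, floored at -60 dB
print(level_db)                                # about -9 dB for this buffer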
Example 8: read
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def read(self, frame_size):
    self.frame_count += 1
    frame = self.buff.read(frame_size)
    if self.volume != 1:
        frame = self._frame_vol(frame, self.volume, maxv=2)
    if self.draw and not self.frame_count % self.frame_skip:
        # these should be processed for every frame, but "overhead"
        rms = audioop.rms(frame, 2)
        self.rmss.append(rms)
        max_rms = sorted(self.rmss)[-1]
        meter_text = 'avg rms: {:.2f}, max rms: {:.2f} '.format(avg(self.rmss), max_rms)
        self._pprint_meter(rms / max(1, max_rms), text=meter_text, shift=True)
    return frame
Example 9: get_microphone_level
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def get_microphone_level():
    """
    source: http://stackoverflow.com/questions/26478315/getting-volume-levels-from-pyaudio-for-use-in-arduino
    audioop.max alternative to audioop.rms
    """
    chunk = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    p = pyaudio.PyAudio()
    s = p.open(format=FORMAT,
               channels=CHANNELS,
               rate=RATE,
               input=True,
               frames_per_buffer=chunk)
    global levels
    while True:
        data = s.read(chunk)
        mx = audioop.rms(data, 2)
        if len(levels) >= 100:
            levels = []
        levels.append(mx)
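A hypothetical way to consume this function, assuming the module-level levels list that the global statement refers to; the thread setup below is not part of the original example:

import threading

levels = []   # shared with get_microphone_level() through the global statement

meter_thread = threading.Thread(target=get_microphone_level)
meter_thread.daemon = True    # do not keep the process alive just for the meter
meter_thread.start()

# elsewhere, e.g. inside a drawing loop, read the most recent RMS value:
# current = levels[-1] if levels else 0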
Example 10: mic_to_ws
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def mic_to_ws():  # uses stream
    try:
        print >> sys.stderr, "\nLISTENING TO MICROPHONE"
        last_state = None
        while True:
            data = stream.read(self.chunk)
            if self.audio_gate > 0:
                rms = audioop.rms(data, 2)
                if rms < self.audio_gate:
                    data = '\00' * len(data)
            #if sample_chan == 2:
            #    data = audioop.tomono(data, 2, 1, 1)
            if sample_rate != self.byterate:
                (data, last_state) = audioop.ratecv(data, 2, 1, sample_rate, self.byterate, last_state)
            self.send_data(data)
    except IOError, e:
        # usually a broken pipe
        print e
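This example is written in Python 2 syntax (the print statement and the except IOError, e form). The gating idea is to zero out chunks whose RMS falls below audio_gate, and audioop.ratecv then resamples the 16-bit mono stream to the target rate while threading its conversion state from call to call. A minimal standalone sketch of that resampling pattern, with rates and buffer contents chosen purely for illustration:

import audioop

state = None
inrate, outrate = 44100, 16000

def resample_chunk(chunk):
    # 16-bit mono; ratecv returns the converted bytes plus the state for the next call
    global state
    converted, state = audioop.ratecv(chunk, 2, 1, inrate, outrate, state)
    return converted

silence = b'\x00\x00' * 1024             # one chunk of 16-bit silence, for illustration
print(len(resample_chunk(silence)))      # roughly 1024 * 16000/44100 samples, times 2 bytes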
Example 11: adjust_for_ambient_noise
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def adjust_for_ambient_noise(self, source, duration = 1):
    """
    Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
    Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
    The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
    """
    assert isinstance(source, AudioSource), "Source must be an audio source"
    assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
    assert self.pause_threshold >= self.non_speaking_duration >= 0
    seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
    elapsed_time = 0
    # adjust energy threshold until a phrase starts
    while True:
        elapsed_time += seconds_per_buffer
        if elapsed_time > duration: break
        buffer = source.stream.read(source.CHUNK)
        energy = audioop.rms(buffer, source.SAMPLE_WIDTH)  # energy of the audio signal
        # dynamically adjust the energy threshold using asymmetric weighted average
        damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer  # account for different chunk sizes and rates
        target_energy = energy * self.dynamic_energy_ratio
        self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
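To make the weighted average above concrete: energy_threshold is an exponential moving average that drifts toward dynamic_energy_ratio times the measured ambient RMS, and raising the per-second damping factor to the power seconds_per_buffer keeps the decay rate independent of the chunk size. A small numeric sketch of one update step; all values are made up for illustration:

damping_per_second = 0.15          # hypothetical dynamic_energy_adjustment_damping
dynamic_energy_ratio = 1.5         # hypothetical multiplier applied to the measured RMS
seconds_per_buffer = 1024 / 16000  # e.g. 1024-sample chunks at 16 kHz

energy_threshold = 300.0           # current threshold
energy = 180.0                     # RMS of the latest ambient-noise buffer

damping = damping_per_second ** seconds_per_buffer   # about 0.886 for these numbers
target_energy = energy * dynamic_energy_ratio        # 270.0
energy_threshold = energy_threshold * damping + target_energy * (1 - damping)
print(energy_threshold)            # moves a small step from 300 toward 270 (about 296.6)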
Example 12: print_rms
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def print_rms(rms):
    out = ''
    for _ in range(int(round(rms/30))):
        out = out + '*'
    #print (out)
Example 13: generate_request
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def generate_request():
    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            message = gigagenieRPC_pb2.reqVoice()
            message.audioContent = content
            yield message
            rms = audioop.rms(content,2)
            print_rms(rms)
Example 14: print_rms
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def print_rms(rms):
    out = ''
    for _ in range(int(round(rms/30))):
        out = out + '*'
    #print (out)
Example 15: detect
# Required module: import audioop [as alias]
# Or: from audioop import rms [as alias]
def detect():
    with MicrophoneStream(RATE, CHUNK) as stream:
        audio_generator = stream.generator()
        for content in audio_generator:
            rc = ktkws.detect(content)
            rms = audioop.rms(content,2)
            #print('audio rms = %d' % (rms))
            if (rc == 1):
                play_file("../data/sample_sound.wav")
                return 200