This article collects typical usage examples of Python's audioop.tomono method. If you are wondering how audioop.tomono is used in practice, what it does, or what real code that calls it looks like, the curated examples below may help. You can also explore further usage examples of the audioop module, which this method belongs to.
The following presents 14 code examples of audioop.tomono, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
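Before the examples, a quick refresher on the call itself: audioop.tomono(fragment, width, lfactor, rfactor) downmixes an interleaved stereo PCM fragment to mono, multiplying the left and right channels by the two factors and adding them. A minimal sketch with hypothetical sample values:

import audioop

# two interleaved 16-bit stereo frames (L, R, L, R), hypothetical values
stereo = b"".join(v.to_bytes(2, "little", signed=True) for v in (1000, 3000, -500, 700))

mono = audioop.tomono(stereo, 2, 0.5, 0.5)   # average both channels
left = audioop.tomono(stereo, 2, 1, 0)       # keep only the left channel

print(len(stereo), len(mono))  # the mono fragment is half the size: 8 -> 4 bytes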
Example 1: test_issue7673
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def test_issue7673(self):
    state = None
    for data, size in INVALID_DATA:
        size2 = size
        self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
        self.assertRaises(audioop.error, audioop.max, data, size)
        self.assertRaises(audioop.error, audioop.minmax, data, size)
        self.assertRaises(audioop.error, audioop.avg, data, size)
        self.assertRaises(audioop.error, audioop.rms, data, size)
        self.assertRaises(audioop.error, audioop.avgpp, data, size)
        self.assertRaises(audioop.error, audioop.maxpp, data, size)
        self.assertRaises(audioop.error, audioop.cross, data, size)
        self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
        self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.add, data, data, size)
        self.assertRaises(audioop.error, audioop.bias, data, size, 0)
        self.assertRaises(audioop.error, audioop.reverse, data, size)
        self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
        self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
        self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
Example 2: _play
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def _play(self, data, rate=16000, channels=1, width=2, spectrum=True):
    stream = self.pyaudio_instance.open(
        format=self.pyaudio_instance.get_format_from_width(width),
        channels=channels,
        rate=rate,
        output=True,
        # output_device_index=1,
        frames_per_buffer=CHUNK_SIZE,
    )
    if isinstance(data, types.GeneratorType):
        for d in data:
            if self.stop_event.is_set():
                break
            stream.write(d)
            if spectrum:
                if channels == 2:
                    d = audioop.tomono(d, 2, 0.5, 0.5)
                self.queue.put(d)
    else:
        stream.write(data)
    stream.close()
Example 3: test_string
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def test_string(self):
    data = 'abcd'
    size = 2
    self.assertRaises(TypeError, audioop.getsample, data, size, 0)
    self.assertRaises(TypeError, audioop.max, data, size)
    self.assertRaises(TypeError, audioop.minmax, data, size)
    self.assertRaises(TypeError, audioop.avg, data, size)
    self.assertRaises(TypeError, audioop.rms, data, size)
    self.assertRaises(TypeError, audioop.avgpp, data, size)
    self.assertRaises(TypeError, audioop.maxpp, data, size)
    self.assertRaises(TypeError, audioop.cross, data, size)
    self.assertRaises(TypeError, audioop.mul, data, size, 1.0)
    self.assertRaises(TypeError, audioop.tomono, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.tostereo, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.add, data, data, size)
    self.assertRaises(TypeError, audioop.bias, data, size, 0)
    self.assertRaises(TypeError, audioop.reverse, data, size)
    self.assertRaises(TypeError, audioop.lin2lin, data, size, size)
    self.assertRaises(TypeError, audioop.ratecv, data, size, 1, 1, 1, None)
    self.assertRaises(TypeError, audioop.lin2ulaw, data, size)
    self.assertRaises(TypeError, audioop.lin2alaw, data, size)
    self.assertRaises(TypeError, audioop.lin2adpcm, data, size, None)
Example 4: __db_level
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def __db_level(self, rms_mode: bool = False) -> Tuple[float, float]:
    """
    Returns the average audio volume level measured in dB (range -60 db to 0 db)
    If the sample is stereo, you get back a tuple: (left_level, right_level)
    If the sample is mono, you still get a tuple but both values will be the same.
    This method is probably only useful if processed on very short sample fragments in sequence,
    so the db levels could be used to show a level meter for the duration of the sample.
    """
    maxvalue = 2**(8*self.__samplewidth-1)
    if self.nchannels == 1:
        if rms_mode:
            peak_left = peak_right = (audioop.rms(self.__frames, self.__samplewidth)+1)/maxvalue
        else:
            peak_left = peak_right = (audioop.max(self.__frames, self.__samplewidth)+1)/maxvalue
    else:
        left_frames = audioop.tomono(self.__frames, self.__samplewidth, 1, 0)
        right_frames = audioop.tomono(self.__frames, self.__samplewidth, 0, 1)
        if rms_mode:
            peak_left = (audioop.rms(left_frames, self.__samplewidth)+1)/maxvalue
            peak_right = (audioop.rms(right_frames, self.__samplewidth)+1)/maxvalue
        else:
            peak_left = (audioop.max(left_frames, self.__samplewidth)+1)/maxvalue
            peak_right = (audioop.max(right_frames, self.__samplewidth)+1)/maxvalue
    # cut off at the bottom at -60 instead of all the way down to -infinity
    return max(20.0*math.log(peak_left, 10), -60.0), max(20.0*math.log(peak_right, 10), -60.0)
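The returned value is the usual 20 * log10(amplitude ratio) conversion, clamped at -60 dB. A standalone sketch of the same calculation on a bare mono 16-bit fragment (the buffers below are hypothetical, not taken from the Sample class above):

import audioop
import math

def db_level(frames: bytes, samplewidth: int = 2) -> float:
    # peak amplitude relative to full scale, clamped at -60 dB
    maxvalue = 2 ** (8 * samplewidth - 1)
    peak = (audioop.max(frames, samplewidth) + 1) / maxvalue
    return max(20.0 * math.log10(peak), -60.0)

silence = b"\x00\x00" * 100
loud = (20000).to_bytes(2, "little", signed=True) * 100
print(db_level(silence), db_level(loud))  # roughly -60.0 and about -4.3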
Example 5: normalize
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def normalize(self) -> 'Sample':
    """
    Normalize the sample, meaning: convert it to the default samplerate, sample width and number of channels.
    When mixing samples, they should all have the same properties, and this method is ideal to make sure of that.
    """
    if self.__locked:
        raise RuntimeError("cannot modify a locked sample")
    self.resample(params.norm_samplerate)
    if self.samplewidth != params.norm_samplewidth:
        # Convert to desired sample size.
        self.__frames = audioop.lin2lin(self.__frames, self.samplewidth, params.norm_samplewidth)
        self.__samplewidth = params.norm_samplewidth
    if params.norm_nchannels not in (1, 2):
        raise ValueError("norm_nchannels has invalid value, can only be 1 or 2")
    if self.nchannels == 1 and params.norm_nchannels == 2:
        # convert to stereo
        self.__frames = audioop.tostereo(self.__frames, self.samplewidth, 1, 1)
        self.__nchannels = 2
    elif self.nchannels == 2 and params.norm_nchannels == 1:
        # convert to mono
        self.__frames = audioop.tomono(self.__frames, self.__samplewidth, 1, 1)
        self.__nchannels = 1
    return self
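The same normalization steps can be applied to a bare fragment without the Sample wrapper. A sketch assuming the source width and channel count are known in advance; note it averages the channels (0.5/0.5) instead of summing them (1/1) as normalize() above does, which avoids clipping at the cost of halving the level:

import audioop

def to_16bit_mono(frames: bytes, src_width: int, src_channels: int) -> bytes:
    # widen/narrow every sample to 2 bytes, then downmix to one channel if needed
    frames = audioop.lin2lin(frames, src_width, 2)
    if src_channels == 2:
        frames = audioop.tomono(frames, 2, 0.5, 0.5)
    return frames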
Example 6: convert_stereo_to_mono
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def convert_stereo_to_mono(fragment, width):
    """
    Convert stereo fragment to mono.

    Parameters
    ----------
    fragment : bytes object
        Specifies the original fragment.
    width : int
        Specifies the fragment's original sampwidth.

    Returns
    -------
    bytes
        Converted audio in mono type.
    """
    new_fragment = audioop.tomono(fragment, width, 0.5, 0.5)
    return new_fragment
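A usage sketch that feeds the helper above with frames read from a stereo WAV file (the file name is hypothetical):

import wave

with wave.open("stereo_input.wav", "rb") as wav:  # hypothetical input file
    width = wav.getsampwidth()
    frames = wav.readframes(wav.getnframes())
    if wav.getnchannels() == 2:
        frames = convert_stereo_to_mono(frames, width)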
Example 7: mic_to_ws
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def mic_to_ws():  # uses stream
    try:
        print >> sys.stderr, "\nLISTENING TO MICROPHONE"
        last_state = None
        while True:
            data = stream.read(self.chunk)
            if self.audio_gate > 0:
                rms = audioop.rms(data, 2)
                if rms < self.audio_gate:
                    data = '\00' * len(data)
            #if sample_chan == 2:
            #    data = audioop.tomono(data, 2, 1, 1)
            if sample_rate != self.byterate:
                (data, last_state) = audioop.ratecv(data, 2, 1, sample_rate, self.byterate, last_state)
            self.send_data(data)
    except IOError, e:
        # usually a broken pipe
        print e
Example 8: read
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def read(self, size=-1):
    buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
    if not isinstance(buffer, bytes): buffer = b""  # workaround for https://bugs.python.org/issue24608
    sample_width = self.audio_reader.getsampwidth()
    if not self.little_endian:  # big endian format, convert to little endian on the fly
        if hasattr(audioop, "byteswap"):  # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
            buffer = audioop.byteswap(buffer, sample_width)
        else:  # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
            buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
    # workaround for https://bugs.python.org/issue12866
    if self.samples_24_bit_pretending_to_be_32_bit:  # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
        buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width))  # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
    if self.audio_reader.getnchannels() != 1:  # stereo audio
        buffer = audioop.tomono(buffer, sample_width, 1, 1)  # convert stereo audio data to mono
    return buffer
Example 9: test_tomono
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def test_tomono(self):
    for w in 1, 2, 4:
        data1 = datas[w]
        data2 = bytearray(2 * len(data1))
        for k in range(w):
            data2[k::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
        self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
        for k in range(w):
            data2[k+w::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
Example 10: _convert_file
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def _convert_file(self, src, dest=None):
    """
    convert wav into 8khz rate
    """
    def convert(read, write):
        write.setparams((1, 2, 8000, 0, 'NONE', 'not compressed'))
        o_fr = read.getframerate()
        o_chnl = read.getnchannels()
        t_fr = read.getnframes()
        data = read.readframes(t_fr)
        cnvrt = audioop.ratecv(data, 2, o_chnl,
                               o_fr, 8000, None)
        if o_chnl != 1:
            mono = audioop.tomono(cnvrt[0], 2, 1, 0)
            write.writeframes(mono)
        else:
            write.writeframes(cnvrt[0])
        read.close()
        write.close()

    if dest is None:
        temp = src + '.temp'
        os.rename(src, temp)
        read = wave.open(temp, 'r')
        write = wave.open(src, 'w')
        convert(read, write)
        os.remove(temp)
    else:
        read = wave.open(src, 'r')
        write = wave.open(dest, 'w')
        convert(read, write)
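The heart of the conversion above is audioop.ratecv. A standalone sketch of resampling a 16-bit fragment to 8 kHz (the input rate is an assumption; the state value is only needed when converting a stream chunk by chunk):

import audioop

def resample_to_8k(frames: bytes, src_rate: int, channels: int = 1) -> bytes:
    # ratecv returns (converted_fragment, new_state); pass None as the initial state
    converted, _state = audioop.ratecv(frames, 2, channels, src_rate, 8000, None)
    return converted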
Example 11: test_tomono
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def test_tomono(self):
    for w in 1, 2, 3, 4:
        data1 = datas[w]
        data2 = bytearray(2 * len(data1))
        for k in range(w):
            data2[k::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(data2, w, 1, 0), data1)
        self.assertEqual(audioop.tomono(data2, w, 0, 1), b'\0' * len(data1))
        for k in range(w):
            data2[k+w::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(data2, w, 0.5, 0.5), data1)
        self.assertEqual(audioop.tomono(bytearray(data2), w, 0.5, 0.5),
                         data1)
        self.assertEqual(audioop.tomono(memoryview(data2), w, 0.5, 0.5),
                         data1)
Example 12: mono
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def mono(self, left_factor: float = 1.0, right_factor: float = 1.0) -> 'Sample':
    """Make the sample mono (1-channel) applying the given left/right channel factors when downmixing"""
    if self.__locked:
        raise RuntimeError("cannot modify a locked sample")
    if self.__nchannels == 1:
        return self
    if self.__nchannels == 2:
        self.__frames = audioop.tomono(self.__frames, self.__samplewidth, left_factor, right_factor)
        self.__nchannels = 1
        return self
    raise ValueError("sample must be stereo or mono already")
Example 13: testtomono
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def testtomono(data):
    if verbose:
        print 'tomono'
    data2 = ''
    for d in data[0]:
        data2 = data2 + d + d
    if audioop.tomono(data2, 1, 0.5, 0.5) != data[0]:
        return 0
    return 1
Example 14: average_channels_stereo
# Required import: import audioop [as alias]
# or: from audioop import tomono [as alias]
def average_channels_stereo(data, sample_width):
    fmt = FORMAT[sample_width]
    arr = array(fmt, audioop.tomono(data, sample_width, 0.5, 0.5))
    return arr
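FORMAT here maps a sample width to an array typecode and is defined elsewhere in the source project. A plausible mapping and a short usage example, shown purely as an assumption:

from array import array
import audioop

FORMAT = {1: 'b', 2: 'h', 4: 'i'}  # assumed width-to-typecode table; the real one lives in the project

stereo = b"".join(v.to_bytes(2, "little", signed=True) for v in (100, 300))
arr = array(FORMAT[2], audioop.tomono(stereo, 2, 0.5, 0.5))
print(arr)  # array('h', [200]) on a little-endian machine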