This article collects typical usage examples of the Java method javax.sound.sampled.AudioFormat.getEncoding. If you are wondering what AudioFormat.getEncoding does, how to call it, or where to find real-world usage, the curated code examples below should help. You can also read further about the enclosing class, javax.sound.sampled.AudioFormat.
The following 14 code examples of AudioFormat.getEncoding are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
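Before the examples, here is a minimal sketch of the method itself: AudioFormat.getEncoding() reports the encoding of an audio stream's format, which the examples below use to decide how to write, convert, or resample the data. The class name and the file name sound.wav are only illustrative.

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class EncodingProbe {
    public static void main(String[] args) throws Exception {
        // Open an audio file and inspect the encoding of its stream format.
        try (AudioInputStream in = AudioSystem.getAudioInputStream(new File("sound.wav"))) {
            AudioFormat format = in.getFormat();
            AudioFormat.Encoding encoding = format.getEncoding();
            System.out.println("Encoding: " + encoding);
            System.out.println("Signed PCM: " + AudioFormat.Encoding.PCM_SIGNED.equals(encoding));
        }
    }
}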
Example 1: getAudioFileTypes
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
@Override
public Type[] getAudioFileTypes(AudioInputStream stream) {
    Type[] filetypes = new Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);

    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if (AudioFormat.Encoding.ALAW.equals(encoding)
            || AudioFormat.Encoding.ULAW.equals(encoding)
            || AudioFormat.Encoding.PCM_SIGNED.equals(encoding)
            || AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)
            || AudioFormat.Encoding.PCM_FLOAT.equals(encoding)) {
        return filetypes;
    }
    return new Type[0];
}
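In application code, a provider method like this is usually reached indirectly through AudioSystem rather than called on the provider itself. A minimal caller-side sketch (the class and method names are illustrative):

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

final class WritableTypeCheck {
    // Asks the installed providers whether the stream can be written as a WAVE file.
    static boolean canWriteAsWave(AudioInputStream stream) {
        return AudioSystem.isFileTypeSupported(AudioFileFormat.Type.WAVE, stream);
    }
}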
Example 2: convertAudioBytes
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
/**
 * Convert the audio bytes into a ByteBuffer in native byte order.
 *
 * @param format The audio format being decoded
 * @param audio_bytes The audio bytes
 * @param two_bytes_data True if the data uses two bytes (16 bits) per sample
 * @return The byte buffer of converted data
 */
private static ByteBuffer convertAudioBytes(AudioFormat format, byte[] audio_bytes, boolean two_bytes_data) {
    ByteBuffer dest = ByteBuffer.allocateDirect(audio_bytes.length);
    dest.order(ByteOrder.nativeOrder());
    ByteBuffer src = ByteBuffer.wrap(audio_bytes);
    src.order(ByteOrder.BIG_ENDIAN);
    if (two_bytes_data) {
        ShortBuffer dest_short = dest.asShortBuffer();
        ShortBuffer src_short = src.asShortBuffer();
        while (src_short.hasRemaining())
            dest_short.put(src_short.get());
    } else {
        while (src.hasRemaining()) {
            byte b = src.get();
            if (format.getEncoding() == Encoding.PCM_SIGNED) {
                b = (byte) (b + 127);
            }
            dest.put(b);
        }
    }
    dest.rewind();
    return dest;
}
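One detail worth noting in the 8-bit branch: signed PCM samples are offset by 127 before being stored, presumably to approximate the unsigned 8-bit representation the playback side expects (a symmetric signed-to-unsigned conversion would add 128, under which -128 would map to 0 instead of wrapping to 255). A small, standalone illustration of the arithmetic as written:

public class SignedByteOffsetDemo {
    public static void main(String[] args) {
        // With an offset of 127: -128 wraps to 255, -127 maps to 0, 0 maps to 127, 127 maps to 254.
        for (byte b : new byte[] {-128, -127, 0, 127}) {
            byte shifted = (byte) (b + 127);
            System.out.printf("%4d -> %3d%n", b, shifted & 0xFF);
        }
    }
}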
Example 3: WaveFileFormat
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
WaveFileFormat(AudioFileFormat.Type type, int lengthInBytes, AudioFormat format, int lengthInFrames) {
    super(type, lengthInBytes, format, lengthInFrames);

    AudioFormat.Encoding encoding = format.getEncoding();

    if (encoding.equals(AudioFormat.Encoding.ALAW)) {
        waveType = WAVE_FORMAT_ALAW;
    } else if (encoding.equals(AudioFormat.Encoding.ULAW)) {
        waveType = WAVE_FORMAT_MULAW;
    } else if (encoding.equals(AudioFormat.Encoding.PCM_SIGNED) ||
               encoding.equals(AudioFormat.Encoding.PCM_UNSIGNED)) {
        waveType = WAVE_FORMAT_PCM;
    } else {
        waveType = WAVE_FORMAT_UNKNOWN;
    }
}
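The WAVE_FORMAT_* constants are not part of this excerpt; if they follow the standard RIFF/WAVE format tags, their values would be as sketched below (this is an assumption about code not shown here):

final class WaveFormatTags {
    // Standard RIFF/WAVE format tags; assumed to match the constants used in the example above.
    static final int WAVE_FORMAT_UNKNOWN = 0x0000;
    static final int WAVE_FORMAT_PCM     = 0x0001;
    static final int WAVE_FORMAT_ALAW    = 0x0006;
    static final int WAVE_FORMAT_MULAW   = 0x0007;
}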
Example 4: getAudioFileTypes
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);

    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if ((AudioFormat.Encoding.ALAW.equals(encoding)) ||
        (AudioFormat.Encoding.ULAW.equals(encoding)) ||
        (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) ||
        (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding))) {
        return filetypes;
    }

    return new AudioFileFormat.Type[0];
}
Example 5: getAudioFileTypes
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);

    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if (AudioFormat.Encoding.ALAW.equals(encoding) ||
        AudioFormat.Encoding.ULAW.equals(encoding) ||
        AudioFormat.Encoding.PCM_SIGNED.equals(encoding) ||
        AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        return filetypes;
    }

    return new AudioFileFormat.Type[0];
}
Example 6: AudioFloatInputStreamChannelMixer
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
AudioFloatInputStreamChannelMixer(AudioFloatInputStream ais, int targetChannels) {
    this.sourceChannels = ais.getFormat().getChannels();
    this.targetChannels = targetChannels;
    this.ais = ais;
    AudioFormat format = ais.getFormat();
    targetFormat = new AudioFormat(format.getEncoding(),
            format.getSampleRate(),
            format.getSampleSizeInBits(),
            targetChannels,
            (format.getFrameSize() / sourceChannels) * targetChannels,
            format.getFrameRate(),
            format.isBigEndian());
}
Example 7: convertAudioInputStreamToPcmSigned
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
private void convertAudioInputStreamToPcmSigned() {
    AudioFormat format = audioInputStream.getFormat();
    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
        audioInputStream = AudioSystem.getAudioInputStream(
                new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                        format.getSampleRate(),
                        16,     // sample size in bits
                        1,      // channels (mono)
                        2,      // frame size in bytes
                        format.getSampleRate(),
                        false), // little-endian
                audioInputStream);
    }
}
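A conversion like the one above fails at runtime with an IllegalArgumentException if no installed converter handles the source format. A defensive variant, sketched here under the assumption that returning the stream unchanged is acceptable, checks support first:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

final class PcmSignedConversion {
    // Converts to 16-bit signed little-endian mono PCM when a converter is available.
    static AudioInputStream toPcmSignedMono(AudioInputStream source) {
        AudioFormat src = source.getFormat();
        AudioFormat target = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                src.getSampleRate(), 16, 1, 2, src.getSampleRate(), false);
        if (AudioSystem.isConversionSupported(target, src)) {
            return AudioSystem.getAudioInputStream(target, source);
        }
        return source; // no converter installed; caller keeps the original stream
    }
}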
Example 8: AuFileFormat
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
AuFileFormat(final AudioFileFormat.Type type, final long byteLength,
             final AudioFormat format, final long frameLength) {
    super(type, byteLength, format, frameLength);
    AudioFormat.Encoding encoding = format.getEncoding();

    auType = -1;

    if (AudioFormat.Encoding.ALAW.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_ALAW_8;
        }
    } else if (AudioFormat.Encoding.ULAW.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_ULAW_8;
        }
    } else if (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_LINEAR_8;
        } else if (format.getSampleSizeInBits() == 16) {
            auType = AU_LINEAR_16;
        } else if (format.getSampleSizeInBits() == 24) {
            auType = AU_LINEAR_24;
        } else if (format.getSampleSizeInBits() == 32) {
            auType = AU_LINEAR_32;
        }
    } else if (AudioFormat.Encoding.PCM_FLOAT.equals(encoding)) {
        if (format.getSampleSizeInBits() == 32) {
            auType = AU_FLOAT;
        }
    }
}
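The AU_* constants are defined elsewhere in AuFileFormat; if they follow the encoding field values of the Sun .au header, they would be the ones below (assumed values, listed only for orientation):

final class AuEncodingCodes {
    // Sun .au header encoding values; assumed to match the AU_* constants referenced above.
    static final int AU_ULAW_8    = 1;  // 8-bit G.711 mu-law
    static final int AU_LINEAR_8  = 2;  // 8-bit linear PCM
    static final int AU_LINEAR_16 = 3;  // 16-bit linear PCM
    static final int AU_LINEAR_24 = 4;  // 24-bit linear PCM
    static final int AU_LINEAR_32 = 5;  // 32-bit linear PCM
    static final int AU_FLOAT     = 6;  // 32-bit IEEE float
    static final int AU_ALAW_8    = 27; // 8-bit G.711 A-law
}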
Example 9: AudioFloatInputStreamResampler
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
public AudioFloatInputStreamResampler(AudioFloatInputStream ais, AudioFormat format) {
    this.ais = ais;
    AudioFormat sourceFormat = ais.getFormat();
    targetFormat = new AudioFormat(sourceFormat.getEncoding(),
            format.getSampleRate(),
            sourceFormat.getSampleSizeInBits(),
            sourceFormat.getChannels(),
            sourceFormat.getFrameSize(),
            format.getSampleRate(),
            sourceFormat.isBigEndian());
    nrofchannels = targetFormat.getChannels();

    Object interpolation = format.getProperty("interpolation");
    if (interpolation != null && (interpolation instanceof String)) {
        String resamplerType = (String) interpolation;
        if (resamplerType.equalsIgnoreCase("point"))
            this.resampler = new SoftPointResampler();
        if (resamplerType.equalsIgnoreCase("linear"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("linear1"))
            this.resampler = new SoftLinearResampler();
        if (resamplerType.equalsIgnoreCase("linear2"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("cubic"))
            this.resampler = new SoftCubicResampler();
        if (resamplerType.equalsIgnoreCase("lanczos"))
            this.resampler = new SoftLanczosResampler();
        if (resamplerType.equalsIgnoreCase("sinc"))
            this.resampler = new SoftSincResampler();
    }
    if (resampler == null)
        resampler = new SoftLinearResampler2();

    pitch[0] = sourceFormat.getSampleRate() / format.getSampleRate();
    pad = resampler.getPadding();
    pad2 = pad * 2;
    ibuffer = new float[nrofchannels][buffer_len + pad2];
    ibuffer2 = new float[nrofchannels * buffer_len];
    ibuffer_index = buffer_len + pad;
    ibuffer_len = buffer_len;
}
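The "interpolation" value read above is an AudioFormat property. A caller can attach it through the AudioFormat constructor that takes a properties map; whether a particular value is honoured depends on the resampler implementations available, so this is only a sketch:

import java.util.Map;
import javax.sound.sampled.AudioFormat;

final class ResamplerHint {
    // Copies a format and attaches an "interpolation" hint to its property map.
    static AudioFormat withInterpolation(AudioFormat base, String type) {
        return new AudioFormat(base.getEncoding(), base.getSampleRate(),
                base.getSampleSizeInBits(), base.getChannels(), base.getFrameSize(),
                base.getFrameRate(), base.isBigEndian(),
                Map.<String, Object>of("interpolation", type)); // e.g. "linear", "cubic", "sinc"
    }
}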
Example 10: toLittleEndian
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
private AudioInputStream toLittleEndian(AudioInputStream ais) {
    AudioFormat format = ais.getFormat();
    AudioFormat targetFormat = new AudioFormat(format.getEncoding(),
            format.getSampleRate(),
            format.getSampleSizeInBits(),
            format.getChannels(),
            format.getFrameSize(),
            format.getFrameRate(),
            false);
    return AudioSystem.getAudioInputStream(targetFormat, ais);
}
Example 11: AudioFloatInputStreamResampler
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
AudioFloatInputStreamResampler(AudioFloatInputStream ais, AudioFormat format) {
    this.ais = ais;
    AudioFormat sourceFormat = ais.getFormat();
    targetFormat = new AudioFormat(sourceFormat.getEncoding(),
            format.getSampleRate(),
            sourceFormat.getSampleSizeInBits(),
            sourceFormat.getChannels(),
            sourceFormat.getFrameSize(),
            format.getSampleRate(),
            sourceFormat.isBigEndian());
    nrofchannels = targetFormat.getChannels();

    Object interpolation = format.getProperty("interpolation");
    if (interpolation != null && (interpolation instanceof String)) {
        String resamplerType = (String) interpolation;
        if (resamplerType.equalsIgnoreCase("point"))
            this.resampler = new SoftPointResampler();
        if (resamplerType.equalsIgnoreCase("linear"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("linear1"))
            this.resampler = new SoftLinearResampler();
        if (resamplerType.equalsIgnoreCase("linear2"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("cubic"))
            this.resampler = new SoftCubicResampler();
        if (resamplerType.equalsIgnoreCase("lanczos"))
            this.resampler = new SoftLanczosResampler();
        if (resamplerType.equalsIgnoreCase("sinc"))
            this.resampler = new SoftSincResampler();
    }
    if (resampler == null)
        resampler = new SoftLinearResampler2();

    pitch[0] = sourceFormat.getSampleRate() / format.getSampleRate();
    pad = resampler.getPadding();
    pad2 = pad * 2;
    ibuffer = new float[nrofchannels][buffer_len + pad2];
    ibuffer2 = new float[nrofchannels * buffer_len];
    ibuffer_index = buffer_len + pad;
    ibuffer_len = buffer_len;
}
Example 12: AuFileFormat
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
AuFileFormat(AudioFileFormat.Type type, int lengthInBytes, AudioFormat format, int lengthInFrames) {
    super(type, lengthInBytes, format, lengthInFrames);

    AudioFormat.Encoding encoding = format.getEncoding();

    auType = -1;

    if (AudioFormat.Encoding.ALAW.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_ALAW_8;
        }
    } else if (AudioFormat.Encoding.ULAW.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_ULAW_8;
        }
    } else if (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) {
        if (format.getSampleSizeInBits() == 8) {
            auType = AU_LINEAR_8;
        } else if (format.getSampleSizeInBits() == 16) {
            auType = AU_LINEAR_16;
        } else if (format.getSampleSizeInBits() == 24) {
            auType = AU_LINEAR_24;
        } else if (format.getSampleSizeInBits() == 32) {
            auType = AU_LINEAR_32;
        }
    }
}
Example 13: getAudioFileFormat
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
/**
 * Returns the AudioFileFormat describing the file that will be written from this AudioInputStream.
 * Throws IllegalArgumentException if not supported.
 */
private AudioFileFormat getAudioFileFormat(Type type, AudioInputStream stream) {
    if (!isFileTypeSupported(type, stream)) {
        throw new IllegalArgumentException("File type " + type + " not supported.");
    }
    AudioFormat streamFormat = stream.getFormat();
    AudioFormat.Encoding encoding = streamFormat.getEncoding();

    if (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        encoding = AudioFormat.Encoding.PCM_SIGNED;
    }

    // We always write big-endian au files, this is by far the standard
    AudioFormat format = new AudioFormat(encoding,
                                         streamFormat.getSampleRate(),
                                         streamFormat.getSampleSizeInBits(),
                                         streamFormat.getChannels(),
                                         streamFormat.getFrameSize(),
                                         streamFormat.getFrameRate(), true);

    int fileSize;
    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
        fileSize = (int) stream.getFrameLength() * streamFormat.getFrameSize() + AuFileFormat.AU_HEADERSIZE;
    } else {
        fileSize = AudioSystem.NOT_SPECIFIED;
    }

    return new AuFileFormat(Type.AU, fileSize, format, (int) stream.getFrameLength());
}
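An AudioFileFormat like the one built above is normally produced on the way into AudioSystem.write. A minimal caller-side sketch (the helper name and output file are illustrative):

import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

final class AuWriterUsage {
    // Writes the stream as a Sun .au file when some installed provider supports it.
    static void writeAu(AudioInputStream stream, File out) throws IOException {
        if (AudioSystem.isFileTypeSupported(AudioFileFormat.Type.AU, stream)) {
            AudioSystem.write(stream, AudioFileFormat.Type.AU, out);
        }
    }
}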
Example 14: isConversionSupported
import javax.sound.sampled.AudioFormat; // import of the package/class the method depends on
/**
 * Indicates whether the format converter supports conversion to one
 * particular format from another.
 *
 * @param targetFormat desired format of outgoing data
 * @param sourceFormat format of the incoming data
 * @return {@code true} if the conversion is supported, otherwise
 *         {@code false}
 * @throws NullPointerException if {@code targetFormat} or
 *         {@code sourceFormat} are {@code null}
 */
public boolean isConversionSupported(final AudioFormat targetFormat,
                                     final AudioFormat sourceFormat) {
    final Encoding targetEncoding = targetFormat.getEncoding();
    return Stream.of(getTargetFormats(targetEncoding, sourceFormat))
            .anyMatch(targetFormat::matches);
}
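From application code the same question is usually asked through AudioSystem, which consults every registered FormatConversionProvider. A minimal sketch:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

final class ConversionCheck {
    // True if any registered converter can turn the source format into the target format.
    static boolean canConvert(AudioFormat source, AudioFormat target) {
        return AudioSystem.isConversionSupported(target, source);
    }
}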