This article collects typical usage examples of the Java method javax.sound.sampled.AudioInputStream.getFormat. If you are wondering what exactly AudioInputStream.getFormat does, how to call it, or what real-world usages look like, the curated method examples here may help. You can also read further about the class that declares the method, javax.sound.sampled.AudioInputStream.
The following presents 15 code examples of AudioInputStream.getFormat, sorted by popularity by default.
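Before the examples, the basic call pattern is easiest to see in isolation. The sketch below is not taken from any of the projects quoted here (the class name and file path are placeholders); it simply opens a stream with AudioSystem and prints the fields of the AudioFormat returned by getFormat:

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class FormatProbe {
    public static void main(String[] args) throws Exception {
        // "clip.wav" is a placeholder path, not a file from the examples below
        AudioInputStream ais = AudioSystem.getAudioInputStream(new File("clip.wav"));
        AudioFormat fmt = ais.getFormat();
        System.out.println("encoding   = " + fmt.getEncoding());
        System.out.println("sampleRate = " + fmt.getSampleRate());
        System.out.println("channels   = " + fmt.getChannels());
        System.out.println("sampleBits = " + fmt.getSampleSizeInBits());
        System.out.println("frameSize  = " + fmt.getFrameSize());
        ais.close();
    }
}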
Example 1: getAudioInputStream
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public AudioInputStream getAudioInputStream(Encoding targetEncoding,
                                            AudioInputStream sourceStream) {
    if (sourceStream.getFormat().getEncoding().equals(targetEncoding))
        return sourceStream;
    AudioFormat format = sourceStream.getFormat();
    int channels = format.getChannels();
    Encoding encoding = targetEncoding;
    float samplerate = format.getSampleRate();
    int bits = format.getSampleSizeInBits();
    boolean bigendian = format.isBigEndian();
    if (targetEncoding.equals(Encoding.PCM_FLOAT))
        bits = 32;
    AudioFormat targetFormat = new AudioFormat(encoding, samplerate, bits,
            channels, channels * bits / 8, samplerate, bigendian);
    return getAudioInputStream(targetFormat, sourceStream);
}
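Example 1 shows the provider side of a conversion; application code normally triggers it through AudioSystem instead. A minimal caller-side sketch (the class and method names are mine, and it assumes a float-PCM converter is installed):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

class FloatConversionDemo {
    // Request 32-bit float PCM from whatever encoding 'source' currently has;
    // a provider like the one in Example 1 serves this call if it is installed.
    static AudioInputStream toFloatPCM(AudioInputStream source) {
        return AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_FLOAT, source);
    }
}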
Example 2: ExtendedClip
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public ExtendedClip(JuggleMasterPro objPjuggleMasterPro, byte bytPsoundFileIndex) {
    this.bytGsoundFileIndex = bytPsoundFileIndex;
    try {
        final AudioInputStream objLaudioInputStream =
            AudioSystem.getAudioInputStream(new File(Strings.doConcat(objPjuggleMasterPro.strS_CODE_BASE,
                                                                      Constants.strS_FILE_NAME_A[Constants.intS_FILE_FOLDER_SOUNDS],
                                                                      objPjuggleMasterPro.chrGpathSeparator,
                                                                      Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex])));
        final AudioFormat objLaudioFormat = objLaudioInputStream.getFormat();
        final DataLine.Info objLdataLineInfo =
            new DataLine.Info(Clip.class, objLaudioFormat,
                              (int) objLaudioInputStream.getFrameLength() * objLaudioFormat.getFrameSize());
        this.objGclip = (Clip) AudioSystem.getLine(objLdataLineInfo);
        this.objGclip.open(objLaudioInputStream);
    } catch (final Throwable objPthrowable) {
        Tools.err("Error while initializing sound : ", Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex]);
        this.objGclip = null;
    }
}
Example 3: getPCMConvertedAudioInputStream
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public static AudioInputStream getPCMConvertedAudioInputStream(AudioInputStream ais) {
    // we can't open the device for non-PCM playback, so we have
    // to convert any other encodings to PCM here (at least we try!)
    AudioFormat af = ais.getFormat();
    if ((!af.getEncoding().equals(AudioFormat.Encoding.PCM_SIGNED)) &&
        (!af.getEncoding().equals(AudioFormat.Encoding.PCM_UNSIGNED))) {
        try {
            AudioFormat newFormat =
                new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                                af.getSampleRate(),
                                16,
                                af.getChannels(),
                                af.getChannels() * 2,
                                af.getSampleRate(),
                                Platform.isBigEndian());
            ais = AudioSystem.getAudioInputStream(newFormat, ais);
        } catch (Exception e) {
            if (Printer.err) e.printStackTrace();
            ais = null;
        }
    }
    return ais;
}
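The helper above quietly returns null when no converter is available, so a caller may want to check first. A sketch of such a check using AudioSystem.isConversionSupported with the same 16-bit signed target (the class name and the little-endian choice are mine):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

class PlaybackCheck {
    // True if 'ais' is already PCM, or an installed codec can convert it to
    // 16-bit signed PCM (little-endian picked arbitrarily for this sketch).
    static boolean canPlayAsPCM(AudioInputStream ais) {
        AudioFormat af = ais.getFormat();
        AudioFormat.Encoding enc = af.getEncoding();
        if (enc.equals(AudioFormat.Encoding.PCM_SIGNED)
                || enc.equals(AudioFormat.Encoding.PCM_UNSIGNED)) {
            return true;
        }
        AudioFormat pcm = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                af.getSampleRate(), 16, af.getChannels(),
                af.getChannels() * 2, af.getSampleRate(), false);
        return AudioSystem.isConversionSupported(pcm, af);
    }
}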
Example 4: getAudioFileTypes
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
@Override
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);
    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if (AudioFormat.Encoding.ALAW.equals(encoding) ||
        AudioFormat.Encoding.ULAW.equals(encoding) ||
        AudioFormat.Encoding.PCM_SIGNED.equals(encoding) ||
        AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        return filetypes;
    }
    return new AudioFileFormat.Type[0];
}
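Applications rarely ask an individual file writer; AudioSystem.getAudioFileTypes aggregates the answers of all installed writers for a stream's format. A small usage sketch (the helper class name is mine):

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

class WritableTypes {
    // Print every file type that some installed writer accepts for this stream.
    static void print(AudioInputStream stream) {
        for (AudioFileFormat.Type type : AudioSystem.getAudioFileTypes(stream)) {
            System.out.println("writable as: " + type);
        }
    }
}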
Example 5: getAudioFileTypes
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);
    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if ((AudioFormat.Encoding.ALAW.equals(encoding)) ||
        (AudioFormat.Encoding.ULAW.equals(encoding)) ||
        (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) ||
        (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding))) {
        return filetypes;
    }
    return new AudioFileFormat.Type[0];
}
Example 6: getSoundbank
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public Soundbank getSoundbank(AudioInputStream ais)
        throws InvalidMidiDataException, IOException {
    try {
        byte[] buffer;
        if (ais.getFrameLength() == -1) {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            byte[] buff = new byte[1024 - (1024 % ais.getFormat().getFrameSize())];
            int ret;
            while ((ret = ais.read(buff)) != -1) {
                baos.write(buff, 0, ret);
            }
            ais.close();
            buffer = baos.toByteArray();
        } else {
            buffer = new byte[(int) (ais.getFrameLength() * ais.getFormat().getFrameSize())];
            new DataInputStream(ais).readFully(buffer);
        }
        ModelByteBufferWavetable osc = new ModelByteBufferWavetable(
                new ModelByteBuffer(buffer), ais.getFormat(), -4800);
        ModelPerformer performer = new ModelPerformer();
        performer.getOscillators().add(osc);
        SimpleSoundbank sbk = new SimpleSoundbank();
        SimpleInstrument ins = new SimpleInstrument();
        ins.add(performer);
        sbk.addInstrument(ins);
        return sbk;
    } catch (Exception e) {
        return null;
    }
}
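A soundbank built this way is normally handed to a Synthesizer. The sketch below shows that follow-up step under the assumption that the default synthesizer accepts the bank (the class name is mine; error handling omitted):

import javax.sound.midi.MidiSystem;
import javax.sound.midi.MidiUnavailableException;
import javax.sound.midi.Soundbank;
import javax.sound.midi.Synthesizer;

class SoundbankLoader {
    // Load a soundbank (e.g. one returned by getSoundbank above) into the
    // default synthesizer so its single instrument can be played.
    static Synthesizer load(Soundbank sbk) throws MidiUnavailableException {
        Synthesizer synth = MidiSystem.getSynthesizer();
        synth.open();
        if (synth.isSoundbankSupported(sbk)) {
            synth.loadAllInstruments(sbk);
        }
        return synth; // the caller closes the synthesizer when finished
    }
}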
Example 7: enableHorn
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
private void enableHorn () throws UnsupportedAudioFileException, IOException, LineUnavailableException
{
    if ( ( this.clip == null || !this.clip.isRunning () ) && Activator.getDefault ().getPreferenceStore ().getBoolean ( PreferenceConstants.BELL_ACTIVATED_KEY ) )
    {
        final AudioInputStream sound = AudioSystem.getAudioInputStream ( this.soundFile );
        final DataLine.Info info = new DataLine.Info ( Clip.class, sound.getFormat () );
        this.clip = (Clip)AudioSystem.getLine ( info );
        this.clip.open ( sound );
        this.clip.loop ( Clip.LOOP_CONTINUOUSLY );
    }
    if ( !this.bellIcon.isDisposed () )
    {
        this.bellIcon.setImage ( getBellIcon () );
    }
}
Example 8: main
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
public static void main(String args[]) throws Exception {
    boolean res = true;
    try {
        AudioInputStream ais = new AudioInputStream(
                new ByteArrayInputStream(new byte[2000]),
                new AudioFormat(8000.0f, 8, 1, false, false), 2000);
        AudioFormat format = ais.getFormat();
        DataLine.Info info = new DataLine.Info(Clip.class, format,
                ((int) ais.getFrameLength() * format.getFrameSize()));
        Clip clip = (Clip) AudioSystem.getLine(info);
        clip.open();
        FloatControl rateControl = (FloatControl) clip.getControl(
                FloatControl.Type.SAMPLE_RATE);
        int c = 0;
        while (c++ < 10) {
            clip.stop();
            clip.setFramePosition(0);
            clip.start();
            for (float frq = 22000; frq < 44100; frq = frq + 100) {
                try {
                    Thread.currentThread().sleep(20);
                } catch (Exception e) {
                    break;
                }
                rateControl.setValue(frq);
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
        res = ex.getMessage().indexOf(
                "This method should not have been invoked!") < 0;
    }
    if (res) {
        System.out.println("Test passed");
    } else {
        System.out.println("Test failed");
        throw new Exception("Test failed");
    }
}
Example 9: getConvertedStream
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
/**
 * Opens the codec with the specified parameters.
 * @param stream stream from which data to be processed should be read
 * @param outputFormat desired data format of the stream after processing
 * @return stream from which processed data may be read
 * @throws IllegalArgumentException if the format combination supplied is
 * not supported.
 */
private AudioInputStream getConvertedStream(AudioFormat outputFormat, AudioInputStream stream) {
    AudioInputStream cs = null;
    AudioFormat inputFormat = stream.getFormat();
    if (inputFormat.matches(outputFormat)) {
        cs = stream;
    } else {
        cs = new AlawCodecStream(stream, outputFormat);
    }
    return cs;
}
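This private method sits behind the public conversion API; from application code the same A-law decode is requested through AudioSystem. A caller-side sketch (the class name is mine, and it assumes an A-law codec is installed):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

class AlawDecodeDemo {
    // Ask for a signed-PCM view of an A-law stream; AudioSystem locates a
    // FormatConversionProvider (such as the codec above) to do the work.
    static AudioInputStream decode(AudioInputStream alawStream) {
        return AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, alawStream);
    }
}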
Example 10: loadAudioData
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
private boolean loadAudioData(AudioInputStream as) throws IOException, UnsupportedAudioFileException {
    if (DEBUG || Printer.debug) Printer.debug("JavaSoundAudioClip->openAsClip()");
    // first possibly convert this stream to PCM
    as = Toolkit.getPCMConvertedAudioInputStream(as);
    if (as == null) {
        return false;
    }
    loadedAudioFormat = as.getFormat();
    long frameLen = as.getFrameLength();
    int frameSize = loadedAudioFormat.getFrameSize();
    long byteLen = AudioSystem.NOT_SPECIFIED;
    if (frameLen != AudioSystem.NOT_SPECIFIED
            && frameLen > 0
            && frameSize != AudioSystem.NOT_SPECIFIED
            && frameSize > 0) {
        byteLen = frameLen * frameSize;
    }
    if (byteLen != AudioSystem.NOT_SPECIFIED) {
        // if the stream length is known, it can be efficiently loaded into memory
        readStream(as, byteLen);
    } else {
        // otherwise we use a ByteArrayOutputStream to load it into memory
        readStream(as);
    }
    // if everything went fine, we have now the audio data in
    // loadedAudio, and the byte length in loadedAudioByteLength
    return true;
}
Example 11: getAudioFileFormat
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
/**
 * Returns the AudioFileFormat describing the file that will be written from this AudioInputStream.
 * Throws IllegalArgumentException if not supported.
 */
private AudioFileFormat getAudioFileFormat(Type type, AudioInputStream stream) {
    if (!isFileTypeSupported(type, stream)) {
        throw new IllegalArgumentException("File type " + type + " not supported.");
    }
    AudioFormat streamFormat = stream.getFormat();
    AudioFormat.Encoding encoding = streamFormat.getEncoding();
    if (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        encoding = AudioFormat.Encoding.PCM_SIGNED;
    }
    // We always write big endian au files, this is by far the standard
    AudioFormat format = new AudioFormat(encoding,
                                         streamFormat.getSampleRate(),
                                         streamFormat.getSampleSizeInBits(),
                                         streamFormat.getChannels(),
                                         streamFormat.getFrameSize(),
                                         streamFormat.getFrameRate(), true);
    int fileSize;
    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
        fileSize = (int) stream.getFrameLength() * streamFormat.getFrameSize() + AuFileFormat.AU_HEADERSIZE;
    } else {
        fileSize = AudioSystem.NOT_SPECIFIED;
    }
    return new AuFileFormat(Type.AU, fileSize, format,
                            (int) stream.getFrameLength());
}
Example 12: toLittleEndian
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
private AudioInputStream toLittleEndian(AudioInputStream ais) {
    AudioFormat format = ais.getFormat();
    AudioFormat targetFormat = new AudioFormat(format.getEncoding(),
            format.getSampleRate(), format.getSampleSizeInBits(),
            format.getChannels(), format.getFrameSize(), format.getFrameRate(),
            false);
    return AudioSystem.getAudioInputStream(targetFormat, ais);
}
Example 13: testAS
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
/**
 * Tests the part of the AudioSystem API which is implemented via
 * FormatConversionProvider.
 * <p>
 * AudioSystem always supports conversion to the same encoding/format.
 */
private static void testAS(final AudioFormat.Encoding enc,
                           final Boolean endian, final int sampleSize) {
    final AudioInputStream ais = getStream(enc, endian, sampleSize);
    final AudioFormat format = ais.getFormat();
    if (!AudioSystem.isConversionSupported(enc, format)) {
        throw new RuntimeException("Format: " + format);
    }
    if (!AudioSystem.isConversionSupported(format, format)) {
        throw new RuntimeException("Format: " + format);
    }
    AudioSystem.getAudioInputStream(enc, ais);
    AudioSystem.getAudioInputStream(format, ais);
}
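Besides the yes/no checks used in this test, AudioSystem can also enumerate the encodings reachable from a given format. A small sketch (the class name is mine):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

class TargetEncodings {
    // List every encoding the installed converters can produce from 'format'.
    static void print(AudioFormat format) {
        for (AudioFormat.Encoding enc : AudioSystem.getTargetEncodings(format)) {
            System.out.println(format.getEncoding() + " -> " + enc);
        }
    }
}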
Example 14: convertWaveToFlac
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
/**
 * Converts a wave file to a FLAC file (in order to POST the data to Google and retrieve a response) <br>
 * Sample Rate is 8000 by default
 *
 * @param inputFile Input wave file
 * @param outputFile Output FLAC file
 */
public void convertWaveToFlac(File inputFile, File outputFile) {
    StreamConfiguration streamConfiguration = new StreamConfiguration();
    streamConfiguration.setSampleRate(8000);
    streamConfiguration.setBitsPerSample(16);
    streamConfiguration.setChannelCount(1);
    try {
        AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(inputFile);
        AudioFormat format = audioInputStream.getFormat();
        int frameSize = format.getFrameSize();
        FLACEncoder flacEncoder = new FLACEncoder();
        FLACFileOutputStream flacOutputStream = new FLACFileOutputStream(outputFile);
        flacEncoder.setStreamConfiguration(streamConfiguration);
        flacEncoder.setOutputStream(flacOutputStream);
        flacEncoder.openFLACStream();
        int frameLength = (int) audioInputStream.getFrameLength();
        if (frameLength <= AudioSystem.NOT_SPECIFIED) {
            frameLength = 16384; // arbitrary buffer size when the stream length is unknown
        }
        int[] sampleData = new int[frameLength];
        byte[] samplesIn = new byte[frameSize];
        int i = 0;
        while (audioInputStream.read(samplesIn, 0, frameSize) != -1) {
            if (frameSize != 1) {
                ByteBuffer bb = ByteBuffer.wrap(samplesIn);
                bb.order(ByteOrder.LITTLE_ENDIAN);
                short shortVal = bb.getShort();
                sampleData[i] = shortVal;
            } else {
                sampleData[i] = samplesIn[0];
            }
            i++;
        }
        sampleData = truncateNullData(sampleData, i);
        flacEncoder.addSamples(sampleData, i);
        flacEncoder.encodeSamples(i, false);
        flacEncoder.encodeSamples(flacEncoder.samplesAvailableToEncode(), true);
        audioInputStream.close();
        flacOutputStream.close();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
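The encoder configuration above assumes 8 kHz, 16-bit, mono input, but the method never verifies that the wave file matches. A possible pre-check built on getFormat (the class name is mine; this is a sketch, not part of the original source):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;

class FlacInputCheck {
    // Example 14's encoder is configured for 8 kHz, 16-bit, mono input;
    // this lets a caller reject files that do not match before converting.
    static boolean matchesFlacConfig(AudioInputStream in) {
        AudioFormat f = in.getFormat();
        return f.getSampleRate() == 8000f
                && f.getSampleSizeInBits() == 16
                && f.getChannels() == 1;
    }
}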
Example 15: UlawCodecStream
import javax.sound.sampled.AudioInputStream; // the package/class the method depends on
UlawCodecStream(AudioInputStream stream, AudioFormat outputFormat) {
    super(stream, outputFormat, AudioSystem.NOT_SPECIFIED);

    AudioFormat inputFormat = stream.getFormat();

    // throw an IllegalArgumentException if not ok
    if (!(isConversionSupported(outputFormat, inputFormat))) {
        throw new IllegalArgumentException("Unsupported conversion: " + inputFormat.toString() + " to " + outputFormat.toString());
    }

    //$$fb 2002-07-18: fix for 4714846: JavaSound ULAW (8-bit) encoder erroneously depends on endian-ness
    boolean PCMIsBigEndian;

    // determine whether we are encoding or decoding
    if (AudioFormat.Encoding.ULAW.equals(inputFormat.getEncoding())) {
        encode = false;
        encodeFormat = inputFormat;
        decodeFormat = outputFormat;
        PCMIsBigEndian = outputFormat.isBigEndian();
    } else {
        encode = true;
        encodeFormat = outputFormat;
        decodeFormat = inputFormat;
        PCMIsBigEndian = inputFormat.isBigEndian();
        tempBuffer = new byte[tempBufferSize];
    }

    // setup tables according to byte order
    if (PCMIsBigEndian) {
        tabByte1 = ULAW_TABH;
        tabByte2 = ULAW_TABL;
        highByte = 0;
        lowByte = 1;
    } else {
        tabByte1 = ULAW_TABL;
        tabByte2 = ULAW_TABH;
        highByte = 1;
        lowByte = 0;
    }

    // set the AudioInputStream length in frames if we know it
    if (stream instanceof AudioInputStream) {
        frameLength = ((AudioInputStream) stream).getFrameLength();
    }
    // set framePos to zero
    framePos = 0;
    frameSize = inputFormat.getFrameSize();
    if (frameSize == AudioSystem.NOT_SPECIFIED) {
        frameSize = 1;
    }
}