This article collects typical usage examples of the Java method javax.sound.sampled.AudioInputStream.getFrameLength. If you are unsure what AudioInputStream.getFrameLength does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, javax.sound.sampled.AudioInputStream.
The following 15 code examples of AudioInputStream.getFrameLength are listed in order of popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
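Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: query getFrameLength(), compare it with AudioSystem.NOT_SPECIFIED, and derive the byte length or duration from the frame count and the AudioFormat. The file name sound.wav is only a placeholder for this sketch.

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class FrameLengthDemo {
    public static void main(String[] args) throws Exception {
        // "sound.wav" is just a placeholder; any format with an installed reader works.
        AudioInputStream ais = AudioSystem.getAudioInputStream(new File("sound.wav"));
        AudioFormat format = ais.getFormat();
        long frames = ais.getFrameLength();
        if (frames == AudioSystem.NOT_SPECIFIED) {
            // Streamed or headerless sources may not report a frame count.
            System.out.println("Frame length not specified");
        } else {
            long totalBytes = frames * format.getFrameSize();  // bytes = frames * frame size
            double seconds = frames / format.getFrameRate();   // duration = frames / frame rate
            System.out.println(frames + " frames, " + totalBytes + " bytes, " + seconds + " s");
        }
        ais.close();
    }
}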
Example 1: save
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Finishes the recording process by closing the file.
*/
public AudioRecordingStream save()
{
try
{
aos.close();
}
catch (IOException e)
{
Minim.error("AudioRecorder.save: An error occurred when trying to save the file:\n"
+ e.getMessage());
}
String filePath = filePath();
AudioInputStream ais = system.getAudioInputStream(filePath);
SourceDataLine sdl = system.getSourceDataLine(ais.getFormat(), 1024);
// this is fine because the recording will always be
// in a raw format (WAV, AU, etc).
long length = AudioUtils.frames2Millis(ais.getFrameLength(), format);
BasicMetaData meta = new BasicMetaData(filePath, length, ais.getFrameLength());
JSPCMAudioRecordingStream recording = new JSPCMAudioRecordingStream(system, meta, ais, sdl, 1024);
return recording;
}
Example 2: write
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
public int write(AudioInputStream stream, AudioFileFormat.Type fileType, OutputStream out) throws IOException {
//$$fb the following check must come first ! Otherwise
// the next frame length check may throw an IOException and
// interrupt iterating File Writers. (see bug 4351296)
// throws IllegalArgumentException if not supported
WaveFileFormat waveFileFormat = (WaveFileFormat)getAudioFileFormat(fileType, stream);
//$$fb when we got this far, we are committed to write this file
// we must know the total data length to calculate the file length
if( stream.getFrameLength() == AudioSystem.NOT_SPECIFIED ) {
throw new IOException("stream length not specified");
}
int bytesWritten = writeWaveFile(stream, waveFileFormat, out);
return bytesWritten;
}
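The writer above refuses any stream whose frame length is AudioSystem.NOT_SPECIFIED. A caller who only has raw PCM data can satisfy that check by wrapping the data in an AudioInputStream whose frame count is passed explicitly. The sketch below is not part of the writer source; the format values and the file name out.wav are assumptions for illustration.

import java.io.ByteArrayInputStream;
import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class WriteWithKnownLength {
    public static void main(String[] args) throws Exception {
        // One second of 8 kHz, 16-bit, mono silence (values chosen only for this sketch).
        AudioFormat fmt = new AudioFormat(8000f, 16, 1, true, false);
        byte[] pcm = new byte[8000 * fmt.getFrameSize()];
        // Passing the frame count explicitly makes getFrameLength() well defined,
        // so the NOT_SPECIFIED check in the writer above will pass.
        AudioInputStream ais = new AudioInputStream(
                new ByteArrayInputStream(pcm), fmt, pcm.length / fmt.getFrameSize());
        AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File("out.wav"));
    }
}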
Example 3: testAIS
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Tests the {@code AudioInputStream} fetched from the fake header.
* <p>
* Note that the frameLength is stored as long which means that {@code
* AudioInputStream} must store all possible data from aiff file.
*/
private static void testAIS(final byte bits, final int rate,
final int channel, final long frameLength)
throws Exception {
final byte[] header = createHeader(bits, rate, channel, frameLength);
final ByteArrayInputStream fake = new ByteArrayInputStream(header);
final AudioInputStream ais = AudioSystem.getAudioInputStream(fake);
final AudioFormat format = ais.getFormat();
if (frameLength != ais.getFrameLength()) {
System.err.println("Expected: " + frameLength);
System.err.println("Actual: " + ais.getFrameLength());
throw new RuntimeException();
}
if (ais.available() < 0) {
System.err.println("available should be >=0: " + ais.available());
throw new RuntimeException();
}
validateFormat(bits, rate, channel, format);
}
Example 4: write
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
public int write(AudioInputStream stream, AudioFileFormat.Type fileType, OutputStream out) throws IOException {
//$$fb the following check must come first ! Otherwise
// the next frame length check may throw an IOException and
// interrupt iterating File Writers. (see bug 4351296)
// throws IllegalArgumentException if not supported
AiffFileFormat aiffFileFormat = (AiffFileFormat)getAudioFileFormat(fileType, stream);
// we must know the total data length to calculate the file length
if( stream.getFrameLength() == AudioSystem.NOT_SPECIFIED ) {
throw new IOException("stream length not specified");
}
int bytesWritten = writeAiffFile(stream, aiffFileFormat, out);
return bytesWritten;
}
Example 5: ExtendedClip
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
public ExtendedClip(JuggleMasterPro objPjuggleMasterPro, byte bytPsoundFileIndex) {
this.bytGsoundFileIndex = bytPsoundFileIndex;
try {
final AudioInputStream objLaudioInputStream =
AudioSystem.getAudioInputStream(new File(Strings.doConcat( objPjuggleMasterPro.strS_CODE_BASE,
Constants.strS_FILE_NAME_A[Constants.intS_FILE_FOLDER_SOUNDS],
objPjuggleMasterPro.chrGpathSeparator,
Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex])));
final AudioFormat objLaudioFormat = objLaudioInputStream.getFormat();
final DataLine.Info objLdataLineInfo =
new DataLine.Info(Clip.class, objLaudioFormat, (int) objLaudioInputStream.getFrameLength()
* objLaudioFormat.getFrameSize());
this.objGclip = (Clip) AudioSystem.getLine(objLdataLineInfo);
this.objGclip.open(objLaudioInputStream);
} catch (final Throwable objPthrowable) {
Tools.err("Error while initializing sound : ", Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex]);
this.objGclip = null;
}
}
Example 6: testAIS
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Tests the {@code AudioInputStream} fetched from the fake header.
* <p>
* Note that the frameLength is stored as long which means that {@code
* AudioInputStream} must store all possible data from wave file.
*/
private static void testAIS(final byte[] type, final int rate,
final int channel, final long size)
throws Exception {
final byte[] header = createHeader(type, rate, channel, size);
final ByteArrayInputStream fake = new ByteArrayInputStream(header);
final AudioInputStream ais = AudioSystem.getAudioInputStream(fake);
final AudioFormat format = ais.getFormat();
final long frameLength = size / format.getFrameSize();
if (frameLength != ais.getFrameLength()) {
System.err.println("Expected: " + frameLength);
System.err.println("Actual: " + ais.getFrameLength());
throw new RuntimeException();
}
if (ais.available() < 0) {
System.err.println("available should be >=0: " + ais.available());
throw new RuntimeException();
}
validateFormat(type[1], rate, channel, format);
}
Example 7: testAfterSaveToFile
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Verifies the frame length after the stream was saved/read to/from file.
*/
private static void testAfterSaveToFile(final AudioFileWriter afw,
final AudioFileFormat.Type type,
AudioInputStream ais)
throws IOException {
final File temp = File.createTempFile("sound", ".tmp");
try {
afw.write(ais, type, temp);
ais = AudioSystem.getAudioInputStream(temp);
final long frameLength = ais.getFrameLength();
ais.close();
validate(frameLength);
} catch (IllegalArgumentException | UnsupportedAudioFileException
ignored) {
} finally {
Files.delete(Paths.get(temp.getAbsolutePath()));
}
}
Example 8: testAIS
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Tests the {@code AudioInputStream} fetched from the fake header.
* <p>
* Note that the frameLength is stored as long which means that {@code
* AudioInputStream} must store all possible data from wave file.
*/
private static void testAIS(final int[] type, final int rate,
final int channel, final long size)
throws Exception {
final byte[] header = createHeader(type, rate, channel, size);
final ByteArrayInputStream fake = new ByteArrayInputStream(header);
final AudioInputStream ais = AudioSystem.getAudioInputStream(fake);
final AudioFormat format = ais.getFormat();
final long frameLength = size / format.getFrameSize();
if (frameLength != ais.getFrameLength()) {
System.err.println("Expected: " + frameLength);
System.err.println("Actual: " + ais.getFrameLength());
throw new RuntimeException();
}
if (ais.available() < 0) {
System.err.println("available should be >=0: " + ais.available());
throw new RuntimeException();
}
validateFormat(type[1], rate, channel, format);
}
Example 9: write
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
@Override
public int write(AudioInputStream stream, AudioFileFormat.Type fileType, OutputStream out) throws IOException {
Objects.requireNonNull(stream);
Objects.requireNonNull(fileType);
Objects.requireNonNull(out);
//$$fb the following check must come first ! Otherwise
// the next frame length check may throw an IOException and
// interrupt iterating File Writers. (see bug 4351296)
// throws IllegalArgumentException if not supported
WaveFileFormat waveFileFormat = (WaveFileFormat)getAudioFileFormat(fileType, stream);
//$$fb when we got this far, we are committed to write this file
// we must know the total data length to calculate the file length
if( stream.getFrameLength() == AudioSystem.NOT_SPECIFIED ) {
throw new IOException("stream length not specified");
}
return writeWaveFile(stream, waveFileFormat, out);
}
Example 10: load
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
@Override
public AudioClip load(String file) {
String path = Oasis.getFileSystem().find(file);
if (path == null) {
log.warning("Could not find file: " + file);
return null;
}
try {
AudioInputStream in = AudioSystem.getAudioInputStream(new File(path));
int sampleRate = (int) in.getFormat().getSampleRate();
int channels = in.getFormat().getChannels();
int length = (int) in.getFrameLength();
int bytesPerFrame = in.getFormat().getFrameSize();
byte[] bytes = new byte[length * bytesPerFrame];
// read() may return fewer bytes than requested, so loop until the whole buffer is filled
int offset = 0;
while (offset < bytes.length) {
    int read = in.read(bytes, offset, bytes.length - offset);
    if (read < 0) {
        break;
    }
    offset += read;
}
AudioClip clip = AudioClip.create(
bytes.length,
sampleRate,
channels > 1,
bytesPerFrame / channels > 1,
channels == 1);
clip.setData(bytes);
return clip;
} catch (Exception e) {
e.printStackTrace();
log.warning("Could not read WAV data: " + file);
return null;
}
}
Example 11: loadAudioData
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
private boolean loadAudioData(AudioInputStream as) throws IOException, UnsupportedAudioFileException {
if (DEBUG || Printer.debug)Printer.debug("JavaSoundAudioClip->openAsClip()");
// first possibly convert this stream to PCM
as = Toolkit.getPCMConvertedAudioInputStream(as);
if (as == null) {
return false;
}
loadedAudioFormat = as.getFormat();
long frameLen = as.getFrameLength();
int frameSize = loadedAudioFormat.getFrameSize();
long byteLen = AudioSystem.NOT_SPECIFIED;
if (frameLen != AudioSystem.NOT_SPECIFIED
&& frameLen > 0
&& frameSize != AudioSystem.NOT_SPECIFIED
&& frameSize > 0) {
byteLen = frameLen * frameSize;
}
if (byteLen != AudioSystem.NOT_SPECIFIED) {
// if the stream length is known, it can be efficiently loaded into memory
readStream(as, byteLen);
} else {
// otherwise we use a ByteArrayOutputStream to load it into memory
readStream(as);
}
// if everything went fine, we have now the audio data in
// loadedAudio, and the byte length in loadedAudioByteLength
return true;
}
Example 12: testAIS
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Tests the {@code AudioInputStream} fetched from the fake header.
* <p>
* Note that the frameLength is stored as long which means
* that {@code AudioInputStream} must store all possible data from au file.
*/
private static void testAIS(final byte[] type, final int rate,
final int channel, final long size)
throws Exception {
final byte[] header = createHeader(type, rate, channel, size);
final ByteArrayInputStream fake = new ByteArrayInputStream(header);
final AudioInputStream ais = AudioSystem.getAudioInputStream(fake);
final AudioFormat format = ais.getFormat();
final long frameLength = size / format.getFrameSize();
if (size != MAX_UNSIGNED_INT) {
if (frameLength != ais.getFrameLength()) {
System.err.println("Expected: " + frameLength);
System.err.println("Actual: " + ais.getFrameLength());
throw new RuntimeException();
}
} else {
if (ais.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
System.err.println("Expected: " + AudioSystem.NOT_SPECIFIED);
System.err.println("Actual: " + ais.getFrameLength());
throw new RuntimeException();
}
}
if (ais.available() < 0) {
System.err.println("available should be >=0: " + ais.available());
throw new RuntimeException();
}
validateFormat(type[1], rate, channel, format);
}
Example 13: main
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
public static void main(String args[]) throws Exception {
boolean res = true;
try {
AudioInputStream ais = new AudioInputStream(
new ByteArrayInputStream(new byte[2000]),
new AudioFormat(8000.0f, 8, 1, false, false), 2000); //
AudioFormat format = ais.getFormat();
DataLine.Info info = new DataLine.Info(Clip.class, format,
((int) ais.getFrameLength()
* format
.getFrameSize()));
Clip clip = (Clip) AudioSystem.getLine(info);
clip.open();
FloatControl rateControl = (FloatControl) clip.getControl(
FloatControl.Type.SAMPLE_RATE);
int c = 0;
while (c++ < 10) {
clip.stop();
clip.setFramePosition(0);
clip.start();
for (float frq = 22000; frq < 44100; frq = frq + 100) {
try {
Thread.sleep(20);
} catch (Exception e) {
break;
}
rateControl.setValue(frq);
}
}
} catch (Exception ex) {
ex.printStackTrace();
res = ex.getMessage().indexOf(
"This method should not have been invoked!") < 0;
}
if (res) {
System.out.println("Test passed");
} else {
System.out.println("Test failed");
throw new Exception("Test failed");
}
}
Example 14: getSoundbank
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
public Soundbank getSoundbank(AudioInputStream ais)
throws InvalidMidiDataException, IOException {
try {
byte[] buffer;
if (ais.getFrameLength() == -1) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] buff = new byte[1024
- (1024 % ais.getFormat().getFrameSize())];
int ret;
while ((ret = ais.read(buff)) != -1) {
baos.write(buff, 0, ret);
}
ais.close();
buffer = baos.toByteArray();
} else {
buffer = new byte[(int) (ais.getFrameLength()
* ais.getFormat().getFrameSize())];
new DataInputStream(ais).readFully(buffer);
}
ModelByteBufferWavetable osc = new ModelByteBufferWavetable(
new ModelByteBuffer(buffer), ais.getFormat(), -4800);
ModelPerformer performer = new ModelPerformer();
performer.getOscillators().add(osc);
SimpleSoundbank sbk = new SimpleSoundbank();
SimpleInstrument ins = new SimpleInstrument();
ins.add(performer);
sbk.addInstrument(ins);
return sbk;
} catch (Exception e) {
return null;
}
}
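The soundbank reader above buffers the stream through a ByteArrayOutputStream when getFrameLength() returns -1 and reads directly into an exactly sized array otherwise. On Java 9 and later, the same "read everything whether or not the frame length is known" logic can be expressed more compactly with InputStream.readAllBytes(), which AudioInputStream inherits. A minimal sketch, not taken from the soundbank reader; clip.wav is a placeholder file name:

import java.io.File;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class ReadAllFrames {
    public static void main(String[] args) throws Exception {
        try (AudioInputStream ais = AudioSystem.getAudioInputStream(new File("clip.wav"))) {
            // Works even when getFrameLength() returns AudioSystem.NOT_SPECIFIED (-1),
            // because readAllBytes() simply reads until end of stream.
            byte[] data = ais.readAllBytes();
            System.out.println(data.length + " bytes read, "
                    + ais.getFrameLength() + " frames reported");
        }
    }
}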
Example 15: convertWaveToFlac
import javax.sound.sampled.AudioInputStream; // import the package/class this method depends on
/**
* Converts a wave file to a FLAC file (in order to POST the data to Google and retrieve a response). <br>
* The sample rate is 8000 Hz by default.
*
* @param inputFile Input wave file
* @param outputFile Output FLAC file
*/
public void convertWaveToFlac(File inputFile, File outputFile) {
StreamConfiguration streamConfiguration = new StreamConfiguration();
streamConfiguration.setSampleRate(8000);
streamConfiguration.setBitsPerSample(16);
streamConfiguration.setChannelCount(1);
try {
AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(inputFile);
AudioFormat format = audioInputStream.getFormat();
int frameSize = format.getFrameSize();
FLACEncoder flacEncoder = new FLACEncoder();
FLACFileOutputStream flacOutputStream = new FLACFileOutputStream(outputFile);
flacEncoder.setStreamConfiguration(streamConfiguration);
flacEncoder.setOutputStream(flacOutputStream);
flacEncoder.openFLACStream();
int frameLength = (int) audioInputStream.getFrameLength();
if(frameLength <= AudioSystem.NOT_SPECIFIED){
frameLength = 16384; // arbitrary default frame count used when the length is unknown
}
int[] sampleData = new int[frameLength];
byte[] samplesIn = new byte[frameSize];
int i = 0;
while (audioInputStream.read(samplesIn, 0, frameSize) != -1) {
if (frameSize != 1) {
ByteBuffer bb = ByteBuffer.wrap(samplesIn);
bb.order(ByteOrder.LITTLE_ENDIAN);
short shortVal = bb.getShort();
sampleData[i] = shortVal;
} else {
sampleData[i] = samplesIn[0];
}
i++;
}
sampleData = truncateNullData(sampleData, i);
flacEncoder.addSamples(sampleData, i);
flacEncoder.encodeSamples(i, false);
flacEncoder.encodeSamples(flacEncoder.samplesAvailableToEncode(), true);
audioInputStream.close();
flacOutputStream.close();
} catch (Exception ex) {
ex.printStackTrace();
}
}