当前位置: 首页>>代码示例>>Java>>正文


Java AudioFormat类代码示例

本文整理汇总了Java中javax.sound.sampled.AudioFormat的典型用法代码示例。如果您正苦于以下问题:Java AudioFormat类的具体用法?Java AudioFormat怎么用?Java AudioFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


AudioFormat类属于javax.sound.sampled包,在下文中一共展示了AudioFormat类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getTargetFormats

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Obtains the set of target formats with the given encoding that this
 * codec can convert the given source format to.
 *
 * @param targetEncoding the desired target encoding
 * @param sourceFormat the source format to convert from
 * @return the matching target formats; empty if none match
 */
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){

    // filter out targetEncoding from the old getOutputFormats( sourceFormat ) method

    AudioFormat[] formats = getOutputFormats( sourceFormat );
    // Typed Vector instead of a raw type: raw generics defeat compile-time
    // checking and forced a cast in the copy loop below.
    Vector<AudioFormat> newFormats = new Vector<>();
    for(int i=0; i<formats.length; i++ ) {
        if( formats[i].getEncoding().equals( targetEncoding ) ) {
            newFormats.addElement( formats[i] );
        }
    }

    // toArray replaces the manual element-by-element copy loop.
    return newFormats.toArray(new AudioFormat[0]);
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:23,代码来源:PCMtoPCMCodec.java

示例2: initFromByteArray

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Resets this buffer with the audio data specified in the arguments. This
 * FloatSampleBuffer's sample count will be set to
 * <code>byteCount / format.getFrameSize()</code>.
 *
 * @param buffer the byte array with interleaved sample data
 * @param offset the start offset in <code>buffer</code>, in bytes
 * @param byteCount the number of bytes to read from <code>buffer</code>
 * @param format the audio format describing the data in <code>buffer</code>
 * @param lazy if true, then existing channels will be tried to be re-used
 *            to minimize garbage collection.
 * @throws IllegalArgumentException if offset/byteCount do not describe a
 *             valid region inside <code>buffer</code>
 */
public void initFromByteArray(byte[] buffer, int offset, int byteCount,
		AudioFormat format, boolean lazy) {
	// Reject negative offset/count explicitly: a negative offset could make
	// (offset + byteCount) pass the old size-only check and then fail later
	// with an ArrayIndexOutOfBoundsException inside the conversion.
	if (offset < 0 || byteCount < 0 || offset + byteCount > buffer.length) {
		throw new IllegalArgumentException(
				"FloatSampleBuffer.initFromByteArray: buffer too small.");
	}

	int thisSampleCount = byteCount / format.getFrameSize();
	init(format.getChannels(), thisSampleCount, format.getSampleRate(),
			lazy);

	// save format for automatic dithering mode
	originalFormatType = FloatSampleTools.getFormatType(format);

	FloatSampleTools.byte2float(buffer, offset, channels, 0, sampleCount,
			format);
}
 
开发者ID:JacobRoth,项目名称:romanov,代码行数:27,代码来源:FloatSampleBuffer.java

示例3: writeByteBuffer

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Write the contents of the byte array to this buffer, overwriting existing
 * data. If the byte array has fewer channels than this float buffer, only
 * the first channels are written. Vice versa, if the byte buffer has more
 * channels than this float buffer, only the first channels of the byte
 * buffer are written to this buffer.
 * <p>
 * The format and the number of samples of this float buffer are not
 * changed, so if the byte array has more samples than fit into this float
 * buffer, it is not expanded.
 * 
 * @param buffer the byte buffer to write to this float buffer
 * @param srcByteOffset the offset in bytes in buffer where to start reading
 * @param format the audio format of the bytes in buffer
 * @param dstSampleOffset the offset in samples where to start writing the
 *            converted float data into this float buffer
 * @param aSampleCount the number of samples to write
 * @return the number of samples actually written (never negative)
 */
public int writeByteBuffer(byte[] buffer, int srcByteOffset,
		AudioFormat format, int dstSampleOffset, int aSampleCount) {
	// Clamp the requested count so we never write past this buffer's end.
	if (dstSampleOffset + aSampleCount > getSampleCount()) {
		aSampleCount = getSampleCount() - dstSampleOffset;
	}
	// If dstSampleOffset was already beyond the end, the clamp above goes
	// negative; report 0 samples written instead of a negative count.
	if (aSampleCount < 0) {
		aSampleCount = 0;
	}
	// Copy only the channels both buffers have in common. (The original had
	// a second clamp against format.getChannels() here that was unreachable:
	// lChannels could never exceed it at that point.)
	int lChannels = Math.min(format.getChannels(), getChannelCount());
	for (int channel = 0; channel < lChannels; channel++) {
		float[] data = getChannel(channel);

		FloatSampleTools.byte2floatGeneric(buffer, srcByteOffset,
				format.getFrameSize(), data, dstSampleOffset, aSampleCount,
				format);
		// Advance by one sample's worth of bytes so the next channel reads
		// its own interleaved samples within each frame.
		srcByteOffset += format.getFrameSize() / format.getChannels();
	}
	return aSampleCount;
}
 
开发者ID:JacobRoth,项目名称:romanov,代码行数:42,代码来源:FloatSampleBuffer.java

示例4: getAudioInputStream

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
@Override
public AudioInputStream getAudioInputStream(Encoding targetEncoding,
                                            AudioInputStream sourceStream) {
    AudioFormat sourceFormat = sourceStream.getFormat();
    if (!isConversionSupported(targetEncoding, sourceFormat)) {
        throw new IllegalArgumentException(
                "Unsupported conversion: " + sourceFormat.toString()
                        + " to " + targetEncoding.toString());
    }
    // Already in the target encoding: nothing to convert.
    if (sourceFormat.getEncoding().equals(targetEncoding)) {
        return sourceStream;
    }
    // PCM_FLOAT is always 32-bit; all other properties carry over.
    int sampleSize = targetEncoding.equals(Encoding.PCM_FLOAT)
            ? 32 : sourceFormat.getSampleSizeInBits();
    int channelCount = sourceFormat.getChannels();
    float rate = sourceFormat.getSampleRate();
    AudioFormat targetFormat = new AudioFormat(targetEncoding, rate,
            sampleSize, channelCount, channelCount * sampleSize / 8, rate,
            sourceFormat.isBigEndian());
    return getAudioInputStream(targetFormat, sourceStream);
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:23,代码来源:AudioFloatFormatConverter.java

示例5: getAudioFileTypes

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Returns the file types this writer can produce for the given stream.
 * Streams encoded as ALAW, ULAW, PCM_SIGNED or PCM_UNSIGNED are writable;
 * any other encoding yields an empty array.
 */
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {

        // make sure we can write this stream
        AudioFormat.Encoding encoding = stream.getFormat().getEncoding();
        boolean writable = AudioFormat.Encoding.ALAW.equals(encoding)
                || AudioFormat.Encoding.ULAW.equals(encoding)
                || AudioFormat.Encoding.PCM_SIGNED.equals(encoding)
                || AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding);

        if (!writable) {
            return new AudioFileFormat.Type[0];
        }

        // Hand out a defensive copy of the supported types array.
        AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
        System.arraycopy(types, 0, filetypes, 0, types.length);
        return filetypes;
    }
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:20,代码来源:WaveFileWriter.java

示例6: test

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Runs a short test by playing a short silent sound.
 */
public void test()
	throws JavaLayerException
{
	try
	{
		// 22050 Hz, 16-bit, mono, signed, little-endian; a tenth of a
		// second of zero-valued samples is enough to exercise the device.
		open(new AudioFormat(22050, 16, 1, true, false));
		short[] silence = new short[22050/10];
		write(silence, 0, silence.length);
		flush();
		close();
	}
	catch (RuntimeException ex)
	{
		// Wrap any device failure in the library's checked exception type.
		throw new JavaLayerException("Device test failed: "+ex);
	}

}
 
开发者ID:mahozad,项目名称:jlayer,代码行数:21,代码来源:JavaSoundAudioDevice.java

示例7: JSBufferedSampleRecorder

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Constructs a JSBufferedSampleRecorder that expects audio in the given
 * AudioFormat and which will save to a file with the given name.
 * 
 * @param sys the JSMinim instance stored as this recorder's system reference
 * @param fileName the name of the file to save to (not including the extension)
 * @param fileType the type of audio file to write
 * @param fileFormat the AudioFormat you want to record in
 * @param bufferSize sizing hint: each channel buffer holds bufferSize*10 floats
 */
JSBufferedSampleRecorder(JSMinim sys,
                         String fileName, 
                         AudioFileFormat.Type fileType, 
                         AudioFormat fileFormat,
                         int bufferSize)
{
  name = fileName;
  type = fileType;
  format = fileFormat;
  // 20 is only an initial capacity hint for the list of sample buffers.
  buffers = new ArrayList<FloatBuffer>(20);
  left = FloatBuffer.allocate(bufferSize*10);
  if ( format.getChannels() == Minim.STEREO )
  {
    // A right-channel buffer is only needed when recording in stereo.
    right = FloatBuffer.allocate(bufferSize*10);
  }
  else
  {
    right = null;
  }
  system = sys;
}
 
开发者ID:JacobRoth,项目名称:romanov,代码行数:29,代码来源:JSBufferedSampleRecorder.java

示例8: testSampleRate

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
// Writes a one-sample stream at the given sample rate to an in-memory AIFF
// file, reads the file format back, and checks the frame rate survived the
// round trip. Returns false only on a verified mismatch; an exception is
// deliberately treated as "not failed" (environment problem, not a product bug).
private static boolean testSampleRate(float sampleRate) {
    boolean result = true;

    try {
        // create a one-byte AudioInputStream with the requested sample rate
        ByteArrayInputStream data = new ByteArrayInputStream(new byte[1]);
        AudioFormat format = new AudioFormat(sampleRate, 8, 1, true, true);
        AudioInputStream stream = new AudioInputStream(data, format, 1);

        // write to AIFF file
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        AudioSystem.write(stream, AudioFileFormat.Type.AIFF, outputStream);
        byte[] fileData = outputStream.toByteArray();
        InputStream inputStream = new ByteArrayInputStream(fileData);
        AudioFileFormat aff = AudioSystem.getAudioFileFormat(inputStream);
        // compare with the epsilon-based equals() helper, not ==
        if (! equals(sampleRate, aff.getFormat().getFrameRate())) {
            out("error for sample rate " + sampleRate);
            result = false;
        }
    } catch (Exception e) {
        out(e);
        out("Test NOT FAILED");
    }
    return result;
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:26,代码来源:AiffSampleRate.java

示例9: main

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Plays /home/rizsi/tmp/video/remote.sw in an endless loop, pushing the
 * audio through a JitterResampler before writing it to the default output
 * line. Never returns; terminate the process to stop playback.
 */
public static void main(String[] args) throws Exception {
	AbstractRcomArgs a=new AbstractRcomArgs();
	UtilCli.parse(a, args, true);
	// Hard-coded sample folder with a raw audio capture file.
	File folder=new File("/home/rizsi/tmp/video");
	byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
	AudioFormat format=ManualTestEchoCancel.getFormat();
	// null selects the system default mixer.
	final Mixer mixer = AudioSystem.getMixer(null);
	DataLine.Info info2= new DataLine.Info(SourceDataLine.class, format);
	SourceDataLine s=(SourceDataLine) mixer.getLine(info2);
	// Line buffer sized to framesamples*2 bytes; NOTE(review): assumes this
	// matches the frame size of ManualTestEchoCancel.getFormat() -- confirm.
	s.open(format, framesamples*2);
	s.start();
	try(LoopInputStream lis=new LoopInputStream(data))
	{
		// Resampler fed from a separate thread at a nominal 8000 Hz.
		try(JitterResampler rs=new JitterResampler(a, 8000, framesamples, 2))
		{
			new FeedThread(lis, rs).start();
			final byte[] buffer=new byte[framesamples*2];;
			// Endless loop: pull resampled audio, push it to the line.
			while(true)
			{
				rs.readOutput(buffer);
				s.write(buffer, 0, buffer.length);
			}
		}
	}
}
 
开发者ID:rizsi,项目名称:rcom,代码行数:26,代码来源:JitterExample.java

示例10: getAudioInputStream

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
@Override
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {

    // Guard clause: reject unsupported conversions up front.
    if( !isConversionSupported(targetEncoding, sourceStream.getFormat()) ) {
        throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
    }

    // Keep every property of the source format and swap only the encoding.
    AudioFormat srcFormat = sourceStream.getFormat();
    AudioFormat targetFormat = new AudioFormat( targetEncoding,
                                                srcFormat.getSampleRate(),
                                                srcFormat.getSampleSizeInBits(),
                                                srcFormat.getChannels(),
                                                srcFormat.getFrameSize(),
                                                srcFormat.getFrameRate(),
                                                srcFormat.isBigEndian() );

    return getConvertedStream(targetFormat, sourceStream);
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:22,代码来源:PCMtoPCMCodec.java

示例11: main

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Loads two raw audio captures (remote and local), prints their measured
 * peak levels, writes their sample-wise sum to rawmic.sw, then plays the
 * remote capture, the local capture and the sum one after another.
 */
public static void main(String[] args) throws IOException, LineUnavailableException {
	File folder=new File("/home/rizsi/tmp/video");
	byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
	byte[] data2=UtilFile.loadFile(new File(folder, "local.sw"));
	System.out.println("remote.sw max: "+measureMax(data));
	System.out.println("local.sw max: "+measureMax(data2));
	// Mix the two captures and keep the result on disk for inspection.
	byte[] data3=sum(data, data2);
	UtilFile.saveAsFile(new File(folder, "rawmic.sw"), data3);
	AudioFormat format=ManualTestEchoCancel.getFormat();
	// null selects the system default mixer.
	final Mixer mixer = AudioSystem.getMixer(null);
	Play p=new Play(mixer, format, ManualTestEchoCancel.frameSamples)
	{
		// Advance remote -> local -> sum; once the sum is reached it
		// keeps playing (no further transition is defined).
		@Override
		protected void switchBuffer() {
			if(getSample()==data)
			{
				setSample(data2);
			}else if(getSample()==data2)
			{
				setSample(data3);
			}
		}
	};
	p.start();
	p.setSample(data);
}
 
开发者ID:rizsi,项目名称:rcom,代码行数:27,代码来源:Replay.java

示例12: newProperties

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Reads this microphone's configuration from the given property sheet and
 * derives the desired capture AudioFormat from the configured sample rate,
 * sample size, channel count, signedness and endianness.
 *
 * @param ps the property sheet holding the configured values
 * @throws PropertyException if a property cannot be read
 */
@Override
public void newProperties(PropertySheet ps) throws PropertyException {
	super.newProperties(ps);
	logger = ps.getLogger();

	sampleRate = ps.getInt(PROP_SAMPLE_RATE);

	int sampleSizeInBits = ps.getInt(PROP_BITS_PER_SAMPLE);

	int channels = ps.getInt(PROP_CHANNELS);
	bigEndian = ps.getBoolean(PROP_BIG_ENDIAN);
	signed = ps.getBoolean(PROP_SIGNED);

	// Linear PCM format; frame size/rate are derived by the constructor.
	desiredFormat = new AudioFormat((float) sampleRate, sampleSizeInBits, channels, signed, bigEndian);

	closeBetweenUtterances = ps.getBoolean(PROP_CLOSE_BETWEEN_UTTERANCES);
	msecPerRead = ps.getInt(PROP_MSEC_PER_READ);
	keepDataReference = ps.getBoolean(PROP_KEEP_LAST_AUDIO);
	stereoToMono = ps.getString(PROP_STEREO_TO_MONO);
	selectedChannel = ps.getInt(PROP_SELECT_CHANNEL);
	selectedMixerIndex = ps.getString(PROP_SELECT_MIXER);
	audioBufferSize = ps.getInt(PROP_BUFFER_SIZE);
}
 
开发者ID:CognitiveModeling,项目名称:BrainControl,代码行数:24,代码来源:Microphone.java

示例13: getAudioInputStream

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Converts the given float stream to the target format, inserting a channel
 * mixer and/or a resampler in front of it when needed.
 */
public AudioInputStream getAudioInputStream(AudioFormat targetFormat,
        AudioFloatInputStream sourceStream) {

    if (!isConversionSupported(targetFormat, sourceStream.getFormat())) {
        throw new IllegalArgumentException("Unsupported conversion: "
                + sourceStream.getFormat().toString() + " to "
                + targetFormat.toString());
    }
    // Adjust the channel count first, then the sample rate.
    if (targetFormat.getChannels() != sourceStream.getFormat().getChannels()) {
        sourceStream = new AudioFloatInputStreamChannelMixer(sourceStream,
                targetFormat.getChannels());
    }
    // Resample only when the rates differ by more than a tiny epsilon, so
    // harmless float rounding noise does not trigger a resampler.
    float rateDelta = targetFormat.getSampleRate()
            - sourceStream.getFormat().getSampleRate();
    if (Math.abs(rateDelta) > 0.000001) {
        sourceStream = new AudioFloatInputStreamResampler(sourceStream,
                targetFormat);
    }
    return new AudioInputStream(new AudioFloatFormatConverterInputStream(
            targetFormat, sourceStream), targetFormat,
            sourceStream.getFrameLength());
}
 
开发者ID:SunburstApps,项目名称:OpenJSharp,代码行数:20,代码来源:AudioFloatFormatConverter.java

示例14: getTargetFormats

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
    Objects.requireNonNull(targetEncoding);

    // Keep only the output formats whose encoding matches the target.
    Vector<AudioFormat> matching = new Vector<>();
    for (AudioFormat candidate : getOutputFormats(sourceFormat)) {
        if (candidate.getEncoding().equals(targetEncoding)) {
            matching.addElement(candidate);
        }
    }

    // toArray replaces the manual element-by-element copy loop.
    return matching.toArray(new AudioFormat[0]);
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:23,代码来源:PCMtoPCMCodec.java

示例15: write

import javax.sound.sampled.AudioFormat; //导入依赖的package包/类
/**
 * Writes the stream as an IEEE-float WAVE body: a "fmt " chunk describing
 * the format followed by a "data" chunk holding the raw audio bytes.
 */
public void write(AudioInputStream stream, RIFFWriter writer)
        throws IOException {

    AudioFormat format = stream.getFormat();

    // "fmt " chunk: format tag, channels, rates and sizes.
    RIFFWriter fmt_chunk = writer.writeChunk("fmt ");
    fmt_chunk.writeUnsignedShort(3); // WAVE_FORMAT_IEEE_FLOAT
    fmt_chunk.writeUnsignedShort(format.getChannels());
    fmt_chunk.writeUnsignedInt((int) format.getSampleRate());
    fmt_chunk.writeUnsignedInt(((int) format.getFrameRate())
            * format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getSampleSizeInBits());
    fmt_chunk.close();

    // "data" chunk: copy the audio payload through a small buffer.
    RIFFWriter data_chunk = writer.writeChunk("data");
    byte[] buff = new byte[1024];
    for (int len = stream.read(buff, 0, buff.length); len != -1;
            len = stream.read(buff, 0, buff.length)) {
        data_chunk.write(buff, 0, len);
    }
    data_chunk.close();
}
 
开发者ID:SunburstApps,项目名称:OpenJSharp,代码行数:22,代码来源:WaveFloatFileWriter.java


注:本文中的javax.sound.sampled.AudioFormat类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。