当前位置: 首页>>代码示例>>Java>>正文


Java AudioFormat.getFrameSize方法代码示例

本文整理汇总了Java中javax.sound.sampled.AudioFormat.getFrameSize方法的典型用法代码示例。如果您正苦于以下问题:Java AudioFormat.getFrameSize方法的具体用法?Java AudioFormat.getFrameSize怎么用?Java AudioFormat.getFrameSize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在javax.sound.sampled.AudioFormat的用法示例。


在下文中一共展示了AudioFormat.getFrameSize方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: writeByteBuffer

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Write the contents of the byte array to this buffer, overwriting existing
 * data. If the byte array has fewer channels than this float buffer, only
 * the first channels are written. Vice versa, if the byte buffer has more
 * channels than this float buffer, only the first channels of the byte
 * buffer are written to this buffer.
 * <p>
 * The format and the number of samples of this float buffer are not
 * changed, so if the byte array has more samples than fit into this float
 * buffer, it is not expanded.
 * 
 * @param buffer the byte buffer to write to this float buffer
 * @param srcByteOffset the offset in bytes in buffer where to start reading
 * @param format the audio format of the bytes in buffer
 * @param dstSampleOffset the offset in samples where to start writing the
 *            converted float data into this float buffer
 * @param aSampleCount the number of samples to write
 * @return the number of samples actually written (never negative)
 */
public int writeByteBuffer(byte[] buffer, int srcByteOffset,
		AudioFormat format, int dstSampleOffset, int aSampleCount) {
	// Clamp the requested region so it never runs past the end of this
	// buffer.
	if (dstSampleOffset + aSampleCount > getSampleCount()) {
		aSampleCount = getSampleCount() - dstSampleOffset;
	}
	// If the offset was already out of range the clamp above goes
	// negative; treat that as "nothing to write" instead of returning a
	// negative count.
	if (aSampleCount < 0) {
		aSampleCount = 0;
	}
	// Convert only channels present in both the source format and this
	// buffer. (A second clamp against format.getChannels() in the original
	// was dead code: lChannels starts at that value and only decreases.)
	int lChannels = format.getChannels();
	if (lChannels > getChannelCount()) {
		lChannels = getChannelCount();
	}
	// Width of one sample of one channel, in bytes.
	int bytesPerSample = format.getFrameSize() / format.getChannels();
	for (int channel = 0; channel < lChannels; channel++) {
		float[] data = getChannel(channel);

		// byte2floatGeneric reads interleaved data: it advances by the
		// full frame size per sample, starting at this channel's first
		// sample.
		FloatSampleTools.byte2floatGeneric(buffer, srcByteOffset,
				format.getFrameSize(), data, dstSampleOffset, aSampleCount,
				format);
		// Move to the next channel's first sample within the first frame.
		srcByteOffset += bytesPerSample;
	}
	return aSampleCount;
}
 
开发者ID:JacobRoth,项目名称:romanov,代码行数:42,代码来源:FloatSampleBuffer.java

示例2: test

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Round-trips {@code FLOATS} through the converter for the given encoding
 * and endianness, checking both conversion directions against the
 * expected byte layout. Throws {@link RuntimeException} on mismatch.
 */
private static void test(final Encoding enc, final byte[] expected,
                         boolean end) {
    System.err.println("enc = " + enc);
    final AudioFormat audioFormat =
            new AudioFormat(enc, 44100, SIZE, 1, SIZE / 8, 44100, end);
    final AudioFloatConverter converter =
            AudioFloatConverter.getConverter(audioFormat);

    // float -> byte direction
    final byte[] encoded =
            new byte[FLOATS.length * audioFormat.getFrameSize()];
    converter.toByteArray(FLOATS, encoded);
    if (!Arrays.equals(encoded, expected)) {
        System.err.println("Actual: " + Arrays.toString(encoded));
        System.err.println("Expected: " + Arrays.toString(expected));
        throw new RuntimeException();
    }

    // byte -> float direction
    final float[] decoded =
            new float[encoded.length / audioFormat.getFrameSize()];
    converter.toFloatArray(encoded, decoded);
    if (!Arrays.equals(decoded, FLOATS)) {
        System.err.println("Actual: " + Arrays.toString(decoded));
        System.err.println("Expected: " + Arrays.toString(FLOATS));
        throw new RuntimeException();
    }
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:26,代码来源:Bits16ToFromFloatArray.java

示例3: validateFormat

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Tests that the format contains the same data as were provided to the
 * fake stream: sample rate, channel count, and the frame size derived
 * from the bit depth. Throws {@link RuntimeException} on any mismatch.
 */
private static void validateFormat(final int bits, final int rate,
                                   final int channel,
                                   final AudioFormat format) {

    // Exact float comparison on purpose: the rate must round-trip bit-for-bit.
    final float actualRate = format.getSampleRate();
    if (Float.compare(actualRate, rate) != 0) {
        System.err.println("Expected: " + rate);
        System.err.println("Actual: " + actualRate);
        throw new RuntimeException();
    }

    final int actualChannels = format.getChannels();
    if (actualChannels != channel) {
        System.err.println("Expected: " + channel);
        System.err.println("Actual: " + actualChannels);
        throw new RuntimeException();
    }

    // Bytes per frame = ceil(bits / 8) * channels.
    final int expectedFrameSize = ((bits + 7) / 8) * channel;
    if (format.getFrameSize() != expectedFrameSize) {
        System.err.println("Expected: " + expectedFrameSize);
        System.err.println("Actual: " + format.getFrameSize());
        throw new RuntimeException();
    }
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:26,代码来源:RecognizeHugeWaveExtFiles.java

示例4: ExtendedClip

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
public ExtendedClip(JuggleMasterPro objPjuggleMasterPro, byte bytPsoundFileIndex) {

		this.bytGsoundFileIndex = bytPsoundFileIndex;
		try {
			final AudioInputStream objLaudioInputStream =
															AudioSystem.getAudioInputStream(new File(Strings.doConcat(	objPjuggleMasterPro.strS_CODE_BASE,
																														Constants.strS_FILE_NAME_A[Constants.intS_FILE_FOLDER_SOUNDS],
																														objPjuggleMasterPro.chrGpathSeparator,
																														Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex])));
			final AudioFormat objLaudioFormat = objLaudioInputStream.getFormat();
			final DataLine.Info objLdataLineInfo =
													new DataLine.Info(Clip.class, objLaudioFormat, (int) objLaudioInputStream.getFrameLength()
																									* objLaudioFormat.getFrameSize());
			this.objGclip = (Clip) AudioSystem.getLine(objLdataLineInfo);
			this.objGclip.open(objLaudioInputStream);
		} catch (final Throwable objPthrowable) {
			Tools.err("Error while initializing sound : ", Constants.strS_FILE_SOUND_NAME_A[bytPsoundFileIndex]);
			this.objGclip = null;
		}
	}
 
开发者ID:jugglemaster,项目名称:JuggleMasterPro,代码行数:21,代码来源:ExtendedClip.java

示例5: convertToByteArray

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Writes this sample buffer's audio data to <code>buffer</code> as an
 * interleaved byte array. <code>buffer</code> must be large enough to
 * hold all data.
 * 
 * @param readOffset the sample offset from where samples are read from this
 *            FloatSampleBuffer
 * @param lenInSamples how many samples are converted
 * @param buffer the byte buffer written to
 * @param writeOffset the byte offset in buffer
 * @param format the target audio format of the bytes written to buffer;
 *            must match this buffer's sample rate and channel count
 * @throws IllegalArgumentException when buffer is too small or
 *             <code>format</code> doesn't match
 * @return number of bytes written to <code>buffer</code>
 */
public int convertToByteArray(int readOffset, int lenInSamples,
		byte[] buffer, int writeOffset, AudioFormat format) {
	int byteCount = format.getFrameSize() * lenInSamples;
	if (writeOffset + byteCount > buffer.length) {
		throw new IllegalArgumentException(
				"FloatSampleBuffer.convertToByteArray: buffer too small.");
	}
	// Reference-equality cache: validating the format and deriving its
	// conversion code is only redone when a *different instance* is passed.
	// NOTE(review): this is an identity check, not equals() — an equal but
	// distinct AudioFormat re-runs validation, which is safe, just slower.
	if (format != lastConvertToByteArrayFormat) {
		if (format.getSampleRate() != getSampleRate()) {
			throw new IllegalArgumentException(
					"FloatSampleBuffer.convertToByteArray: different samplerates.");
		}
		if (format.getChannels() != getChannelCount()) {
			throw new IllegalArgumentException(
					"FloatSampleBuffer.convertToByteArray: different channel count.");
		}
		// Cache the validated format and its precomputed format code.
		lastConvertToByteArrayFormat = format;
		lastConvertToByteArrayFormatCode = FloatSampleTools.getFormatType(format);
	}
	// Interleave all channels into the byte buffer, applying dither as
	// configured for this format code.
	FloatSampleTools.float2byte(channels, readOffset, buffer, writeOffset,
			lenInSamples, lastConvertToByteArrayFormatCode,
			format.getChannels(), format.getFrameSize(),
			getConvertDitherBits(lastConvertToByteArrayFormatCode));

	return byteCount;
}
 
开发者ID:JacobRoth,项目名称:romanov,代码行数:41,代码来源:FloatSampleBuffer.java

示例6: isFullySpecifiedPCMFormat

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Returns whether the given format is linear PCM (signed or unsigned)
 * with every numeric property set to a positive, concrete value.
 * {@code AudioSystem.NOT_SPECIFIED} (-1) in any field fails the check.
 */
static boolean isFullySpecifiedPCMFormat(AudioFormat format) {
    AudioFormat.Encoding enc = format.getEncoding();
    boolean isPcm = enc.equals(AudioFormat.Encoding.PCM_SIGNED)
            || enc.equals(AudioFormat.Encoding.PCM_UNSIGNED);
    return isPcm
            && format.getFrameRate() > 0
            && format.getSampleRate() > 0
            && format.getSampleSizeInBits() > 0
            && format.getFrameSize() > 0
            && format.getChannels() > 0;
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:15,代码来源:Toolkit.java

示例7: main

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Regression test: repeatedly restarts a Clip while sweeping its
 * SAMPLE_RATE control upward; passes unless the known bug's message
 * ("This method should not have been invoked!") is observed.
 */
public static void main(String args[]) throws Exception {
    boolean res = true;
    try {
        // 2000 frames of 8-bit mono silence as the clip's data.
        AudioInputStream ais = new AudioInputStream(
                new ByteArrayInputStream(new byte[2000]),
                new AudioFormat(8000.0f, 8, 1, false, false), 2000);
        AudioFormat format = ais.getFormat();
        DataLine.Info info = new DataLine.Info(Clip.class, format,
                                               ((int) ais.getFrameLength()
                                                        * format
                                                       .getFrameSize()));
        Clip clip = (Clip) AudioSystem.getLine(info);
        clip.open();
        try {
            FloatControl rateControl = (FloatControl) clip.getControl(
                    FloatControl.Type.SAMPLE_RATE);
            int c = 0;
            while (c++ < 10) {
                clip.stop();
                clip.setFramePosition(0);
                clip.start();
                // Sweep the playback rate upward while the clip plays.
                for (float frq = 22000; frq < 44100; frq = frq + 100) {
                    try {
                        // sleep() is static — call it directly, not via
                        // Thread.currentThread().
                        Thread.sleep(20);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt(); // keep status
                        break;
                    }
                    rateControl.setValue(frq);
                }
            }
        } finally {
            clip.close(); // release the audio line even on failure
        }
    } catch (Exception ex) {
        ex.printStackTrace();
        // Only the bug's specific message fails the test; getMessage()
        // may be null for other exceptions, so guard against NPE.
        String msg = ex.getMessage();
        res = msg == null || msg.indexOf(
                "This method should not have been invoked!") < 0;
    }
    if (res) {
        System.out.println("Test passed");
    } else {
        System.out.println("Test failed");
        throw new Exception("Test failed");
    }
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:42,代码来源:ClipOpenBug.java

示例8: testAIS

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Tests the {@code AudioInputStream} fetched from the fake header.
 * <p>
 * The frame length is stored as a long, so the {@code AudioInputStream}
 * must be able to represent every size an au file can declare.
 */
private static void testAIS(final byte[] type, final int rate,
                            final int channel, final long size)
        throws Exception {
    final byte[] header = createHeader(type, rate, channel, size);
    final AudioInputStream ais =
            AudioSystem.getAudioInputStream(new ByteArrayInputStream(header));
    final AudioFormat format = ais.getFormat();

    // An au file declaring MAX_UNSIGNED_INT means "length unknown", which
    // must surface as NOT_SPECIFIED; otherwise the exact frame count.
    final long expectedFrames = (size == MAX_UNSIGNED_INT)
            ? AudioSystem.NOT_SPECIFIED
            : size / format.getFrameSize();
    if (ais.getFrameLength() != expectedFrames) {
        System.err.println("Expected: " + expectedFrames);
        System.err.println("Actual: " + ais.getFrameLength());
        throw new RuntimeException();
    }

    if (ais.available() < 0) {
        System.err.println("available should be >=0: " + ais.available());
        throw new RuntimeException();
    }
    validateFormat(type[1], rate, channel, format);
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:34,代码来源:RecognizeHugeAuFiles.java

示例9: testAFF

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Tests the {@code AudioFileFormat} fetched from the fake header.
 * <p>
 * frameLength and byteLength are stored as int, so any value above
 * {@code Integer.MAX_VALUE} must be reported as NOT_SPECIFIED.
 */
private static void testAFF(final byte[] type, final int rate,
                            final int channel, final long size)
        throws Exception {
    final byte[] header = createHeader(type, rate, channel, size);
    final AudioFileFormat aff =
            AudioSystem.getAudioFileFormat(new ByteArrayInputStream(header));
    final AudioFormat format = aff.getFormat();

    if (aff.getType() != AudioFileFormat.Type.WAVE) {
        throw new RuntimeException("Error");
    }

    // Frame counts that fit in an int must be exact; larger ones collapse
    // to NOT_SPECIFIED.
    final long frames = size / format.getFrameSize();
    final long expectedFrames = (frames <= Integer.MAX_VALUE)
            ? frames
            : AudioSystem.NOT_SPECIFIED;
    if (aff.getFrameLength() != expectedFrames) {
        System.err.println("Expected: " + expectedFrames);
        System.err.println("Actual: " + aff.getFrameLength());
        throw new RuntimeException();
    }
    validateFormat(type[1], rate, channel, aff.getFormat());
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:36,代码来源:RecognizeHugeWaveFiles.java

示例10: isFullySpecifiedAudioFormat

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Throws {@link IllegalArgumentException} if any numeric property of a
 * linear (PCM / ULAW / ALAW) format is non-positive or unspecified.
 * Possibly non-linear encodings are skipped, since their properties
 * cannot be verified here.
 */
static void isFullySpecifiedAudioFormat(AudioFormat format) {
    final AudioFormat.Encoding enc = format.getEncoding();
    final boolean linear = enc.equals(AudioFormat.Encoding.PCM_SIGNED)
            || enc.equals(AudioFormat.Encoding.PCM_UNSIGNED)
            || enc.equals(AudioFormat.Encoding.ULAW)
            || enc.equals(AudioFormat.Encoding.ALAW);
    if (!linear) {
        // we don't know how to verify possibly non-linear encodings
        return;
    }
    if (format.getFrameRate() <= 0) {
        throw new IllegalArgumentException("invalid frame rate: "
                                           + describe(format.getFrameRate()));
    }
    if (format.getSampleRate() <= 0) {
        throw new IllegalArgumentException("invalid sample rate: "
                                           + describe(format.getSampleRate()));
    }
    if (format.getSampleSizeInBits() <= 0) {
        throw new IllegalArgumentException("invalid sample size in bits: "
                                           + describe(format.getSampleSizeInBits()));
    }
    if (format.getFrameSize() <= 0) {
        throw new IllegalArgumentException("invalid frame size: "
                                           + describe(format.getFrameSize()));
    }
    if (format.getChannels() <= 0) {
        throw new IllegalArgumentException("invalid number of channels: "
                                           + describe(format.getChannels()));
    }
}

/** Renders a float property for error messages; -1 is the NOT_SPECIFIED sentinel. */
private static String describe(float value) {
    return (value == -1) ? "NOT_SPECIFIED" : String.valueOf(value);
}

/** Renders an int property for error messages; -1 is the NOT_SPECIFIED sentinel. */
private static String describe(int value) {
    return (value == -1) ? "NOT_SPECIFIED" : String.valueOf(value);
}
 
开发者ID:SunburstApps,项目名称:OpenJSharp,代码行数:35,代码来源:Toolkit.java

示例11: AudioFloatInputStreamResampler

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
// Wraps an AudioFloatInputStream and resamples it to the sample rate of
// the given target format. Only the sample rate is taken from "format";
// encoding, sample size, channel count, frame size and endianness are
// copied from the source stream.
AudioFloatInputStreamResampler(AudioFloatInputStream ais,
        AudioFormat format) {
    this.ais = ais;
    AudioFormat sourceFormat = ais.getFormat();
    targetFormat = new AudioFormat(sourceFormat.getEncoding(), format
            .getSampleRate(), sourceFormat.getSampleSizeInBits(),
            sourceFormat.getChannels(), sourceFormat.getFrameSize(),
            format.getSampleRate(), sourceFormat.isBigEndian());
    nrofchannels = targetFormat.getChannels();
    // The target format's "interpolation" property selects the resampler
    // implementation; unknown or absent values fall through to the default.
    Object interpolation = format.getProperty("interpolation");
    if (interpolation != null && (interpolation instanceof String)) {
        String resamplerType = (String) interpolation;
        if (resamplerType.equalsIgnoreCase("point"))
            this.resampler = new SoftPointResampler();
        if (resamplerType.equalsIgnoreCase("linear"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("linear1"))
            this.resampler = new SoftLinearResampler();
        if (resamplerType.equalsIgnoreCase("linear2"))
            this.resampler = new SoftLinearResampler2();
        if (resamplerType.equalsIgnoreCase("cubic"))
            this.resampler = new SoftCubicResampler();
        if (resamplerType.equalsIgnoreCase("lanczos"))
            this.resampler = new SoftLanczosResampler();
        if (resamplerType.equalsIgnoreCase("sinc"))
            this.resampler = new SoftSincResampler();
    }
    if (resampler == null)
        resampler = new SoftLinearResampler2(); // default when no/unknown
                                                // interpolation hint
    // Rate ratio: > 1 means the source rate exceeds the target rate.
    pitch[0] = sourceFormat.getSampleRate() / format.getSampleRate();
    pad = resampler.getPadding();
    pad2 = pad * 2;
    // Per-channel input buffers carry "pad" extra samples on both sides
    // for the resampler's filter kernel.
    ibuffer = new float[nrofchannels][buffer_len + pad2];
    ibuffer2 = new float[nrofchannels * buffer_len];
    ibuffer_index = buffer_len + pad;
    ibuffer_len = buffer_len;
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:39,代码来源:AudioFloatFormatConverter.java

示例12: testAFF

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Tests the {@code AudioFileFormat} fetched from the fake header.
 * <p>
 * frameLength and byteLength are stored as int, so any value above
 * {@code Integer.MAX_VALUE} must be reported as NOT_SPECIFIED.
 */
private static void testAFF(final int[] type, final int rate,
                            final int channel, final long size)
        throws Exception {
    final byte[] header = createHeader(type, rate, channel, size);
    final AudioFileFormat aff =
            AudioSystem.getAudioFileFormat(new ByteArrayInputStream(header));
    final AudioFormat format = aff.getFormat();

    if (aff.getType() != AudioFileFormat.Type.WAVE) {
        throw new RuntimeException("Error");
    }

    // Frame counts that fit in an int must be exact; larger ones collapse
    // to NOT_SPECIFIED.
    final long frames = size / format.getFrameSize();
    final long expectedFrames = (frames <= Integer.MAX_VALUE)
            ? frames
            : AudioSystem.NOT_SPECIFIED;
    if (aff.getFrameLength() != expectedFrames) {
        System.err.println("Expected: " + expectedFrames);
        System.err.println("Actual: " + aff.getFrameLength());
        throw new RuntimeException();
    }
    validateFormat(type[1], rate, channel, aff.getFormat());
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:36,代码来源:RecognizeHugeWaveExtFiles.java

示例13: AudioFloatInputStreamResampler

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Creates a stream that resamples {@code ais} to the sample rate of
 * {@code format}; every other audio parameter is kept from the source.
 */
public AudioFloatInputStreamResampler(AudioFloatInputStream ais,
        AudioFormat format) {
    this.ais = ais;
    final AudioFormat srcFormat = ais.getFormat();
    targetFormat = new AudioFormat(srcFormat.getEncoding(),
            format.getSampleRate(), srcFormat.getSampleSizeInBits(),
            srcFormat.getChannels(), srcFormat.getFrameSize(),
            format.getSampleRate(), srcFormat.isBigEndian());
    nrofchannels = targetFormat.getChannels();

    // An optional "interpolation" property on the requested format picks
    // the resampling kernel; anything unrecognized keeps the default.
    final Object hint = format.getProperty("interpolation");
    if (hint instanceof String) {
        final String kernel = (String) hint;
        if (kernel.equalsIgnoreCase("point")) {
            resampler = new SoftPointResampler();
        } else if (kernel.equalsIgnoreCase("linear")
                || kernel.equalsIgnoreCase("linear2")) {
            resampler = new SoftLinearResampler2();
        } else if (kernel.equalsIgnoreCase("linear1")) {
            resampler = new SoftLinearResampler();
        } else if (kernel.equalsIgnoreCase("cubic")) {
            resampler = new SoftCubicResampler();
        } else if (kernel.equalsIgnoreCase("lanczos")) {
            resampler = new SoftLanczosResampler();
        } else if (kernel.equalsIgnoreCase("sinc")) {
            resampler = new SoftSincResampler();
        }
    }
    if (resampler == null) {
        resampler = new SoftLinearResampler2(); // default interpolation
    }

    // Rate ratio: > 1 means the source rate exceeds the target rate.
    pitch[0] = srcFormat.getSampleRate() / format.getSampleRate();
    pad = resampler.getPadding();
    pad2 = pad * 2;
    // Per-channel buffers padded on both sides for the filter kernel.
    ibuffer = new float[nrofchannels][buffer_len + pad2];
    ibuffer2 = new float[nrofchannels * buffer_len];
    ibuffer_index = buffer_len + pad;
    ibuffer_len = buffer_len;
}
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:39,代码来源:SoftMixingDataLine.java

示例14: getAudioInputStream

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Obtains an audio input stream in the given target encoding from the
 * source stream. Only the encoding changes; every other parameter of the
 * source format is preserved.
 *
 * @throws IllegalArgumentException if the conversion is not supported
 */
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {

    final AudioFormat sourceFormat = sourceStream.getFormat();
    if (!isConversionSupported(targetEncoding, sourceFormat)) {
        throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
    }

    // Same format, new encoding.
    final AudioFormat targetFormat = new AudioFormat(targetEncoding,
                                                     sourceFormat.getSampleRate(),
                                                     sourceFormat.getSampleSizeInBits(),
                                                     sourceFormat.getChannels(),
                                                     sourceFormat.getFrameSize(),
                                                     sourceFormat.getFrameRate(),
                                                     sourceFormat.isBigEndian());
    return getAudioInputStream(targetFormat, sourceStream);
}
 
开发者ID:SunburstApps,项目名称:OpenJSharp,代码行数:23,代码来源:PCMtoPCMCodec.java

示例15: AudioFloatInputStreamChannelMixer

import javax.sound.sampled.AudioFormat; //导入方法依赖的package包/类
/**
 * Wraps {@code ais} so its audio is remixed from the source channel count
 * to {@code targetChannels}; all other format parameters are preserved.
 */
AudioFloatInputStreamChannelMixer(AudioFloatInputStream ais,
        int targetChannels) {
    this.sourceChannels = ais.getFormat().getChannels();
    this.targetChannels = targetChannels;
    this.ais = ais;
    final AudioFormat srcFormat = ais.getFormat();
    // Frame size scales with the channel count: bytes per single-channel
    // sample (frameSize / sourceChannels) times the new channel count.
    final int targetFrameSize =
            (srcFormat.getFrameSize() / sourceChannels) * targetChannels;
    targetFormat = new AudioFormat(srcFormat.getEncoding(),
            srcFormat.getSampleRate(), srcFormat.getSampleSizeInBits(),
            targetChannels, targetFrameSize, srcFormat.getFrameRate(),
            srcFormat.isBigEndian());
}
 
开发者ID:SunburstApps,项目名称:OpenJSharp,代码行数:13,代码来源:AudioFloatFormatConverter.java


注:本文中的javax.sound.sampled.AudioFormat.getFrameSize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。