This page collects typical usage examples of the Java field javax.sound.sampled.AudioSystem.NOT_SPECIFIED. If you are wondering what AudioSystem.NOT_SPECIFIED is for and how to use it, the curated examples below should help. You can also read more about the enclosing class, javax.sound.sampled.AudioSystem.
The following 15 code examples show AudioSystem.NOT_SPECIFIED in use, sorted by popularity by default.
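Before the examples, a minimal self-contained sketch (ours, not part of the collection) showing what the constant actually is:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class NotSpecifiedDemo {
    public static void main(String[] args) {
        // AudioSystem.NOT_SPECIFIED is simply the int constant -1, used as a
        // sentinel wherever a sample rate, frame rate, frame size, buffer
        // size, or stream length is unknown.
        System.out.println(AudioSystem.NOT_SPECIFIED); // prints -1

        // A format may leave both rates open:
        AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                AudioSystem.NOT_SPECIFIED, // sample rate unknown
                16, 2, 4,
                AudioSystem.NOT_SPECIFIED, // frame rate unknown
                false);
        System.out.println(format.getSampleRate()); // prints -1.0
    }
}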
Example 1: write
public int write(AudioInputStream stream, AudioFileFormat.Type fileType, File out) throws IOException {
    // throws IllegalArgumentException if not supported
    WaveFileFormat waveFileFormat = (WaveFileFormat) getAudioFileFormat(fileType, stream);

    // first write the file without worrying about length fields
    FileOutputStream fos = new FileOutputStream(out); // throws IOException
    BufferedOutputStream bos = new BufferedOutputStream(fos, bisBufferSize);
    int bytesWritten = writeWaveFile(stream, waveFileFormat, bos);
    bos.close();

    // now, if length fields were not specified, calculate them,
    // open as a random access file, write the appropriate fields,
    // close again....
    if (waveFileFormat.getByteLength() == AudioSystem.NOT_SPECIFIED) {
        int dataLength = bytesWritten - waveFileFormat.getHeaderSize();
        int riffLength = dataLength + waveFileFormat.getHeaderSize() - 8;

        RandomAccessFile raf = new RandomAccessFile(out, "rw");
        // skip RIFF magic
        raf.skipBytes(4);
        raf.writeInt(big2little(riffLength));
        // skip WAVE magic, fmt_ magic, fmt_ length, fmt_ chunk, data magic
        raf.skipBytes(4 + 4 + 4 + WaveFileFormat.getFmtChunkSize(waveFileFormat.getWaveType()) + 4);
        raf.writeInt(big2little(dataLength));
        // that's all
        raf.close();
    }
    return bytesWritten;
}
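For context, a minimal usage sketch (ours, with hypothetical file names): application code normally reaches a provider method like the one above through the AudioSystem.write facade rather than calling it directly.

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class WriteWaveDemo {
    public static void main(String[] args) throws Exception {
        // Decode an existing file, then re-encode it as WAVE; a provider like
        // the one above patches the RIFF length fields afterwards when the
        // stream's byte length was NOT_SPECIFIED.
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("input.wav"));
        int bytesWritten = AudioSystem.write(in, AudioFileFormat.Type.WAVE, new File("copy.wav"));
        System.out.println(bytesWritten + " bytes written");
    }
}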
Example 2: open
public void open(AudioInputStream stream) throws LineUnavailableException, IOException {
    if (isOpen()) {
        throw new IllegalStateException("Clip is already open with format "
                + getFormat() + " and frame length of " + getFrameLength());
    }
    if (AudioFloatConverter.getConverter(stream.getFormat()) == null)
        throw new IllegalArgumentException("Invalid format: "
                + stream.getFormat().toString());
    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
        // known length: read straight into a correctly sized array
        byte[] data = new byte[(int) stream.getFrameLength()
                * stream.getFormat().getFrameSize()];
        int readsize = 512 * stream.getFormat().getFrameSize();
        int len = 0;
        while (len != data.length) {
            if (readsize > data.length - len)
                readsize = data.length - len;
            int ret = stream.read(data, len, readsize);
            if (ret == -1)
                break;
            if (ret == 0)
                Thread.yield();
            len += ret;
        }
        open(stream.getFormat(), data, 0, len);
    } else {
        // unknown length: buffer the whole stream first
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] b = new byte[512 * stream.getFormat().getFrameSize()];
        int r = 0;
        while ((r = stream.read(b)) != -1) {
            if (r == 0)
                Thread.yield();
            baos.write(b, 0, r);
        }
        open(stream.getFormat(), baos.toByteArray(), 0, baos.size());
    }
}
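A hedged caller-side sketch (ours, hypothetical file name): which branch of open(AudioInputStream) above runs depends on whether the stream reports a real frame length or NOT_SPECIFIED.

import java.io.File;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;

public class ClipDemo {
    public static void main(String[] args) throws Exception {
        AudioInputStream stream = AudioSystem.getAudioInputStream(new File("sound.wav"));
        Clip clip = AudioSystem.getClip();
        clip.open(stream); // buffers the whole stream, as in the example
        clip.start();
    }
}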
Example 3: getMaxLines
public int getMaxLines(Line.Info info) {
    if (info.getLineClass() == SourceDataLine.class)
        return AudioSystem.NOT_SPECIFIED;
    if (info.getLineClass() == Clip.class)
        return AudioSystem.NOT_SPECIFIED;
    return 0;
}
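Here NOT_SPECIFIED means "no fixed limit" rather than zero. A minimal caller-side sketch of how one might interpret the result (ours, assuming the default mixer is available):

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Line;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.SourceDataLine;

public class MaxLinesDemo {
    public static void main(String[] args) {
        Mixer mixer = AudioSystem.getMixer(null); // null selects the default mixer
        int max = mixer.getMaxLines(new Line.Info(SourceDataLine.class));
        // NOT_SPECIFIED (-1) signals an unbounded number of lines
        System.out.println(max == AudioSystem.NOT_SPECIFIED
                ? "unlimited" : Integer.toString(max));
    }
}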
Example 4: getInputStream
public static AudioFloatInputStream getInputStream(AudioFormat format,
        byte[] buffer, int offset, int len) {
    AudioFloatConverter converter = AudioFloatConverter.getConverter(format);
    if (converter != null)
        return new BytaArrayAudioFloatInputStream(converter, buffer, offset, len);

    InputStream stream = new ByteArrayInputStream(buffer, offset, len);
    long aLen = format.getFrameSize() == AudioSystem.NOT_SPECIFIED
            ? AudioSystem.NOT_SPECIFIED : len / format.getFrameSize();
    AudioInputStream astream = new AudioInputStream(stream, format, aLen);
    return getInputStream(astream);
}
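The NOT_SPECIFIED guard in the fallback branch is worth isolating. A self-contained sketch of the same rule (the method name is ours, not from the source): if the format has no frame size, the frame count must also be reported as unknown rather than computed.

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class FrameCountDemo {
    // Same rule as the example: no frame size means no frame count.
    static long frameCount(AudioFormat format, int byteLen) {
        return format.getFrameSize() == AudioSystem.NOT_SPECIFIED
                ? AudioSystem.NOT_SPECIFIED
                : byteLen / format.getFrameSize();
    }

    public static void main(String[] args) {
        AudioFormat pcm = new AudioFormat(44100f, 16, 2, true, false);
        System.out.println(frameCount(pcm, 8000)); // 8000 / 4 = 2000 frames
    }
}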
Example 5: addFormat
private static void addFormat(Vector<AudioFormat> v, int bits, int frameSizeInBytes, int channels,
        float sampleRate, int encoding, boolean signed, boolean bigEndian) {
    AudioFormat.Encoding enc = null;
    switch (encoding) {
    case PCM:
        enc = signed ? AudioFormat.Encoding.PCM_SIGNED : AudioFormat.Encoding.PCM_UNSIGNED;
        break;
    case ULAW:
        enc = AudioFormat.Encoding.ULAW;
        if (bits != 8) {
            if (Printer.err) Printer.err("DirectAudioDevice.addFormat called with ULAW, but bitsPerSample=" + bits);
            bits = 8; frameSizeInBytes = channels;
        }
        break;
    case ALAW:
        enc = AudioFormat.Encoding.ALAW;
        if (bits != 8) {
            if (Printer.err) Printer.err("DirectAudioDevice.addFormat called with ALAW, but bitsPerSample=" + bits);
            bits = 8; frameSizeInBytes = channels;
        }
        break;
    }
    if (enc == null) {
        if (Printer.err) Printer.err("DirectAudioDevice.addFormat called with unknown encoding: " + encoding);
        return;
    }
    if (frameSizeInBytes <= 0) {
        if (channels > 0) {
            frameSizeInBytes = ((bits + 7) / 8) * channels;
        } else {
            frameSizeInBytes = AudioSystem.NOT_SPECIFIED;
        }
    }
    v.add(new AudioFormat(enc, sampleRate, bits, channels, frameSizeInBytes, sampleRate, bigEndian));
}
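The fallback frame-size rule at the end is easy to miss. A small sketch that isolates it (class and method names are ours): round the sample size up to whole bytes and multiply by the channel count; with no channel count there is no frame size either.

import javax.sound.sampled.AudioSystem;

public class FrameSizeDemo {
    // Same fallback rule as addFormat above.
    static int defaultFrameSize(int bits, int channels) {
        return channels > 0 ? ((bits + 7) / 8) * channels
                            : AudioSystem.NOT_SPECIFIED;
    }

    public static void main(String[] args) {
        System.out.println(defaultFrameSize(16, 2)); // 4 bytes per frame
        System.out.println(defaultFrameSize(24, 1)); // 3 bytes per frame
        System.out.println(defaultFrameSize(16, AudioSystem.NOT_SPECIFIED)); // -1
    }
}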
Example 6: write
@Override
public int write(AudioInputStream stream, Type fileType, File out) throws IOException {
    Objects.requireNonNull(stream);
    Objects.requireNonNull(fileType);
    Objects.requireNonNull(out);

    // throws IllegalArgumentException if not supported
    AuFileFormat auFileFormat = (AuFileFormat) getAudioFileFormat(fileType, stream);

    // first write the file without worrying about length fields
    FileOutputStream fos = new FileOutputStream(out); // throws IOException
    BufferedOutputStream bos = new BufferedOutputStream(fos, bisBufferSize);
    int bytesWritten = writeAuFile(stream, auFileFormat, bos);
    bos.close();

    // now, if length fields were not specified, calculate them,
    // open as a random access file, write the appropriate fields,
    // close again....
    if (auFileFormat.getByteLength() == AudioSystem.NOT_SPECIFIED) {
        // $$kk: 10.22.99: jan: please either implement this or throw an exception!
        // $$fb: 2001-07-13: done. Fixes Bug 4479981
        RandomAccessFile raf = new RandomAccessFile(out, "rw");
        if (raf.length() <= 0x7FFFFFFFL) {
            // skip AU magic and data offset field
            raf.skipBytes(8);
            raf.writeInt(bytesWritten - AuFileFormat.AU_HEADERSIZE);
            // that's all
        }
        raf.close();
    }
    return bytesWritten;
}
Example 7: getAudioFileFormat
/**
 * Returns the AudioFileFormat describing the file that will be written from this AudioInputStream.
 * Throws IllegalArgumentException if not supported.
 */
private AudioFileFormat getAudioFileFormat(Type type, AudioInputStream stream) {
    if (!isFileTypeSupported(type, stream)) {
        throw new IllegalArgumentException("File type " + type + " not supported.");
    }
    AudioFormat streamFormat = stream.getFormat();
    AudioFormat.Encoding encoding = streamFormat.getEncoding();
    if (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        encoding = AudioFormat.Encoding.PCM_SIGNED;
    }

    // We always write big-endian AU files; that is by far the standard.
    AudioFormat format = new AudioFormat(encoding,
                                         streamFormat.getSampleRate(),
                                         streamFormat.getSampleSizeInBits(),
                                         streamFormat.getChannels(),
                                         streamFormat.getFrameSize(),
                                         streamFormat.getFrameRate(), true);
    int fileSize;
    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
        fileSize = (int) stream.getFrameLength() * streamFormat.getFrameSize() + AuFileFormat.AU_HEADERSIZE;
    } else {
        fileSize = AudioSystem.NOT_SPECIFIED;
    }
    return new AuFileFormat(Type.AU, fileSize, format, (int) stream.getFrameLength());
}
Example 8: AbstractDataLine
/**
 * Constructs a new AbstractDataLine with an unspecified (default) buffer size.
 */
protected AbstractDataLine(DataLine.Info info, AbstractMixer mixer, Control[] controls) {
    this(info, mixer, controls, null, AudioSystem.NOT_SPECIFIED);
}
Example 9: DummySourceDataLine
public DummySourceDataLine() {
    ArrayList<AudioFormat> formats = new ArrayList<AudioFormat>();
    for (int channels = 1; channels <= 2; channels++) {
        // 8-bit PCM: frame size equals the channel count, endianness irrelevant
        formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                AudioSystem.NOT_SPECIFIED, 8, channels, channels,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                AudioSystem.NOT_SPECIFIED, 8, channels, channels,
                AudioSystem.NOT_SPECIFIED, false));
        // 16- and 24-bit PCM, signed and unsigned, both endiannesses
        for (int bits = 16; bits < 32; bits += 8) {
            formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, false));
            formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, false));
            formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, true));
            formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, true));
        }
        // 32- and 64-bit float, both endiannesses
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 32, channels, channels * 4,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 32, channels, channels * 4,
                AudioSystem.NOT_SPECIFIED, true));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 64, channels, channels * 8,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 64, channels, channels * 8,
                AudioSystem.NOT_SPECIFIED, true));
    }
    AudioFormat[] formats_array = formats.toArray(new AudioFormat[formats.size()]);
    sourceLineInfo = new DataLine.Info(SourceDataLine.class, formats_array,
            AudioSystem.NOT_SPECIFIED, AudioSystem.NOT_SPECIFIED);
}
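A short sketch (ours) of what a DataLine.Info built with NOT_SPECIFIED buffer bounds, as in the constructor above, looks like from the caller's side: it places no constraint on buffer size.

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;

public class LineInfoDemo {
    public static void main(String[] args) {
        AudioFormat cd = new AudioFormat(44100f, 16, 2, true, false);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class,
                new AudioFormat[] { cd },
                AudioSystem.NOT_SPECIFIED, AudioSystem.NOT_SPECIFIED);
        System.out.println(info.getMinBufferSize()); // -1, i.e. no minimum
        System.out.println(info.getMaxBufferSize()); // -1, i.e. no maximum
    }
}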
Example 10: getFrameLength
public long getFrameLength() {
    // deliberately reports "unknown" instead of delegating: // ais.getFrameLength();
    return AudioSystem.NOT_SPECIFIED;
}
Example 11: internal_getAudioFileFormat
private AudioFileFormat internal_getAudioFileFormat(InputStream stream)
        throws UnsupportedAudioFileException, IOException {
    RIFFReader riffiterator = new RIFFReader(stream);
    if (!riffiterator.getFormat().equals("RIFF"))
        throw new UnsupportedAudioFileException();
    if (!riffiterator.getType().equals("WAVE"))
        throw new UnsupportedAudioFileException();

    boolean fmt_found = false;
    boolean data_found = false;
    int channels = 1;
    long samplerate = 1;
    int framesize = 1;
    int bits = 1;

    while (riffiterator.hasNextChunk()) {
        RIFFReader chunk = riffiterator.nextChunk();
        if (chunk.getFormat().equals("fmt ")) {
            fmt_found = true;
            int format = chunk.readUnsignedShort();
            if (format != 3) // WAVE_FORMAT_IEEE_FLOAT only
                throw new UnsupportedAudioFileException();
            channels = chunk.readUnsignedShort();
            samplerate = chunk.readUnsignedInt();
            /* framerate = */ chunk.readUnsignedInt();
            framesize = chunk.readUnsignedShort();
            bits = chunk.readUnsignedShort();
        }
        if (chunk.getFormat().equals("data")) {
            data_found = true;
            break;
        }
    }

    if (!fmt_found)
        throw new UnsupportedAudioFileException();
    if (!data_found)
        throw new UnsupportedAudioFileException();

    AudioFormat audioformat = new AudioFormat(
            Encoding.PCM_FLOAT, samplerate, bits, channels,
            framesize, samplerate, false);
    return new AudioFileFormat(AudioFileFormat.Type.WAVE, audioformat,
            AudioSystem.NOT_SPECIFIED);
}
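A caller-side sketch (ours, hypothetical file name): since the reader above reports the byte length as NOT_SPECIFIED, callers must treat -1 as "unknown", never as an actual size.

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

public class FileFormatDemo {
    public static void main(String[] args) throws Exception {
        AudioFileFormat aff = AudioSystem.getAudioFileFormat(new File("float.wav"));
        if (aff.getByteLength() == AudioSystem.NOT_SPECIFIED)
            System.out.println("byte length unknown");
    }
}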
Example 12: SoftMixingMixer
public SoftMixingMixer() {
    sourceLineInfo = new javax.sound.sampled.Line.Info[2];
    ArrayList<AudioFormat> formats = new ArrayList<AudioFormat>();
    for (int channels = 1; channels <= 2; channels++) {
        formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                AudioSystem.NOT_SPECIFIED, 8, channels, channels,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                AudioSystem.NOT_SPECIFIED, 8, channels, channels,
                AudioSystem.NOT_SPECIFIED, false));
        for (int bits = 16; bits < 32; bits += 8) {
            formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, false));
            formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, false));
            formats.add(new AudioFormat(Encoding.PCM_SIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, true));
            formats.add(new AudioFormat(Encoding.PCM_UNSIGNED,
                    AudioSystem.NOT_SPECIFIED, bits, channels,
                    channels * bits / 8, AudioSystem.NOT_SPECIFIED, true));
        }
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 32, channels, channels * 4,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 32, channels, channels * 4,
                AudioSystem.NOT_SPECIFIED, true));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 64, channels, channels * 8,
                AudioSystem.NOT_SPECIFIED, false));
        formats.add(new AudioFormat(Encoding.PCM_FLOAT,
                AudioSystem.NOT_SPECIFIED, 64, channels, channels * 8,
                AudioSystem.NOT_SPECIFIED, true));
    }
    AudioFormat[] formats_array = formats.toArray(new AudioFormat[formats.size()]);
    sourceLineInfo[0] = new DataLine.Info(SourceDataLine.class, formats_array,
            AudioSystem.NOT_SPECIFIED, AudioSystem.NOT_SPECIFIED);
    sourceLineInfo[1] = new DataLine.Info(Clip.class, formats_array,
            AudioSystem.NOT_SPECIFIED, AudioSystem.NOT_SPECIFIED);
}
Example 13: getLevel
/**
* This implementation returns AudioSystem.NOT_SPECIFIED.
*/
@Override
public final float getLevel() {
return (float)AudioSystem.NOT_SPECIFIED;
}
Example 14: getLevel
public float getLevel() {
return AudioSystem.NOT_SPECIFIED;
}
Example 15: getLevel
@Override
public float getLevel() {
return AudioSystem.NOT_SPECIFIED;
}
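Examples 13 through 15 all fall back to the sentinel, so a caller-side check is the natural counterpart. A minimal sketch (the helper name is ours):

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;

public class LevelCheck {
    // getLevel() may legally return AudioSystem.NOT_SPECIFIED (-1), as the
    // three implementations above do; treat that as "no level available".
    static boolean hasLevel(DataLine line) {
        return line.getLevel() != AudioSystem.NOT_SPECIFIED;
    }
}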