

Java AudioInputStream.read Method Code Examples

This article collects typical usage examples of the Java method javax.sound.sampled.AudioInputStream.read. If you are unsure what AudioInputStream.read does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, javax.sound.sampled.AudioInputStream.


The following presents 13 code examples of the AudioInputStream.read method, sorted by popularity by default.
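
Before the project examples, here is a minimal, self-contained sketch of the core pattern most of them share: read the stream in chunks and hand each chunk to a SourceDataLine until read returns -1. The class name ReadLoopSketch and the file name clip.wav are placeholder assumptions, not taken from any of the projects below; any PCM WAV file will do.

import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.UnsupportedAudioFileException;

public class ReadLoopSketch {
    public static void main(String[] args)
            throws IOException, UnsupportedAudioFileException, LineUnavailableException {
        // "clip.wav" is a placeholder path; any PCM WAV file will do.
        try (AudioInputStream ais = AudioSystem.getAudioInputStream(new File("clip.wav"))) {
            AudioFormat format = ais.getFormat();
            SourceDataLine line = AudioSystem.getSourceDataLine(format);
            line.open(format);
            line.start();

            // Use a buffer that is a multiple of the frame size so frames are never split.
            byte[] buffer = new byte[4096 - (4096 % format.getFrameSize())];
            int count;
            // read returns the number of bytes actually read, or -1 at end of stream.
            while ((count = ais.read(buffer, 0, buffer.length)) != -1) {
                line.write(buffer, 0, count);
            }
            line.drain();
            line.close();
        }
    }
}

The project examples below apply the same read loop in different settings: pushing synthesizer output to a line (Examples 1 and 7), transcoding between formats (Example 2), and buffering an entire stream into memory (Examples 3, 5, 9 and 10).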

Example 1: run

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
@Override
public void run() {
    byte[] buffer = SoftAudioPusher.this.buffer;
    AudioInputStream ais = SoftAudioPusher.this.ais;
    SourceDataLine sourceDataLine = SoftAudioPusher.this.sourceDataLine;

    try {
        while (active) {
            // Read from audio source
            int count = ais.read(buffer);
            if(count < 0) break;
            // Write byte buffer to source output
            sourceDataLine.write(buffer, 0, count);
        }
    } catch (IOException e) {
        active = false;
        //e.printStackTrace();
    }
}
 
Developer: AdoptOpenJDK, Project: openjdk-jdk10, Lines of code: 20, Source file: SoftAudioPusher.java

Example 2: run

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
@Override
public void run() {
    log("ConversionThread[" + num + "] started.");
    try {
        InputStream inStream = new ByteArrayInputStream(pcmBuffer);

        AudioInputStream pcmStream = new AudioInputStream(
                inStream, pcmFormat, AudioSystem.NOT_SPECIFIED);
        AudioInputStream alawStream = AudioSystem.getAudioInputStream(alawFormat, pcmStream);

        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        int read = 0;
        byte[] data = new byte[4096];
        while((read = alawStream.read(data)) != -1) {
            outStream.write(data, 0, read);
        }
        alawStream.close();
        resultArray = outStream.toByteArray();
    } catch (Exception ex) {
        log("ConversionThread[" + num + "] exception:");
        log(ex);
    }
    log("ConversionThread[" + num + "] completed.");
}
 
Developer: lambdalab-mirror, Project: jdk8u-jdk, Lines of code: 25, Source file: AlawEncoderSync.java

Example 3: loadByteAudio

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
private byte[] loadByteAudio(AudioInputStream ais, int toRead)
{
	int totalRead = 0;
	byte[] rawBytes = new byte[toRead];
	try
	{
		// we have to read in chunks because the decoded stream won't
		// read more than about 2000 bytes at a time
		while (totalRead < toRead)
		{
			int actualRead = ais.read(rawBytes, totalRead, toRead - totalRead);
			if (actualRead < 1)
				break;
			totalRead += actualRead;
		}
		ais.close();
	}
	catch (Exception ioe)
	{
		error("Error loading file into memory: " + ioe.getMessage());
	}
	debug("Needed to read " + toRead + " actually read " + totalRead);
	return rawBytes;
}
 
Developer: JacobRoth, Project: romanov, Lines of code: 25, Source file: JSMinim.java

Example 4: playSound

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
/**
 * Play a sound.
 *
 * @param in The {@code AudioInputStream} to play.
 * @return True if the stream was played without incident.
 * @exception IOException if unable to read or write the sound data.
 */
private boolean playSound(AudioInputStream in) throws IOException {
    boolean ret = false;

    SourceDataLine line = openLine(in.getFormat());
    if (line == null) return false;
    try {
        startPlaying();
        int rd;
        while (keepPlaying() && (rd = in.read(data)) > 0) {
            line.write(data, 0, rd);
        }
        ret = true;
    } finally {
        stopPlaying();
        line.drain();
        line.stop();
        line.close();
    }
    return ret;
}
 
Developer: FreeCol, Project: freecol, Lines of code: 28, Source file: SoundPlayer.java

Example 5: readStream

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
private void readStream(AudioInputStream as, long byteLen) throws IOException {
    // arrays "only" max. 2GB
    int intLen;
    if (byteLen > 2147483647) {
        intLen = 2147483647;
    } else {
        intLen = (int) byteLen;
    }
    loadedAudio = new byte[intLen];
    loadedAudioByteLength = 0;

    // this loop may throw an IOException
    while (true) {
        int bytesRead = as.read(loadedAudio, loadedAudioByteLength, intLen - loadedAudioByteLength);
        if (bytesRead <= 0) {
            as.close();
            break;
        }
        loadedAudioByteLength += bytesRead;
    }
}
 
Developer: SunburstApps, Project: OpenJSharp, Lines of code: 22, Source file: JavaSoundAudioClip.java

Example 6: write

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public void write(AudioInputStream stream, RIFFWriter writer)
        throws IOException {

    RIFFWriter fmt_chunk = writer.writeChunk("fmt ");

    AudioFormat format = stream.getFormat();
    fmt_chunk.writeUnsignedShort(3); // WAVE_FORMAT_IEEE_FLOAT
    fmt_chunk.writeUnsignedShort(format.getChannels());
    fmt_chunk.writeUnsignedInt((int) format.getSampleRate());
    fmt_chunk.writeUnsignedInt(((int) format.getFrameRate())
            * format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getSampleSizeInBits());
    fmt_chunk.close();
    RIFFWriter data_chunk = writer.writeChunk("data");
    byte[] buff = new byte[1024];
    int len;
    while ((len = stream.read(buff, 0, buff.length)) != -1)
        data_chunk.write(buff, 0, len);
    data_chunk.close();
}
 
Developer: SunburstApps, Project: OpenJSharp, Lines of code: 22, Source file: WaveFloatFileWriter.java

Example 7: run

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public void run() {
    byte[] buffer = SoftAudioPusher.this.buffer;
    AudioInputStream ais = SoftAudioPusher.this.ais;
    SourceDataLine sourceDataLine = SoftAudioPusher.this.sourceDataLine;

    try {
        while (active) {
            // Read from audio source
            int count = ais.read(buffer);
            if(count < 0) break;
            // Write byte buffer to source output
            sourceDataLine.write(buffer, 0, count);
        }
    } catch (IOException e) {
        active = false;
        //e.printStackTrace();
    }

}
 
Developer: SunburstApps, Project: OpenJSharp, Lines of code: 20, Source file: SoftAudioPusher.java

Example 8: load

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
@Override
public AudioClip load(String file) {
    String path = Oasis.getFileSystem().find(file); 
    
    if (path == null) {
        log.warning("Could not find file: " + file);
        return null; 
    }
    
    try {
        AudioInputStream in = AudioSystem.getAudioInputStream(new File(path));
        
        int sampleRate = (int) in.getFormat().getSampleRate(); 
        int channels = in.getFormat().getChannels(); 
        
        int length = (int) in.getFrameLength(); 
        
        int bytesPerFrame = in.getFormat().getFrameSize(); 
        
        byte[] bytes = new byte[length * bytesPerFrame]; 
        
        // Note: a single read() call may return fewer bytes than the buffer holds;
        // robust code loops until read() returns -1 (see the sketch after this example).
        in.read(bytes); 
        
        AudioClip clip = AudioClip.create(
                bytes.length, 
                sampleRate, 
                channels > 1, 
                bytesPerFrame / channels > 1, 
                channels == 1); 
        clip.setData(bytes); 
        
        return clip; 
    } catch (Exception e) {
        e.printStackTrace();
        log.warning("Could not read WAV data: " + file); 
        return null; 
    } 
}
 
Developer: HuskyGameDev, Project: 2017-TeamEngine, Lines of code: 39, Source file: WavLoader.java
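
A caveat for Example 8: a single call to in.read(bytes) is not guaranteed to fill the buffer, since read may return after delivering fewer bytes than requested. A minimal sketch of a helper that keeps reading until the buffer is full or the stream ends is shown below; the method name readFully is illustrative and not part of the WavLoader project, and Examples 3 and 5 above implement essentially the same loop inline.

import java.io.IOException;
import javax.sound.sampled.AudioInputStream;

// Illustrative helper (not from WavLoader): fill the buffer from the stream,
// stopping early only at end of stream; returns the number of bytes actually read.
static int readFully(AudioInputStream in, byte[] buffer) throws IOException {
    int totalRead = 0;
    while (totalRead < buffer.length) {
        int n = in.read(buffer, totalRead, buffer.length - totalRead);
        if (n <= 0) { // -1 signals end of stream
            break;
        }
        totalRead += n;
    }
    return totalRead;
}

Example 8 could then call readFully(in, bytes) instead of a bare in.read(bytes), and size the clip from the returned count rather than from the estimated frame length.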

Example 9: getSoundbank

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public Soundbank getSoundbank(AudioInputStream ais)
        throws InvalidMidiDataException, IOException {
    try {
        byte[] buffer;
        if (ais.getFrameLength() == -1) {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            byte[] buff = new byte[1024
                    - (1024 % ais.getFormat().getFrameSize())];
            int ret;
            while ((ret = ais.read(buff)) != -1) {
                baos.write(buff, 0, ret);
            }
            ais.close();
            buffer = baos.toByteArray();
        } else {
            buffer = new byte[(int) (ais.getFrameLength()
                                * ais.getFormat().getFrameSize())];
            new DataInputStream(ais).readFully(buffer);
        }
        ModelByteBufferWavetable osc = new ModelByteBufferWavetable(
                new ModelByteBuffer(buffer), ais.getFormat(), -4800);
        ModelPerformer performer = new ModelPerformer();
        performer.getOscillators().add(osc);

        SimpleSoundbank sbk = new SimpleSoundbank();
        SimpleInstrument ins = new SimpleInstrument();
        ins.add(performer);
        sbk.addInstrument(ins);
        return sbk;
    } catch (Exception e) {
        return null;
    }
}
 
Developer: SunburstApps, Project: OpenJSharp, Lines of code: 34, Source file: AudioFileSoundbankReader.java

Example 10: open

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public void open(AudioInputStream stream) throws LineUnavailableException,
        IOException {
    if (isOpen()) {
        throw new IllegalStateException("Clip is already open with format "
                + getFormat() + " and frame length of " + getFrameLength());
    }
    if (AudioFloatConverter.getConverter(stream.getFormat()) == null)
        throw new IllegalArgumentException("Invalid format : "
                + stream.getFormat().toString());

    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
        byte[] data = new byte[(int) stream.getFrameLength()
                * stream.getFormat().getFrameSize()];
        int readsize = 512 * stream.getFormat().getFrameSize();
        int len = 0;
        while (len != data.length) {
            if (readsize > data.length - len)
                readsize = data.length - len;
            int ret = stream.read(data, len, readsize);
            if (ret == -1)
                break;
            if (ret == 0)
                Thread.yield();
            len += ret;
        }
        open(stream.getFormat(), data, 0, len);
    } else {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] b = new byte[512 * stream.getFormat().getFrameSize()];
        int r = 0;
        while ((r = stream.read(b)) != -1) {
            if (r == 0)
                Thread.yield();
            baos.write(b, 0, r);
        }
        open(stream.getFormat(), baos.toByteArray(), 0, baos.size());
    }

}
 
Developer: SunburstApps, Project: OpenJSharp, Lines of code: 40, Source file: SoftMixingClip.java

Example 11: test

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public static void test(Soundbank soundbank) throws Exception {

        // Create instance of synthesizer using the testing soundbank above
        AudioSynthesizer synth = new SoftSynthesizer();
        AudioInputStream stream = synth.openStream(format, null);
        synth.unloadAllInstruments(synth.getDefaultSoundbank());
        synth.loadAllInstruments(soundbank);
        Receiver recv = synth.getReceiver();

        // Set volume to max and turn reverb off
        ShortMessage reverb_off = new ShortMessage();
        reverb_off.setMessage(ShortMessage.CONTROL_CHANGE, 91, 0);
        recv.send(reverb_off, -1);
        ShortMessage full_volume = new ShortMessage();
        full_volume.setMessage(ShortMessage.CONTROL_CHANGE, 7, 127);
        recv.send(full_volume, -1);

        Random random = new Random(3485934583945l);

        // Create random timestamps
        long[] test_timestamps = new long[30];
        for (int i = 1; i < test_timestamps.length; i++) {
            test_timestamps[i] = i * 44100
                    + (int) (random.nextDouble() * 22050.0);
        }

        // Send midi note on message to synthesizer
        for (int i = 0; i < test_timestamps.length; i++) {
            ShortMessage midi_on = new ShortMessage();
            midi_on.setMessage(ShortMessage.NOTE_ON, 69, 127);
            recv.send(midi_on,
                    (long) ((test_timestamps[i] / 44100.0) * 1000000.0));
        }

        // Measure timing from rendered audio
        float[] fbuffer = new float[100];
        byte[] buffer = new byte[fbuffer.length * format.getFrameSize()];
        long firsts = -1;
        int counter = 0;
        long s = 0;
        long max_jitter = 0;
        outerloop: for (int k = 0; k < 10000000; k++) {
            stream.read(buffer);
            AudioFloatConverter.getConverter(format).toFloatArray(buffer,
                    fbuffer);
            for (int i = 0; i < fbuffer.length; i++) {
                if (fbuffer[i] != 0) {
                    if (firsts == -1)
                        firsts = s;

                    long measure_time = (s - firsts);
                    long predicted_time = test_timestamps[counter];

                    long jitter = Math.abs(measure_time - predicted_time);

                    if (jitter > 10)
                        max_jitter = jitter;

                    counter++;
                    if (counter == test_timestamps.length)
                        break outerloop;
                }
                s++;
            }
        }
        synth.close();

        if (counter == 0)
            throw new Exception("Nothing was measured!");

        if (max_jitter != 0) {
            throw new Exception("Jitter has occurred! "
                    + "(max jitter = " + max_jitter + ")");
        }

}
 
Developer: AdoptOpenJDK, Project: openjdk-jdk10, Lines of code: 77, Source file: TestPreciseTimestampRendering.java

Example 12: read

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
public int read(byte[] b, int off, int len) throws IOException {
     AudioInputStream local_stream = stream;
     if(local_stream != null)
         return local_stream.read(b, off, len);
     else
     {
         int flen = len / samplesize;
         if(silentbuffer == null || silentbuffer.length < flen)
             silentbuffer = new float[flen];
         converter.toByteArray(silentbuffer, flen, b, off);

         silent_samples += (long)((len / framesize));

         if(pusher != null)
         if(weak_stream_link.get() == null)
         {
             Runnable runnable = new Runnable()
             {
                 SoftAudioPusher _pusher = pusher;
                 AudioInputStream _jitter_stream = jitter_stream;
                 SourceDataLine _sourceDataLine = sourceDataLine;
                 public void run()
                 {
                     _pusher.stop();
                     if(_jitter_stream != null)
                        try {
                            _jitter_stream.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                     if(_sourceDataLine != null)
                         _sourceDataLine.close();
                 }
             };
             pusher = null;
             jitter_stream = null;
             sourceDataLine = null;
             new Thread(runnable).start();
         }
         return len;
     }
}
 
Developer: lambdalab-mirror, Project: jdk8u-jdk, Lines of code: 43, Source file: SoftSynthesizer.java

Example 13: read

import javax.sound.sampled.AudioInputStream; // import the class this method depends on
@Override
public int read(byte[] b, int off, int len) throws IOException {
     AudioInputStream local_stream = stream;
     if(local_stream != null)
         return local_stream.read(b, off, len);
     else
     {
         int flen = len / samplesize;
         if(silentbuffer == null || silentbuffer.length < flen)
             silentbuffer = new float[flen];
         converter.toByteArray(silentbuffer, flen, b, off);

         silent_samples += (long)((len / framesize));

         if(pusher != null)
         if(weak_stream_link.get() == null)
         {
             Runnable runnable = new Runnable()
             {
                 SoftAudioPusher _pusher = pusher;
                 AudioInputStream _jitter_stream = jitter_stream;
                 SourceDataLine _sourceDataLine = sourceDataLine;
                 @Override
                 public void run()
                 {
                     _pusher.stop();
                     if(_jitter_stream != null)
                        try {
                            _jitter_stream.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                     if(_sourceDataLine != null)
                         _sourceDataLine.close();
                 }
             };
             pusher = null;
             jitter_stream = null;
             sourceDataLine = null;
             new Thread(null, runnable, "Synthesizer",0,false).start();
         }
         return len;
     }
}
 
Developer: AdoptOpenJDK, Project: openjdk-jdk10, Lines of code: 45, Source file: SoftSynthesizer.java


Note: The javax.sound.sampled.AudioInputStream.read examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not republish without permission.