当前位置: 首页>>代码示例>>Java>>正文


Java AudioSynthesizer.close方法代码示例

本文整理汇总了Java中com.sun.media.sound.AudioSynthesizer.close方法的典型用法代码示例。如果您正苦于以下问题:Java AudioSynthesizer.close方法的具体用法?Java AudioSynthesizer.close怎么用?Java AudioSynthesizer.close使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在com.sun.media.sound.AudioSynthesizer的用法示例。


在下文中一共展示了AudioSynthesizer.close方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: main

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception {
    // Checks that a Receiver obtained from an AudioSynthesizer rejects
    // messages after the synthesizer has been closed.
    AudioSynthesizer synth = new SoftSynthesizer();
    Receiver recv = synth.getReceiver();
    assertTrue(recv != null);
    ShortMessage sm = new ShortMessage();
    sm.setMessage(ShortMessage.NOTE_OFF, 0, 64, 64);
    synth.open(new DummySourceDataLine(), null);
    recv.send(sm, -1);
    synth.close();
    // BUG FIX: the original threw the "Exception not thrown!" failure
    // inside the try block and then caught it with catch(Exception),
    // so the test could never fail. Track the expected exception with
    // a flag and fail outside the try/catch instead.
    boolean thrown = false;
    try
    {
        recv.send(sm, -1);
    }
    catch(Exception e)
    {
        // Expected: sending on a closed synthesizer's receiver fails
        thrown = true;
    }
    if (!thrown)
        throw new RuntimeException("Exception not thrown!");
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:20,代码来源:GetReceiver2.java

示例2: main

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception
{
    // Stress test: saturate the synthesizer's polyphony with note on/off
    // pairs, render audio, and verify every voice eventually goes
    // inactive (i.e. voices are not leaked on polyphony overflow).
    AudioSynthesizer synth = new SoftSynthesizer();
    AudioFormat format = new AudioFormat(44100, 16, 2, true, false);
    AudioInputStream stream = synth.openStream(format, null);

    // Make all voices busy, e.g.
    // send midi on and midi off on all available voices
    MidiChannel ch1 = synth.getChannels()[0];
    ch1.programChange(48); // Use continuous instrument like string ensemble
    for (int i = 0; i < synth.getMaxPolyphony(); i++) {
        ch1.noteOn(64, 64);
        ch1.noteOff(64);
    }

    // Now send single midi on, and midi off message
    ch1.noteOn(64, 64);
    ch1.noteOff(64);

    // Read 20 sec from the stream; by this time all voices should be
    // inactive. BUG FIX: skip() may skip fewer bytes than requested,
    // so loop until the full amount is consumed (the old code ignored
    // the return value).
    long toSkip = format.getFrameSize() * (long) (format.getFrameRate() * 20);
    while (toSkip > 0) {
        long skipped = stream.skip(toSkip);
        if (skipped <= 0)
            break; // end of stream
        toSkip -= skipped;
    }

    // If no voices are active, then this test will pass
    VoiceStatus[] v = synth.getVoiceStatus();
    for (int i = 0; i < v.length; i++) {
        if(v[i].active)
        {
            throw new RuntimeException("Not all voices are inactive!");
        }
    }

    // Close the synthesizer after use
    synth.close();
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:35,代码来源:NoteOverFlowTest.java

示例3: main

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception {
    // Verifies that the Receiver handed out by the synthesizer reports
    // the synthesizer itself as its owning MIDI device.
    AudioSynthesizer synth = new SoftSynthesizer();
    synth.openStream(null, null);
    SoftReceiver recv = (SoftReceiver) synth.getReceiver();
    if (recv.getMidiDevice() != synth) {
        throw new Exception("SoftReceiver.getMidiDevice() doesn't return "
                + "instance of the synthesizer");
    }
    synth.close();
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:12,代码来源:GetMidiDevice.java

示例4: test

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void test(Soundbank soundbank) throws Exception {

        // Renders a series of notes scheduled at random timestamps and
        // verifies each rendered note onset lands at the expected sample
        // position (within a 10-sample tolerance).

        // Create instance of synthesizer using the testing soundbank above
        AudioSynthesizer synth = new SoftSynthesizer();
        AudioInputStream stream = synth.openStream(format, null);
        synth.unloadAllInstruments(synth.getDefaultSoundbank());
        synth.loadAllInstruments(soundbank);
        Receiver recv = synth.getReceiver();

        // Set volume to max and turn reverb off
        ShortMessage reverb_off = new ShortMessage();
        reverb_off.setMessage(ShortMessage.CONTROL_CHANGE, 91, 0);
        recv.send(reverb_off, -1);
        ShortMessage full_volume = new ShortMessage();
        full_volume.setMessage(ShortMessage.CONTROL_CHANGE, 7, 127);
        recv.send(full_volume, -1);

        // Fixed seed keeps the test deterministic across runs.
        Random random = new Random(3485934583945l);

        // Create random timestamps (in samples at 44100 Hz)
        long[] test_timestamps = new long[30];
        for (int i = 1; i < test_timestamps.length; i++) {
            test_timestamps[i] = i * 44100
                    + (int) (random.nextDouble() * 22050.0);
        }

        // Send midi note on message to synthesizer
        // (timestamp converted from samples to microseconds)
        for (int i = 0; i < test_timestamps.length; i++) {
            ShortMessage midi_on = new ShortMessage();
            midi_on.setMessage(ShortMessage.NOTE_ON, 69, 127);
            recv.send(midi_on,
                    (long) ((test_timestamps[i] / 44100.0) * 1000000.0));
        }

        // Measure timing from rendered audio
        float[] fbuffer = new float[100];
        byte[] buffer = new byte[fbuffer.length * format.getFrameSize()];
        long firsts = -1;
        int counter = 0;
        long s = 0;
        long max_jitter = 0;
        outerloop: for (int k = 0; k < 10000000; k++) {
            // BUG FIX: stop at end of stream instead of ignoring the
            // read() return value and processing stale buffer contents.
            if (stream.read(buffer) < 0)
                break;
            AudioFloatConverter.getConverter(format).toFloatArray(buffer,
                    fbuffer);
            for (int i = 0; i < fbuffer.length; i++) {
                if (fbuffer[i] != 0) {
                    if (firsts == -1)
                        firsts = s;

                    long measure_time = (s - firsts);
                    long predicted_time = test_timestamps[counter];

                    long jitter = Math.abs(measure_time - predicted_time);

                    // BUG FIX: track the maximum observed jitter; the old
                    // code overwrote max_jitter with the latest value
                    // exceeding the 10-sample tolerance.
                    if (jitter > 10)
                        max_jitter = Math.max(max_jitter, jitter);

                    counter++;
                    if (counter == test_timestamps.length)
                        break outerloop;
                }
                s++;
            }
        }
        synth.close();

        if (counter == 0)
            throw new Exception("Nothing was measured!");

        if (max_jitter != 0) {
            throw new Exception("Jitter has occurred! "
                    + "(max jitter = " + max_jitter + ")");
        }

    }
 
开发者ID:AdoptOpenJDK,项目名称:openjdk-jdk10,代码行数:77,代码来源:TestPreciseTimestampRendering.java

示例5: render

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
/**
 * Renders the given MIDI sequence to WAVE data on the given output
 * stream using the first available AudioSynthesizer. Errors are logged
 * rather than propagated.
 */
public static void render(Sequence sequence, OutputStream outStream) {
    try {
        // Find available AudioSynthesizer.
        AudioSynthesizer synth = findAudioSynthesizer();
        if (synth == null) {
            logger.warn("No AudioSynthesizer was found!");
            return;
        }

        try {
            // Open AudioStream from AudioSynthesizer.
            AudioInputStream stream = synth.openStream(null, null);

            Generator.loadSoundbankInstruments(synth);
            // Play Sequence into AudioSynthesizer Receiver.
            double total = send(sequence, synth.getReceiver());

            // Calculate how long the WAVE file needs to be
            // (rendered length plus 4 seconds of decay tail).
            long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
            stream = new AudioInputStream(stream, stream.getFormat(), len);

            AudioSystem.write(stream, AudioFileFormat.Type.WAVE, outStream);
        } finally {
            // BUG FIX: always release the synthesizer, even when
            // rendering or writing throws (was leaked on exception).
        synth.close();
        }
    } catch (Exception e) {
        // Log with full stack trace instead of printing to stderr.
        logger.error("Failed to render sequence to WAVE", e);
    }
}
 
开发者ID:Glamdring,项目名称:computoser,代码行数:28,代码来源:Midi2WavRenderer.java

示例6: render

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
/**
 * Renders the given sequence using the selected (or default) soundbank
 * into a WAVE audio file. Exits the JVM if no synthesizer is available
 * (this is a standalone command-line tool).
 */
public static void render(Soundbank soundbank, Sequence sequence,
		File audio_file) {
	try {
		// Find available AudioSynthesizer.
		AudioSynthesizer synth = findAudioSynthesizer();
		if (synth == null) {
			System.out.println("No AudioSynthesizer was found!");
			System.exit(1);
		}

		try {
			// Open AudioStream from AudioSynthesizer.
			AudioInputStream stream = synth.openStream(null, null);

			// Load user-selected Soundbank into AudioSynthesizer.
			if (soundbank != null) {
				Soundbank defsbk = synth.getDefaultSoundbank();
				if (defsbk != null)
					synth.unloadAllInstruments(defsbk);
				synth.loadAllInstruments(soundbank);
			}

			// Play Sequence into AudioSynthesizer Receiver.
			double total = send(sequence, synth.getReceiver());

			// Calculate how long the WAVE file needs to be
			// (rendered length plus 4 seconds of decay tail).
			long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
			stream = new AudioInputStream(stream, stream.getFormat(), len);

			// Write WAVE file to disk.
			AudioSystem.write(stream, AudioFileFormat.Type.WAVE, audio_file);
		} finally {
			// BUG FIX: always close the synthesizer, even when
			// rendering or writing throws (was leaked on exception).
			synth.close();
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
开发者ID:bluenote10,项目名称:gervill,代码行数:38,代码来源:Midi2WavRender.java

示例7: render

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
/**
 * Render sequence using selected or default soundbank into wave audio data.
 * If activeTracks is not null, only the tracks with the given indices are rendered.
 *
 * @throws MidiUnavailableException if no AudioSynthesizer could be found
 * @throws IOException              if writing the WAVE data fails
 */
public static void render(Soundbank soundbank, Sequence sequence, Set<Integer> activeTracks,
	OutputStream wavOutputStream)
	throws IOException, MidiUnavailableException {
	// Find available AudioSynthesizer.
	AudioSynthesizer synth = findAudioSynthesizer();
	if (synth == null) {
		// BUG FIX: a library method must not kill the whole JVM with
		// System.exit(1); report the failure through the already
		// declared exception so the caller can handle it.
		throw new MidiUnavailableException("No AudioSynthesizer was found!");
	}

	try {
		// Open AudioStream from AudioSynthesizer.
		AudioInputStream stream = synth.openStream(null, null);

		// Load user-selected Soundbank into AudioSynthesizer.
		if (soundbank != null) {
			Soundbank defsbk = synth.getDefaultSoundbank();
			if (defsbk != null)
				synth.unloadAllInstruments(defsbk);
			synth.loadAllInstruments(soundbank);
		}

		// Play Sequence into AudioSynthesizer Receiver.
		double total = send(sequence, activeTracks, synth.getReceiver());

		// Calculate how long the WAVE file needs to be
		// (rendered length plus 4 seconds of decay tail).
		long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
		stream = new AudioInputStream(stream, stream.getFormat(), len);

		// Write WAVE data to the output stream.
		AudioSystem.write(stream, AudioFileFormat.Type.WAVE, wavOutputStream);
	} finally {
		// BUG FIX: always release the synthesizer, even when rendering
		// or writing throws (was leaked on exception).
		synth.close();
	}
}
 
开发者ID:Xenoage,项目名称:Zong,代码行数:39,代码来源:MidiToWaveRenderer.java

示例8: main

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception
{
    // Regression test: an instrument whose layer has far more regions
    // than the maximum polyphony must not crash or hang the synthesizer.

    // Create instance of the synthesizer with very low polyphony
    AudioSynthesizer synth = new SoftSynthesizer();
    AudioFormat format = new AudioFormat(44100, 16, 2, true, false);
    Map<String, Object> p = new HashMap<String, Object>();
    // Integer.valueOf instead of the deprecated new Integer(...)
    p.put("max polyphony", Integer.valueOf(5));
    AudioInputStream stream = synth.openStream(format, p);

    // Create instrument with too many regions (more than max polyphony)
    SF2Soundbank sf2 = new SF2Soundbank();

    SF2Sample sample = new SF2Sample(sf2);
    sample.setName("test sample");
    sample.setData(new byte[100]);
    sample.setSampleRate(44100);
    sample.setOriginalPitch(20);
    sf2.addResource(sample);

    SF2Layer layer = new SF2Layer(sf2);
    layer.setName("test layer");
    sf2.addResource(layer);

    for (int i = 0; i < 100; i++) {
        SF2LayerRegion region = new SF2LayerRegion();
        region.setSample(sample);
        layer.getRegions().add(region);
    }

    SF2Instrument ins = new SF2Instrument(sf2);
    ins.setPatch(new Patch(0,0));
    ins.setName("test instrument");
    sf2.addInstrument(ins);

    SF2InstrumentRegion insregion = new SF2InstrumentRegion();
    insregion.setLayer(layer);
    ins.getRegions().add(insregion);

    // Load the test soundbank into the synthesizer
    synth.unloadAllInstruments(synth.getDefaultSoundbank());
    synth.loadAllInstruments(sf2);

    // Send out one midi on message
    MidiChannel ch1 = synth.getChannels()[0];
    ch1.programChange(0);
    ch1.noteOn(64, 64);

    // Read 2 sec of audio from the stream. BUG FIX: skip() may skip
    // fewer bytes than requested, so loop until the full amount is
    // consumed (the old code ignored the return value).
    long toSkip = format.getFrameSize() * (long) (format.getFrameRate() * 2);
    while (toSkip > 0) {
        long skipped = stream.skip(toSkip);
        if (skipped <= 0)
            break; // end of stream
        toSkip -= skipped;
    }

    // Close the synthesizer after use
    synth.close();
}
 
开发者ID:lambdalab-mirror,项目名称:jdk8u-jdk,代码行数:54,代码来源:NoteOverFlowTest2.java

示例9: render

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
/**
 * Render sequence using selected or default soundbank into wave audio data
 * written to the given output stream.
 * <p>
 * If the synthesizer is currently open it is closed first, used for offline
 * rendering, and re-opened afterwards.
 *
 * @param sequence the MIDI sequence to render
 * @param out      destination for the WAVE data
 * @throws MidiUnavailableException if no suitable synthesizer is found
 *         (but see the NOTE below — the catch block currently swallows it)
 */
public static void render(Sequence sequence, OutputStream out) throws MidiUnavailableException
{
	try
	{
		// Find available AudioSynthesizer.
		AudioSynthesizer synth = SynthesizerFactory.findAudioSynthesizer();
		if (synth == null)
		{
			throw new MidiUnavailableException("Failed to find appropriate synthesizer");
		}

		// If the synthesizer is already open (e.g. for live playback),
		// close it so it can be reopened below in pull/stream mode.
		boolean opened = synth.isOpen();
		if (opened)
			synth.close();

		AudioInputStream stream = synth.openStream(null, null);
		SynthesizerFactory.initLotroSynthesizer(synth);

		// Play Sequence into AudioSynthesizer Receiver.
		double total = send(sequence, synth.getReceiver());

		// Calculate how long the WAVE file needs to be
		// (rendered length plus 4 seconds of decay tail).
		long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
		stream = new AudioInputStream(stream, stream.getFormat(), len);

		// Write WAVE file to disk.
		AudioSystem.write(stream, AudioFileFormat.Type.WAVE, out);

		// We are finished, close synthesizer.
		synth.close();

		// Restore the synthesizer's previous open state.
		if (opened)
			synth.open();

		// NOTE(review): the catch below swallows every exception —
		// including the MidiUnavailableException thrown above — and only
		// prints it, so callers never observe failures despite the
		// declared throws clause. The synthesizer is also not closed if
		// write() throws. Consider try/finally and rethrowing.
	}
	catch (Exception e)
	{
		e.printStackTrace();
	}
}
 
开发者ID:digero,项目名称:maestro,代码行数:44,代码来源:MidiToWav.java

示例10: main

import com.sun.media.sound.AudioSynthesizer; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception
{
	/*
	* Open synthesizer in pull mode in the format 96000hz 24 bit stereo
	* using Sinc interpolation for highest quality.
	* With 1024 in max polyphony.
	*/
	AudioFormat format = new AudioFormat(96000, 24, 2, true, false);
	AudioSynthesizer synthesizer = (AudioSynthesizer)MidiSystem.getSynthesizer();
	Map<String,Object> info = new HashMap<String,Object>();
	// NOTE(review): "resampletType" looks misspelled ("resamplerType"?)
	// and other examples in this collection use the key "max polyphony"
	// with an Integer value. Unrecognized keys are silently ignored, so
	// these settings may have no effect — verify against the
	// synthesizer's documented property names.
	info.put("resampletType", "sinc");
	info.put("maxPolyphony", "1024");
	AudioInputStream stream = synthesizer.openStream(format, info);
	 
	/*
	* Play midi note 60 on channel 0 for 1 sec
	* (NOTE_ON with velocity 0 acts as the note-off).
	*/
	ShortMessage msg = new ShortMessage();
	Receiver recv = synthesizer.getReceiver();
	msg.setMessage(ShortMessage.PROGRAM_CHANGE, 0, 48, 0);
	recv.send(msg, 0);
	msg.setMessage(ShortMessage.NOTE_ON, 0, 60, 80);
	recv.send(msg, 0);
	msg.setMessage(ShortMessage.NOTE_ON, 0, 60, 0);
	recv.send(msg, 1000000);
	 
	/*
	* Calculate how many sample frames 4 seconds are.
	*/
	long len = (long)(format.getFrameRate() * 4);
	 
	/*
	* Write 4 seconds into the output file.
	*/
	stream = new AudioInputStream(stream, format, len);
	AudioSystem.write(stream, AudioFileFormat.Type.WAVE, new File("output.wav"));
	 
	/*
	* Close all resources.
	*/
	synthesizer.close();
}
 
开发者ID:bluenote10,项目名称:gervill,代码行数:43,代码来源:AudioRender.java


注:本文中的com.sun.media.sound.AudioSynthesizer.close方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。