This article collects typical usage examples of the Java class javax.sound.sampled.AudioSystem. If you are unsure what the AudioSystem class does or how to use it, the curated code examples below should help.
The AudioSystem class belongs to the javax.sound.sampled package. Fifteen code examples of the class are shown below, ordered by popularity by default.
Example 1: playSound
import javax.sound.sampled.AudioSystem; // import the required package/class
private synchronized void playSound(final String audioFileName) {
    if (isSoundEnabled) {
        try {
            Clip clip = AudioSystem.getClip();
            InputStream inputStream = MainWindow.class.getResourceAsStream(audioFileName);
            if (inputStream != null) {
                AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(inputStream);
                clip.open(audioInputStream);
                clip.start();
            }
            else {
                System.out.println("Input stream not valid");
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Example 2: init
import javax.sound.sampled.AudioSystem; // import the required package/class
private static void init() {
    try {
        // 44,100 samples per second, 16-bit audio, mono, signed PCM, little endian
        AudioFormat format = new AudioFormat((float) SAMPLE_RATE, BITS_PER_SAMPLE, 1, true, false);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);
        line = (SourceDataLine) AudioSystem.getLine(info);
        line.open(format, SAMPLE_BUFFER_SIZE * BYTES_PER_SAMPLE);
        // The internal buffer is a fraction of the actual buffer size (the choice is arbitrary):
        // we can't expect the buffered data to line up exactly with when the sound card
        // decides to push out its samples.
        buffer = new byte[SAMPLE_BUFFER_SIZE * BYTES_PER_SAMPLE / 3];
    } catch (LineUnavailableException e) {
        System.out.println(e.getMessage());
    }
    // no sound gets made before this call
    line.start();
}
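To show how a line opened this way is typically fed, here is a minimal, self-contained sketch (not part of the example above) that writes one second of a 440 Hz sine tone to a SourceDataLine; the 44,100 Hz, 16-bit, mono, little-endian constants mirror the format used in init(), and the class name is illustrative:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.SourceDataLine;

public class ToneSketch {
    public static void main(String[] args) throws Exception {
        int sampleRate = 44100;
        // signed 16-bit PCM, mono, little-endian
        AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
        SourceDataLine line = AudioSystem.getSourceDataLine(format);
        line.open(format);
        line.start();
        // One second of a 440 Hz sine wave as 16-bit little-endian samples.
        byte[] buffer = new byte[sampleRate * 2];
        for (int i = 0; i < sampleRate; i++) {
            short sample = (short) (Math.sin(2 * Math.PI * 440 * i / sampleRate) * Short.MAX_VALUE * 0.5);
            buffer[2 * i] = (byte) (sample & 0xFF);          // low byte first (little-endian)
            buffer[2 * i + 1] = (byte) ((sample >> 8) & 0xFF);
        }
        line.write(buffer, 0, buffer.length);
        line.drain(); // block until all queued samples have been played
        line.close();
    }
}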
Example 3: loadSound
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * WAV files only
 *
 * @param name
 *            Name to store sound as
 * @param file
 *            Sound file
 */
public static void loadSound(String name, String file) {
    try {
        System.out.print("Loading sound file: \"" + file + "\" into clip: \"" + name + "\", ");
        BufferedInputStream in = new BufferedInputStream(SoundPlayer.class.getResourceAsStream(file));
        AudioInputStream ain = AudioSystem.getAudioInputStream(in);
        Clip c = AudioSystem.getClip();
        c.open(ain);
        c.setLoopPoints(0, -1);
        clips.put(name, c);
        ain.close();
        in.close();
        System.out.println("Done.");
    } catch (Exception e) {
        System.out.println("Failed. (" + e.getMessage() + ")");
    }
}
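A playback counterpart for clips stored this way is not shown. A minimal sketch of such a method, assuming the same clips map, might look like the following (the method name and message text are illustrative, not part of the original class):

/** Hypothetical companion to loadSound: plays a previously loaded clip from the beginning. */
public static void playSound(String name) {
    Clip c = clips.get(name);
    if (c == null) {
        System.out.println("No clip loaded under name: " + name);
        return;
    }
    if (c.isRunning()) {
        c.stop();
    }
    c.setFramePosition(0); // rewind so repeated calls start from the beginning
    c.start();
}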
Example 4: getPCMConvertedAudioInputStream
import javax.sound.sampled.AudioSystem; // import the required package/class
public static AudioInputStream getPCMConvertedAudioInputStream(AudioInputStream ais) {
    // we can't open the device for non-PCM playback, so we have to
    // convert any other encodings to PCM here (at least we try!)
    AudioFormat af = ais.getFormat();
    if ((!af.getEncoding().equals(AudioFormat.Encoding.PCM_SIGNED)) &&
        (!af.getEncoding().equals(AudioFormat.Encoding.PCM_UNSIGNED))) {
        try {
            AudioFormat newFormat =
                new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                                af.getSampleRate(),
                                16,
                                af.getChannels(),
                                af.getChannels() * 2,
                                af.getSampleRate(),
                                Platform.isBigEndian());
            ais = AudioSystem.getAudioInputStream(newFormat, ais);
        } catch (Exception e) {
            if (Printer.err) e.printStackTrace();
            ais = null;
        }
    }
    return ais;
}
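For a quick usage picture, the standalone sketch below performs the same kind of conversion directly with AudioSystem.getAudioInputStream(AudioFormat, AudioInputStream), turning a (possibly ULAW- or ALAW-encoded) file into signed 16-bit PCM; the file name and class name are illustrative assumptions, not part of the class above:

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class ConvertToPcmSketch {
    public static void main(String[] args) throws Exception {
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("input.au")); // e.g. a ULAW-encoded file
        AudioFormat src = in.getFormat();
        AudioFormat pcm = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                src.getSampleRate(), 16, src.getChannels(),
                src.getChannels() * 2, src.getSampleRate(), false);
        // Throws IllegalArgumentException if no converter supports this conversion.
        AudioInputStream converted = AudioSystem.getAudioInputStream(pcm, in);
        System.out.println("Converted format: " + converted.getFormat());
        converted.close();
        in.close();
    }
}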
Example 5: test2
import javax.sound.sampled.AudioSystem; // import the required package/class
public static void test2(AudioFormat inFormat1, AudioFormat inFormat2, AudioFormat outFormat) throws Exception {
    AudioInputStream inStream1 = new AudioInputStream(in, inFormat1, -1);
    System.out.println("Input Format 1: " + printFormat(inStream1.getFormat()));
    // get a converted stream
    AudioInputStream stream1 = AudioSystem.getAudioInputStream(outFormat, inStream1);
    System.out.println("Output Format 1: " + printFormat(stream1.getFormat()));
    AudioInputStream inStream2 = new AudioInputStream(in, inFormat2, -1);
    System.out.println("Input Format 2: " + printFormat(inStream2.getFormat()));
    // get a converted stream in big endian ulaw
    AudioInputStream stream2 = AudioSystem.getAudioInputStream(outFormat, inStream2);
    System.out.println("Output Format 2: " + printFormat(stream2.getFormat()));
    compareStreams(stream1, stream2);
}
Example 6: createSourceDataLine
import javax.sound.sampled.AudioSystem; // import the required package/class
private boolean createSourceDataLine() {
    if (DEBUG || Printer.debug) Printer.debug("JavaSoundAudioClip.createSourceDataLine()");
    try {
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, loadedAudioFormat);
        if (!(AudioSystem.isLineSupported(info))) {
            if (DEBUG || Printer.err) Printer.err("Line not supported: " + loadedAudioFormat);
            // fail silently
            return false;
        }
        SourceDataLine source = (SourceDataLine) AudioSystem.getLine(info);
        datapusher = new DataPusher(source, loadedAudioFormat, loadedAudio, loadedAudioByteLength);
    } catch (Exception e) {
        if (DEBUG || Printer.err) e.printStackTrace();
        // fail silently
        return false;
    }
    if (datapusher == null) {
        // fail silently
        return false;
    }
    if (DEBUG || Printer.debug) Printer.debug("Created SourceDataLine.");
    return true;
}
Example 7: queueSong
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * Queues song for the audio player
 * @param player main instance of the AudioPlayer
 * @param event event triggered when command is sent
 * @param audioLink URL linking to audio file
 * @throws IOException thrown if connection could not be made
 * @throws UnsupportedAudioFileException thrown if audio file linked
 *         is not playable
 */
private synchronized void queueSong(AudioPlayer player,
                                    MessageReceivedEvent event,
                                    String audioLink)
        throws IOException, UnsupportedAudioFileException {
    // Connection to server for music file
    // might be rejected because of no user agent
    URLConnection conn = new URL(audioLink.trim()).openConnection();
    conn.setRequestProperty("User-Agent", rexCord.USER_AGENT);
    AudioInputStream audioInputStream
            = AudioSystem.getAudioInputStream(conn.getInputStream());
    player.queue(audioInputStream);
    String message
            = String.format(
                    "Song is now queued! Your song is #%d on the queue.",
                    player.getPlaylistSize());
    rexCord.sendMessage(event.getChannel(), message);
    // Start playing music if there is nothing in the playlist.
    if (player.getPlaylistSize() == 0) {
        player.provide();
    }
}
Example 8: getAudioInputStream
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * This method is a replacement for
 * AudioSystem.getAudioInputStream(AudioFormat, AudioInputStream), which is
 * used for audio format conversion at the stream level. This method includes
 * a workaround for converting from an mp3 AudioInputStream when the sketch
 * is running in an applet. The workaround was developed by the Tritonus team
 * and originally comes from the package javazoom.jlgui.basicplayer
 *
 * @param targetFormat
 *          the AudioFormat to convert the stream to
 * @param sourceStream
 *          the stream containing the unconverted audio
 * @return an AudioInputStream in the target format
 */
AudioInputStream getAudioInputStream(AudioFormat targetFormat,
                                     AudioInputStream sourceStream)
{
  try
  {
    return AudioSystem.getAudioInputStream(targetFormat, sourceStream);
  }
  catch (IllegalArgumentException iae)
  {
    debug("Using AppletMpegSPIWorkaround to get codec");
    try
    {
      Class.forName("javazoom.spi.mpeg.sampled.convert.MpegFormatConversionProvider");
      return new javazoom.spi.mpeg.sampled.convert.MpegFormatConversionProvider().getAudioInputStream(
          targetFormat,
          sourceStream);
    }
    catch (ClassNotFoundException cnfe)
    {
      throw new IllegalArgumentException("Mpeg codec not properly installed");
    }
  }
}
Example 9: open
import javax.sound.sampled.AudioSystem; // import the required package/class
@Override
public void open() throws AudioException {
    try {
        this.audioInputStream = Audio.getAudioInputStream(this.resource);
        this.clip = AudioSystem.getClip();
        this.clip.open(this.audioInputStream);
        this.clip.addLineListener(event -> {
            if (event.getType().equals(LineEvent.Type.STOP) && this.clip.getMicrosecondPosition() >= this.clip.getMicrosecondLength()) {
                this.trigger(AudioEvent.Type.REACHED_END);
            }
        });
        this.controls = AbstractAudio.extractControls(this.clip, this.controls);
        this.open = true;
        this.trigger(AudioEvent.Type.OPENED);
    } catch (Exception exception) {
        throw new AudioException(exception);
    }
}
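The STOP-plus-position check above is the usual way to distinguish a natural end of playback from a manual stop, since the STOP event fires in both cases. A standalone sketch of the same pattern with a plain Clip (class name, resource path, and the sleep at the end are illustrative assumptions) could look like:

import java.io.BufferedInputStream;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.LineEvent;

public class ClipEndSketch {
    public static void main(String[] args) throws Exception {
        AudioInputStream ais = AudioSystem.getAudioInputStream(
                new BufferedInputStream(ClipEndSketch.class.getResourceAsStream("/sound.wav")));
        Clip clip = AudioSystem.getClip();
        clip.open(ais);
        clip.addLineListener(event -> {
            // STOP fires on both pause and natural end; comparing positions tells them apart.
            if (event.getType() == LineEvent.Type.STOP
                    && clip.getMicrosecondPosition() >= clip.getMicrosecondLength()) {
                System.out.println("Reached end of clip");
                clip.close();
            }
        });
        clip.start();
        Thread.sleep(5000); // keep the JVM alive long enough for the clip to finish (illustrative)
    }
}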
Example 10: isSoundcardInstalled
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * Returns true if at least one soundcard is correctly installed
 * on the system.
 */
public static boolean isSoundcardInstalled() {
    boolean result = false;
    try {
        Mixer.Info[] mixers = AudioSystem.getMixerInfo();
        if (mixers.length > 0) {
            result = AudioSystem.getSourceDataLine(null) != null;
        }
    } catch (Exception e) {
        System.err.println("Exception occurred: " + e);
    }
    if (!result) {
        System.err.println("Soundcard does not exist or sound drivers not installed!");
        System.err.println("This test requires sound drivers for execution.");
    }
    return result;
}
Example 11: create
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * Creates a WaveData container from the specified URL.
 *
 * @param path
 *            URL to file
 * @return WaveData containing data, or null if a failure occurred
 */
public static WaveData create(URL path)
{
    try
    {
        return create(AudioSystem.getAudioInputStream(
                new BufferedInputStream(path.openStream())));
    }
    catch (Exception e)
    {
        org.lwjgl.LWJGLUtil.log("Unable to create from: " + path);
        e.printStackTrace();
        return null;
    }
}
Example 12: playSound
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * Plays a sound at the given frequency freq for the given duration (in seconds), with volume as the strength.
 * <br/><br/>
 * <code>SoundGenerator.playSound(440.0,1.0,0.5,SoundGenerator.FADE_LINEAR,SoundGenerator.WAVE_SIN);</code><br/>
 * Available fades: FADE_NONE, FADE_LINEAR, FADE_QUADRATIC<br/>
 * Available waves: WAVE_SIN, WAVE_SQUARE, WAVE_TRIANGLE, WAVE_SAWTOOTH<br/>
 */
public static void playSound(double freq, double duration, double volume, byte fade, byte wave) {
    double[] soundData = generateSoundData(freq, duration, volume, fade, wave);
    byte[] freqdata = new byte[soundData.length];
    for (int i = 0; i < soundData.length; i++) {
        freqdata[i] = (byte) soundData[i];
    }
    // Play it
    try {
        final AudioFormat af = new AudioFormat(SAMPLE_RATE, 8, 1, true, true);
        SourceDataLine line = AudioSystem.getSourceDataLine(af);
        line.open(af, SAMPLE_RATE);
        line.start();
        line.write(freqdata, 0, freqdata.length);
        line.drain();
        line.close();
    } catch (LineUnavailableException e) {
        e.printStackTrace();
    }
}
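The generateSoundData helper is not included above. For orientation, a minimal sine-wave-only generator compatible with the 8-bit signed format used in the method might look like the sketch below; the SAMPLE_RATE value, the method name, and the omission of fade and wave handling are all assumptions:

// Hypothetical sine-only sketch; the real generateSoundData also handles fades and wave shapes.
private static double[] generateSineData(double freq, double duration, double volume) {
    int sampleRate = 44100;                       // assumed value of SAMPLE_RATE
    int samples = (int) (duration * sampleRate);
    double[] data = new double[samples];
    for (int i = 0; i < samples; i++) {
        // 8-bit signed samples range from -128 to 127; scale by the requested volume (0.0 to 1.0)
        data[i] = Math.sin(2 * Math.PI * freq * i / sampleRate) * volume * 127;
    }
    return data;
}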
Example 13: restartSDL
import javax.sound.sampled.AudioSystem; // import the required package/class
public void restartSDL() {
    AudioFormat form = new AudioFormat(sys.getSampleRate(), 16, 2, true, false);
    bufptr = 0;
    audioints = new int[(int) ((sys.getSampleRate() / 1000.0) * sys.getBufferSize()) * 2];
    if (scope != null)
        scope.setAudio(audioints);
    audiobuffer = new byte[audioints.length * 2];
    try {
        if (sdl != null)
            sdl.close();
        sdl = AudioSystem.getSourceDataLine(form);
        sdl.open(form, audiobuffer.length * 3);
        sdl.start();
    } catch (LineUnavailableException e) {
        e.printStackTrace();
    }
}
Example 14: write
import javax.sound.sampled.AudioSystem; // import the required package/class
public int write(AudioInputStream stream, AudioFileFormat.Type fileType, OutputStream out) throws IOException {
    //$$fb the following check must come first ! Otherwise
    // the next frame length check may throw an IOException and
    // interrupt iterating File Writers. (see bug 4351296)
    // throws IllegalArgumentException if not supported
    AiffFileFormat aiffFileFormat = (AiffFileFormat) getAudioFileFormat(fileType, stream);
    // we must know the total data length to calculate the file length
    if (stream.getFrameLength() == AudioSystem.NOT_SPECIFIED) {
        throw new IOException("stream length not specified");
    }
    int bytesWritten = writeAiffFile(stream, aiffFileFormat, out);
    return bytesWritten;
}
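File writers like this one are normally reached through the public API rather than called directly. A minimal sketch of saving a stream as AIFF via AudioSystem.write is shown below; the file names and class name are illustrative:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class SaveAiffSketch {
    public static void main(String[] args) throws Exception {
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("input.wav"));
        // The stream's frame length must be known so the writer can compute the AIFF header.
        int bytesWritten = AudioSystem.write(in, AudioFileFormat.Type.AIFF, new File("output.aiff"));
        System.out.println("Wrote " + bytesWritten + " bytes");
        in.close();
    }
}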
Example 15: testAfterSaveToFile
import javax.sound.sampled.AudioSystem; // import the required package/class
/**
 * Verifies the frame length after the stream was saved/read to/from file.
 */
private static void testAfterSaveToFile(final AudioFileWriter afw,
                                        final AudioFileFormat.Type type,
                                        AudioInputStream ais)
        throws IOException {
    final File temp = File.createTempFile("sound", ".tmp");
    try {
        afw.write(ais, type, temp);
        ais = AudioSystem.getAudioInputStream(temp);
        final long frameLength = ais.getFrameLength();
        ais.close();
        validate(frameLength);
    } catch (IllegalArgumentException | UnsupportedAudioFileException ignored) {
    } finally {
        Files.delete(Paths.get(temp.getAbsolutePath()));
    }
}