

Java AudioDispatcher Class Code Examples

This article collects typical usage examples of the Java class be.tarsos.dsp.AudioDispatcher. If you have been wondering how to use the AudioDispatcher class, or are looking for concrete examples of it in action, the hand-picked class code examples below should help.


The AudioDispatcher class belongs to the be.tarsos.dsp package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
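
All 15 examples follow the same basic pattern: build an AudioDispatcher around an audio source (a float array, a pipe, or the microphone), attach one or more AudioProcessor implementations, and call run(). As a quick orientation, here is a minimal sketch of that pattern. It is an illustration rather than code from any of the projects cited below: the be.tarsos.dsp.io.jvm.AudioDispatcherFactory import assumes the desktop (JVM) build of TarsosDSP, and the sample rate, buffer size, and RMS-printing processor are arbitrary choices.

import javax.sound.sampled.UnsupportedAudioFileException;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;

public class AudioDispatcherSketch {

	public static void main(String[] args) throws UnsupportedAudioFileException {
		// One second of silence at 8000 Hz, processed in 1024-sample buffers with no overlap.
		float[] audio = new float[8000];
		AudioDispatcher dispatcher = AudioDispatcherFactory.fromFloatArray(audio, 8000, 1024, 0);

		// Each processor receives every buffer in turn; returning true keeps the chain running.
		dispatcher.addAudioProcessor(new AudioProcessor() {
			@Override
			public boolean process(AudioEvent audioEvent) {
				System.out.printf("%.2f s  RMS %.4f%n", audioEvent.getTimeStamp(), audioEvent.getRMS());
				return true;
			}

			@Override
			public void processingFinished() {
				System.out.println("Done.");
			}
		});

		// run() blocks until the source is exhausted; for live input the examples below
		// hand the dispatcher to a separate Thread instead (see Example 8).
		dispatcher.run();
	}
}

The Android examples (Example 2 and Example 8) build their dispatcher from the microphone rather than a float array, but the processor chain works the same way.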

Example 1: processMonitorQuery

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private void processMonitorQuery(float[] audioData,QueryResultHandler handler, double timeStamp,Set<Integer> avoid){
	int samplerate = Config.getInt(Key.RAFS_SAMPLE_RATE);
	int size = Config.getInt(Key.RAFS_FFT_SIZE);
	int overlap = size - Config.getInt(Key.RAFS_FFT_STEP_SIZE);
	
	AudioDispatcher d;
	try {
		d = AudioDispatcherFactory.fromFloatArray(audioData, samplerate, size, overlap);
		d.setZeroPadFirstBuffer(true);
		final RafsExtractor processor = new RafsExtractor(null,true);
		d.addAudioProcessor(processor);
		d.run();
		queryForMonitor(processor.fingerprints, processor.fingerprintProbabilities, 10 , avoid, handler);
	} catch (UnsupportedAudioFileException e) {
		LOG.severe("Unsupported audio");
	}
}
 
Developer: JorenSix, Project: Panako, Lines of code: 18, Source file: RafsRepStrategy.java

Example 2: fromDefaultMicrophone

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
/**
 * Create a new AudioDispatcher connected to the default microphone.
 * 
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * 
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
		final int audioBufferSize, final int bufferOverlap) {
	int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
			android.media.AudioFormat.CHANNEL_IN_MONO,
			android.media.AudioFormat.ENCODING_PCM_16BIT);
	int minAudioBufferSizeInSamples =  minAudioBufferSize/2;
	if (minAudioBufferSizeInSamples <= audioBufferSize) {
		AudioRecord audioInputStream = new AudioRecord(
				MediaRecorder.AudioSource.MIC, sampleRate,
				android.media.AudioFormat.CHANNEL_IN_MONO,
				android.media.AudioFormat.ENCODING_PCM_16BIT,
				audioBufferSize * 2);

		TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);

		TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
		// Start recording! This opens the stream.
		audioInputStream.startRecording();
		return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
	} else {
		throw new IllegalArgumentException("Buffer size too small, should be at least " + (minAudioBufferSize * 2));
	}
}
 
Developer: gstraube, Project: cythara, Lines of code: 36, Source file: AudioDispatcherFactory.java

Example 3: extractPacker

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private static RafsPacker extractPacker(File f, int fileIndex, boolean trackProbabilities){
	final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
	final int size = Config.getInt(Key.RAFS_FFT_SIZE);
	final int overlap =  size - Config.getInt(Key.RAFS_FFT_STEP_SIZE); //about an fft every 11.6ms (64/5500)
	String file = f.getAbsolutePath();
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
	d.setZeroPadFirstBuffer(true);
	RafsExtractor ex = new RafsExtractor(file, trackProbabilities);
	RafsPacker packer = new RafsPacker(ex,trackProbabilities);
	//String baseName = f.getName();
	d.setZeroPadFirstBuffer(true);
	d.addAudioProcessor(ex);
	d.addAudioProcessor(packer);
	d.run();
	return packer;
}
 
Developer: JorenSix, Project: Panako, Lines of code: 17, Source file: RafsStrategy.java

Example 4: extractPackedPrints

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private static List<BitSetWithID> extractPackedPrints(File f,int fileIndex){		
	final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
	final int size = Config.getInt(Key.RAFS_FFT_SIZE);
	final int overlap =  size - Config.getInt(Key.RAFS_FFT_STEP_SIZE); 
	String file = f.getAbsolutePath();
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
	RafsExtractor ex = new RafsExtractor(file, true);
	RafsPacker packer = new RafsPacker(ex,true);
	//String baseName = f.getName();
	d.setZeroPadFirstBuffer(true);
	d.addAudioProcessor(ex);
	d.addAudioProcessor(packer);
	d.run();
	List<BitSetWithID> prints = new ArrayList<>();
	
	for (Map.Entry<Float, BitSet> frameEntry : packer.packedFingerprints.entrySet()) {
		int offset = (int) (frameEntry.getKey() * 1000);
		prints.add(new BitSetWithID(fileIndex * (1L<<32)  + offset, frameEntry.getValue()));
	}
	return prints;		
}
 
Developer: JorenSix, Project: Panako, Lines of code: 22, Source file: RafsCliTest.java

Example 5: processMonitorQueryToSerializeFingerprints

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private void processMonitorQueryToSerializeFingerprints(float[] audioBuffer,SerializedFingerprintsHandler handler,double queryOffset){
	int samplerate = Config.getInt(Key.NFFT_SAMPLE_RATE);
	int size = Config.getInt(Key.NFFT_SIZE);
	int overlap = size - Config.getInt(Key.NFFT_STEP_SIZE);
	
	AudioDispatcher d;
	try {
		d = AudioDispatcherFactory.fromFloatArray(audioBuffer, samplerate, size, overlap);
		final NFFTEventPointProcessor minMaxProcessor = new NFFTEventPointProcessor(size,overlap,samplerate);
		d.addAudioProcessor(minMaxProcessor);
		d.run();
		double queryDuration = d.secondsProcessed();
		List<NFFTFingerprint> fingerprints = new ArrayList<NFFTFingerprint>(minMaxProcessor.getFingerprints());
		handler.handleSerializedFingerprints(PanakoWebserviceClient.serializeFingerprintsToJson(fingerprints),queryDuration,queryOffset);
	} catch (UnsupportedAudioFileException e) {
		LOG.severe("Unsupported audio");
	}
}
 
Developer: JorenSix, Project: Panako, Lines of code: 19, Source file: NFFTStrategy.java

Example 6: monitor

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
@Override
public void monitor(String query, final int maxNumberOfReqults,Set<Integer> avoid,
		final QueryResultHandler handler) {
	
	int samplerate = Config.getInt(Key.NCTEQ_SAMPLE_RATE);
	int size = Config.getInt(Key.MONITOR_STEP_SIZE) * samplerate;
	int overlap = Config.getInt(Key.MONITOR_OVERLAP) * samplerate;
	final ConstantQ constanQ = createConstantQ();
	
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(query, samplerate, size, overlap);
	d.addAudioProcessor(new AudioProcessor() {
		@Override
		public boolean process(AudioEvent audioEvent) {
			double timeStamp = audioEvent.getTimeStamp() - Config.getInt(Key.MONITOR_OVERLAP);
			processMonitorQuery(audioEvent.getFloatBuffer().clone(), maxNumberOfReqults, handler,timeStamp,constanQ);
			return true;
		}
		
		@Override
		public void processingFinished() {
		}
	});
	d.run();

}
 
Developer: JorenSix, Project: Panako, Lines of code: 26, Source file: NCteQStrategy.java

Example 7: BeatRootSpectralFluxOnsetDetector

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
public BeatRootSpectralFluxOnsetDetector(AudioDispatcher d,int fftSize, int hopSize){
	
	this.hopSize = hopSize; 
	this.hopTime = hopSize/d.getFormat().getSampleRate();
	this.fftSize = fftSize;

	System.err.println("Please use the ComplexOnset detector: BeatRootSpectralFluxOnsetDetector does not currently support streaming");
	//no overlap
	//FIXME:		
	int durationInFrames = -1000; 
	totalFrames = (int)(durationInFrames / hopSize) + 4;
	energy = new double[totalFrames*energyOversampleFactor];
	spectralFlux = new double[totalFrames];
	
	reBuffer = new float[fftSize/2];
	imBuffer = new float[fftSize/2];
	prevFrame = new float[fftSize/2];
	
	makeFreqMap(fftSize, d.getFormat().getSampleRate());
	
	newFrame = new double[freqMapSize];
	frames = new double[totalFrames][freqMapSize];
	handler = new PrintOnsetHandler();
	fft = new FFT(fftSize,new ScaledHammingWindow());
}
 
Developer: gstraube, Project: cythara, Lines of code: 26, Source file: BeatRootSpectralFluxOnsetDetector.java

Example 8: checkMicrophone

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private void checkMicrophone() {
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);

    PitchDetectionHandler pdh = new PitchDetectionHandler() {
        @Override
        public void handlePitch(PitchDetectionResult result,AudioEvent e) {
            final float pitchInHz = result.getPitch();
            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    if (pitchInHz != -1) {
                        System.out.println(pitchInHz);
                    }
                    if (pitchInHz <= 18500 && pitchInHz >= 17500) {
                        System.err.println("Pitch Richtig");
                    }
                }
            });
        }
    };
    AudioProcessor p = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pdh);
    dispatcher.addAudioProcessor(p);
    new Thread(dispatcher,"Audio Dispatcher").start();
}
 
Developer: AndroidMusicSync, Project: AndroidMusicSync, Lines of code: 25, Source file: ServantActivity.java

Example 9: run

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
@Override
public void run(String... args) {
	String inputResource = AudioResourceUtils.sanitizeResource(args[0]);
	AudioDispatcher d;
	try {
		d = AudioDispatcherFactory.fromPipe(inputResource, TARGET_SAMPLE_RATE, 2028, 0);
		d.addAudioProcessor(new AudioPlayer(JVMAudioInputStream.toAudioFormat(d.getFormat())));
		d.run();
	}  catch (LineUnavailableException e) {
		e.printStackTrace();
		System.err.print(e.getLocalizedMessage());
	}
}
 
Developer: JorenSix, Project: Panako, Lines of code: 14, Source file: Play.java

Example 10: monitor

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
@Override
public void monitor(String query, int maxNumberOfReqults, Set<Integer> avoid, QueryResultHandler handler) {
	int samplerate = Config.getInt(Key.RAFS_SAMPLE_RATE);
	int size = Config.getInt(Key.MONITOR_STEP_SIZE) * samplerate;
	int overlap = Config.getInt(Key.MONITOR_OVERLAP) * samplerate;
	AudioDispatcher d ;
	if (query.equals(Panako.DEFAULT_MICROPHONE)){
		try {
			d = AudioDispatcherFactory.fromDefaultMicrophone(samplerate,size, overlap);
		} catch (LineUnavailableException e) {
			LOG.warning("Could not connect to default microphone!" + e.getMessage());
			e.printStackTrace();
			d = null;
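			// NOTE: if the microphone line is unavailable, d stays null here and the calls below will throw a NullPointerException.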
		}
	}else{
		d = AudioDispatcherFactory.fromPipe(query, samplerate, size, overlap);
	}
	d.setZeroPadFirstBuffer(true);
	d.addAudioProcessor(new AudioProcessor() {
		@Override
		public boolean process(AudioEvent audioEvent) {
			double timeStamp = audioEvent.getTimeStamp() - Config.getInt(Key.MONITOR_OVERLAP);
			processMonitorQuery(audioEvent.getFloatBuffer().clone(), handler,timeStamp,avoid);
			return true;
		}
		
		@Override
		public void processingFinished() {
		}
	});
	d.run();
}
 
Developer: JorenSix, Project: Panako, Lines of code: 33, Source file: RafsRepStrategy.java

Example 11: extractExtractor

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private static RafsExtractor extractExtractor(File f, int fileIndex, boolean trackProbabilities){
	final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
	final int size = Config.getInt(Key.RAFS_FFT_SIZE);
	final int overlap = size -  Config.getInt(Key.RAFS_FFT_STEP_SIZE); //about an fft every 11.6ms (64/5500)
	String file = f.getAbsolutePath();
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
	d.setZeroPadFirstBuffer(true);
	RafsExtractor ex = new RafsExtractor(file, trackProbabilities);
	//String baseName = f.getName();
	d.addAudioProcessor(ex);
	d.run();
	return ex;
}
 
Developer: JorenSix, Project: Panako, Lines of code: 14, Source file: RafsRepStrategy.java

Example 12: starExtraction

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
public void starExtraction(){
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
	//every buffer has the same length
	d.setZeroPadFirstBuffer(true);
	d.addAudioProcessor(this);
	d.run();
}
 
Developer: JorenSix, Project: Panako, Lines of code: 8, Source file: RafsExtractor.java

Example 13: extractPackedPrints

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private static TreeMap<Float, BitSet> extractPackedPrints(File f,boolean trackProbabilities){		
	final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
	final int size = Config.getInt(Key.RAFS_FFT_SIZE);
	final int overlap = size - Config.getInt(Key.RAFS_FFT_STEP_SIZE);
	String file = f.getAbsolutePath();
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
	d.setZeroPadFirstBuffer(true);
	RafsExtractor ex = new RafsExtractor(file, trackProbabilities);
	//String baseName = f.getName();
	d.addAudioProcessor(ex);
	d.run();
	return ex.fingerprints;
}
 
Developer: JorenSix, Project: Panako, Lines of code: 14, Source file: RafsCompStats.java

Example 14: extractFingerprints

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
private List<NFFTFingerprint> extractFingerprints(String resource) {
	int samplerate = Config.getInt(Key.NFFT_SAMPLE_RATE);
	int size = Config.getInt(Key.NFFT_SIZE);
	int overlap = size - Config.getInt(Key.NFFT_STEP_SIZE);

	AudioDispatcher d = AudioDispatcherFactory.fromPipe(resource, samplerate, size, overlap);
	final NFFTEventPointProcessor minMaxProcessor = new NFFTEventPointProcessor(size, overlap, samplerate);
	d.addAudioProcessor(minMaxProcessor);
	d.run();
	return new ArrayList<NFFTFingerprint>(minMaxProcessor.getFingerprints());
}
 
Developer: JorenSix, Project: Panako, Lines of code: 12, Source file: NFFTStreamSync.java

Example 15: store

import be.tarsos.dsp.AudioDispatcher; // import the required package/class
@Override
public double store(String resource, String description) {
	int samplerate = Config.getInt(Key.NFFT_SAMPLE_RATE);
	int size = Config.getInt(Key.NFFT_SIZE);
	int overlap = size - Config.getInt(Key.NFFT_STEP_SIZE);
	
	AudioDispatcher d = AudioDispatcherFactory.fromPipe(resource, samplerate, size, overlap);
	final NFFTEventPointProcessor minMaxProcessor = new NFFTEventPointProcessor(size,overlap,samplerate);
	d.addAudioProcessor(minMaxProcessor);
	d.run();
	Set<NFFTFingerprint> fingerprints = new HashSet<NFFTFingerprint>(minMaxProcessor.getFingerprints());
	
	int identifier = FileUtils.getIdentifier(resource);
	
	
	for(NFFTFingerprint fingerprint: fingerprints){
		storage.addFingerprint(identifier, fingerprint.t1, fingerprint.hash());
	}
	
	// Store the meta data.
	storage.addAudio(identifier, description);
	
	// Commit the changes to store the fingerprints
	double durationInSeconds = d.secondsProcessed();
	storage.audioObjectAdded((int) Math.round(durationInSeconds));
	
	LOG.info(String.format("Stored %d fingerprints bundled from %d event points for %s.", fingerprints.size(), minMaxProcessor.getEventPoints().size(), resource));
	return durationInSeconds;
}
 
Developer: JorenSix, Project: Panako, Lines of code: 30, Source file: NFFTStrategy.java


Note: The be.tarsos.dsp.AudioDispatcher class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.