本文整理汇总了Java中be.tarsos.dsp.AudioDispatcher.addAudioProcessor方法的典型用法代码示例。如果您正苦于以下问题:Java AudioDispatcher.addAudioProcessor方法的具体用法?Java AudioDispatcher.addAudioProcessor怎么用?Java AudioDispatcher.addAudioProcessor使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类be.tarsos.dsp.AudioDispatcher
的用法示例。
在下文中一共展示了AudioDispatcher.addAudioProcessor方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: processMonitorQuery
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts RAFS fingerprints from a single query buffer and matches them
 * against the stored index.
 *
 * @param audioData raw audio samples of the query excerpt
 * @param handler   callback that receives the query results
 * @param timeStamp start time of the excerpt within the monitored stream, in
 *                  seconds (NOTE(review): currently unused in this method —
 *                  confirm whether queryForMonitor should receive it)
 * @param avoid     identifiers that must be excluded from the results
 */
private void processMonitorQuery(float[] audioData,QueryResultHandler handler, double timeStamp,Set<Integer> avoid){
    final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);
    final int fftSize = Config.getInt(Key.RAFS_FFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.RAFS_FFT_STEP_SIZE);
    try {
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromFloatArray(audioData, sampleRate, fftSize, fftOverlap);
        // Zero-pad the first buffer so every processed frame has the same length.
        dispatcher.setZeroPadFirstBuffer(true);
        final RafsExtractor extractor = new RafsExtractor(null, true);
        dispatcher.addAudioProcessor(extractor);
        dispatcher.run();
        queryForMonitor(extractor.fingerprints, extractor.fingerprintProbabilities, 10, avoid, handler);
    } catch (UnsupportedAudioFileException e) {
        LOG.severe("Unsupported audio");
    }
}
示例2: extractPacker
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Runs the RAFS extractor and packer over an audio file and returns the
 * packer holding the packed fingerprints.
 *
 * Fix: the original called {@code d.setZeroPadFirstBuffer(true)} twice;
 * the redundant second call is removed.
 *
 * @param f                  the audio file to fingerprint
 * @param fileIndex          caller-supplied file identifier (currently unused
 *                           here, kept for interface compatibility)
 * @param trackProbabilities whether bit-probabilities are tracked alongside prints
 * @return the {@link RafsPacker} containing the packed fingerprints
 */
private static RafsPacker extractPacker(File f, int fileIndex, boolean trackProbabilities){
    final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
    final int size = Config.getInt(Key.RAFS_FFT_SIZE);
    final int overlap = size - Config.getInt(Key.RAFS_FFT_STEP_SIZE); //about an fft every 11.6ms (64/5500)
    String file = f.getAbsolutePath();
    AudioDispatcher d = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
    // Zero-pad the first buffer so every frame has the same length (set once;
    // the original code called this twice).
    d.setZeroPadFirstBuffer(true);
    RafsExtractor ex = new RafsExtractor(file, trackProbabilities);
    RafsPacker packer = new RafsPacker(ex, trackProbabilities);
    d.addAudioProcessor(ex);
    d.addAudioProcessor(packer);
    d.run();
    return packer;
}
示例3: extractPackedPrints
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts packed RAFS fingerprints from a file and tags each print with a
 * 64-bit identifier combining the file index (high 32 bits) and the print
 * offset in milliseconds (low bits).
 *
 * @param f         the audio file to fingerprint
 * @param fileIndex identifier stored in the upper half of each print id
 * @return the packed fingerprints with their combined identifiers
 */
private static List<BitSetWithID> extractPackedPrints(File f,int fileIndex){
    final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
    final int fftSize = Config.getInt(Key.RAFS_FFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.RAFS_FFT_STEP_SIZE);
    final String path = f.getAbsolutePath();

    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(path, sampleRate, fftSize, fftOverlap);
    RafsExtractor extractor = new RafsExtractor(path, true);
    RafsPacker packer = new RafsPacker(extractor, true);
    // Zero-pad the first buffer so every frame has the same length.
    dispatcher.setZeroPadFirstBuffer(true);
    dispatcher.addAudioProcessor(extractor);
    dispatcher.addAudioProcessor(packer);
    dispatcher.run();

    List<BitSetWithID> prints = new ArrayList<>();
    for (Map.Entry<Float, BitSet> entry : packer.packedFingerprints.entrySet()) {
        // Key is the frame time in seconds; convert to milliseconds.
        int offsetMs = (int) (entry.getKey() * 1000);
        long id = fileIndex * (1L << 32) + offsetMs;
        prints.add(new BitSetWithID(id, entry.getValue()));
    }
    return prints;
}
示例4: processMonitorQueryToSerializeFingerprints
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts NFFT fingerprints from a query buffer, serializes them to JSON and
 * hands them to the given handler together with the processed duration.
 *
 * @param audioBuffer raw audio samples of the query excerpt
 * @param handler     receives the serialized fingerprints
 * @param queryOffset offset of the query within the monitored stream, in seconds
 */
private void processMonitorQueryToSerializeFingerprints(float[] audioBuffer,SerializedFingerprintsHandler handler,double queryOffset){
    final int sampleRate = Config.getInt(Key.NFFT_SAMPLE_RATE);
    final int fftSize = Config.getInt(Key.NFFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.NFFT_STEP_SIZE);
    try {
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromFloatArray(audioBuffer, sampleRate, fftSize, fftOverlap);
        final NFFTEventPointProcessor eventPointProcessor = new NFFTEventPointProcessor(fftSize, fftOverlap, sampleRate);
        dispatcher.addAudioProcessor(eventPointProcessor);
        dispatcher.run();
        // Duration must be read after run() completes.
        double queryDuration = dispatcher.secondsProcessed();
        List<NFFTFingerprint> fingerprints = new ArrayList<NFFTFingerprint>(eventPointProcessor.getFingerprints());
        String json = PanakoWebserviceClient.serializeFingerprintsToJson(fingerprints);
        handler.handleSerializedFingerprints(json, queryDuration, queryOffset);
    } catch (UnsupportedAudioFileException e) {
        LOG.severe("Unsupported audio");
    }
}
示例5: monitor
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Continuously monitors an audio stream: slices it into overlapping windows
 * and runs a Constant-Q query on every window.
 *
 * @param query               resource to monitor (file path or pipe input)
 * @param maxNumberOfReqults  maximum number of results per window
 * @param avoid               identifiers to exclude (NOTE(review): not
 *                            forwarded to processMonitorQuery here — confirm)
 * @param handler             receives the per-window query results
 */
@Override
public void monitor(String query, final int maxNumberOfReqults,Set<Integer> avoid,
		final QueryResultHandler handler) {
    final int sampleRate = Config.getInt(Key.NCTEQ_SAMPLE_RATE);
    final int windowSize = Config.getInt(Key.MONITOR_STEP_SIZE) * sampleRate;
    final int windowOverlap = Config.getInt(Key.MONITOR_OVERLAP) * sampleRate;
    final ConstantQ constanQ = createConstantQ();
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(query, sampleRate, windowSize, windowOverlap);
    dispatcher.addAudioProcessor(new AudioProcessor() {
        @Override
        public boolean process(AudioEvent audioEvent) {
            // Shift the timestamp back by the overlap so it marks the window start.
            double windowStart = audioEvent.getTimeStamp() - Config.getInt(Key.MONITOR_OVERLAP);
            // Clone: the dispatcher reuses its internal buffer between calls.
            float[] windowCopy = audioEvent.getFloatBuffer().clone();
            processMonitorQuery(windowCopy, maxNumberOfReqults, handler, windowStart, constanQ);
            return true;
        }

        @Override
        public void processingFinished() {
        }
    });
    dispatcher.run();
}
示例6: checkMicrophone
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Listens on the default microphone and logs detected pitches; pitches in the
 * 17.5–18.5 kHz band are reported as a match on stderr. Detection runs on a
 * background thread and UI-safe handling happens via runOnUiThread.
 */
private void checkMicrophone() {
    final AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
    PitchDetectionHandler pitchHandler = new PitchDetectionHandler() {
        @Override
        public void handlePitch(PitchDetectionResult result,AudioEvent e) {
            final float pitchInHz = result.getPitch();
            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    // -1 means "no pitch detected" for this frame.
                    if (pitchInHz != -1) {
                        System.out.println(pitchInHz);
                    }
                    if (pitchInHz <= 18500 && pitchInHz >= 17500) {
                        System.err.println("Pitch Richtig");
                    }
                }
            });
        }
    };
    AudioProcessor pitchProcessor =
            new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pitchHandler);
    dispatcher.addAudioProcessor(pitchProcessor);
    new Thread(dispatcher,"Audio Dispatcher").start();
}
示例7: run
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Plays an audio resource through the default audio output.
 *
 * @param args args[0] is the input resource (file path, URL, ...); it is
 *             sanitized before use
 */
@Override
public void run(String... args) {
    String inputResource = AudioResourceUtils.sanitizeResource(args[0]);
    try {
        // NOTE(review): buffer size 2028 looks like a typo for 2048 — confirm
        // before changing; kept as-is to preserve behavior.
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(inputResource, TARGET_SAMPLE_RATE, 2028, 0);
        AudioFormat format = JVMAudioInputStream.toAudioFormat(dispatcher.getFormat());
        dispatcher.addAudioProcessor(new AudioPlayer(format));
        dispatcher.run();
    } catch (LineUnavailableException e) {
        e.printStackTrace();
        System.err.print(e.getLocalizedMessage());
    }
}
示例8: monitor
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Continuously monitors an audio stream (default microphone or a piped
 * resource) and runs a RAFS query on every overlapping window.
 *
 * Fix: the original set {@code d = null} when the microphone line was
 * unavailable and then dereferenced it, causing a guaranteed
 * NullPointerException. Monitoring is now aborted cleanly in that case.
 *
 * @param query               resource to monitor, or Panako.DEFAULT_MICROPHONE
 * @param maxNumberOfReqults  maximum number of results per window (unused here,
 *                            kept for interface compatibility)
 * @param avoid               identifiers to exclude from the results
 * @param handler             receives the per-window query results
 */
@Override
public void monitor(String query, int maxNumberOfReqults, Set<Integer> avoid, QueryResultHandler handler) {
    int samplerate = Config.getInt(Key.RAFS_SAMPLE_RATE);
    int size = Config.getInt(Key.MONITOR_STEP_SIZE) * samplerate;
    int overlap = Config.getInt(Key.MONITOR_OVERLAP) * samplerate;
    AudioDispatcher d;
    if (query.equals(Panako.DEFAULT_MICROPHONE)) {
        try {
            d = AudioDispatcherFactory.fromDefaultMicrophone(samplerate, size, overlap);
        } catch (LineUnavailableException e) {
            LOG.warning("Could not connect to default microphone!" + e.getMessage());
            e.printStackTrace();
            // Without an input line there is nothing to monitor; the original
            // code continued with d == null and crashed with an NPE below.
            return;
        }
    } else {
        d = AudioDispatcherFactory.fromPipe(query, samplerate, size, overlap);
    }
    // Zero-pad the first buffer so every window has the same length.
    d.setZeroPadFirstBuffer(true);
    d.addAudioProcessor(new AudioProcessor() {
        @Override
        public boolean process(AudioEvent audioEvent) {
            // Shift the timestamp back by the overlap so it marks the window start.
            double timeStamp = audioEvent.getTimeStamp() - Config.getInt(Key.MONITOR_OVERLAP);
            // Clone: the dispatcher reuses its internal buffer between calls.
            processMonitorQuery(audioEvent.getFloatBuffer().clone(), handler, timeStamp, avoid);
            return true;
        }

        @Override
        public void processingFinished() {
        }
    });
    d.run();
}
示例9: extractExtractor
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Runs the RAFS extractor over an audio file and returns it with its
 * fingerprints populated.
 *
 * @param f                  the audio file to fingerprint
 * @param fileIndex          caller-supplied file identifier (unused here,
 *                           kept for interface compatibility)
 * @param trackProbabilities whether bit-probabilities are tracked alongside prints
 * @return the extractor after processing the full file
 */
private static RafsExtractor extractExtractor(File f, int fileIndex, boolean trackProbabilities){
    final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
    final int fftSize = Config.getInt(Key.RAFS_FFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.RAFS_FFT_STEP_SIZE); //about an fft every 11.6ms (64/5500)
    final String path = f.getAbsolutePath();
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(path, sampleRate, fftSize, fftOverlap);
    // Zero-pad the first buffer so every frame has the same length.
    dispatcher.setZeroPadFirstBuffer(true);
    RafsExtractor extractor = new RafsExtractor(path, trackProbabilities);
    dispatcher.addAudioProcessor(extractor);
    dispatcher.run();
    return extractor;
}
示例10: starExtraction
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Starts fingerprint extraction: pipes the configured file through an
 * AudioDispatcher with this object registered as the audio processor.
 * Blocks until the whole file has been processed.
 */
public void starExtraction(){
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(file, sampleRate, size, overlap);
    // Zero-pad the first buffer so every buffer has the same length.
    dispatcher.setZeroPadFirstBuffer(true);
    dispatcher.addAudioProcessor(this);
    dispatcher.run();
}
示例11: extractPackedPrints
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts RAFS fingerprints from a file and returns them keyed by frame
 * time (seconds).
 *
 * @param f                  the audio file to fingerprint
 * @param trackProbabilities whether bit-probabilities are tracked alongside prints
 * @return fingerprints ordered by their time offset
 */
private static TreeMap<Float, BitSet> extractPackedPrints(File f,boolean trackProbabilities){
    final int sampleRate = Config.getInt(Key.RAFS_SAMPLE_RATE);//2250Hz Nyquist frequency
    final int fftSize = Config.getInt(Key.RAFS_FFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.RAFS_FFT_STEP_SIZE);
    final String path = f.getAbsolutePath();
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(path, sampleRate, fftSize, fftOverlap);
    // Zero-pad the first buffer so every frame has the same length.
    dispatcher.setZeroPadFirstBuffer(true);
    RafsExtractor extractor = new RafsExtractor(path, trackProbabilities);
    dispatcher.addAudioProcessor(extractor);
    dispatcher.run();
    return extractor.fingerprints;
}
示例12: extractFingerprints
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts NFFT fingerprints from an audio resource.
 *
 * @param resource path or pipe input of the audio to fingerprint
 * @return the extracted fingerprints
 */
private List<NFFTFingerprint> extractFingerprints(String resource) {
    final int sampleRate = Config.getInt(Key.NFFT_SAMPLE_RATE);
    final int fftSize = Config.getInt(Key.NFFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.NFFT_STEP_SIZE);
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(resource, sampleRate, fftSize, fftOverlap);
    final NFFTEventPointProcessor eventPointProcessor = new NFFTEventPointProcessor(fftSize, fftOverlap, sampleRate);
    dispatcher.addAudioProcessor(eventPointProcessor);
    dispatcher.run();
    return new ArrayList<NFFTFingerprint>(eventPointProcessor.getFingerprints());
}
示例13: store
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts NFFT fingerprints from an audio resource and stores them, together
 * with the resource's metadata, in the fingerprint storage.
 *
 * Fix: corrects the typo "bundeled" in the log message.
 *
 * @param resource    path or pipe input of the audio to index
 * @param description human-readable metadata stored alongside the audio
 * @return duration of the processed audio in seconds
 */
@Override
public double store(String resource, String description) {
    int samplerate = Config.getInt(Key.NFFT_SAMPLE_RATE);
    int size = Config.getInt(Key.NFFT_SIZE);
    int overlap = size - Config.getInt(Key.NFFT_STEP_SIZE);
    AudioDispatcher d = AudioDispatcherFactory.fromPipe(resource, samplerate, size, overlap);
    final NFFTEventPointProcessor minMaxProcessor = new NFFTEventPointProcessor(size, overlap, samplerate);
    d.addAudioProcessor(minMaxProcessor);
    d.run();
    // A set removes duplicate fingerprints before storage.
    Set<NFFTFingerprint> fingerprints = new HashSet<NFFTFingerprint>(minMaxProcessor.getFingerprints());
    int identifier = FileUtils.getIdentifier(resource);
    for (NFFTFingerprint fingerprint : fingerprints) {
        storage.addFingerprint(identifier, fingerprint.t1, fingerprint.hash());
    }
    // Store the meta data.
    storage.addAudio(identifier, description);
    // Commit the changes to store the fingerprints
    double durationInSeconds = d.secondsProcessed();
    storage.audioObjectAdded((int) Math.round(durationInSeconds));
    LOG.info(String.format("Stored %d fingerprints bundled from %d event points for %s.",
            fingerprints.size(), minMaxProcessor.getEventPoints().size(), resource));
    return durationInSeconds;
}
示例14: extractFingerprintsFromQuery
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Extracts NFFT fingerprints from a query resource.
 *
 * @param query path or pipe input of the query audio
 * @return the extracted fingerprints
 */
public List<NFFTFingerprint> extractFingerprintsFromQuery(String query){
    final int sampleRate = Config.getInt(Key.NFFT_SAMPLE_RATE);
    final int fftSize = Config.getInt(Key.NFFT_SIZE);
    final int fftOverlap = fftSize - Config.getInt(Key.NFFT_STEP_SIZE);
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromPipe(query, sampleRate, fftSize, fftOverlap);
    final NFFTEventPointProcessor eventPointProcessor = new NFFTEventPointProcessor(fftSize, fftOverlap, sampleRate);
    dispatcher.addAudioProcessor(eventPointProcessor);
    dispatcher.run();
    return new ArrayList<NFFTFingerprint>(eventPointProcessor.getFingerprints());
}
示例15: getAudioBuffer
import be.tarsos.dsp.AudioDispatcher; //导入方法依赖的package包/类
/**
 * Decodes a fragment of an audio file into a float buffer.
 *
 * Fixes: reuses the declared sample-rate constant instead of repeating the
 * literal 44100 in the dispatcher call, and bounds the copy so a delivered
 * buffer longer than requested cannot overflow the result array.
 *
 * @param file  the audio file to read from
 * @param start fragment start in seconds
 * @param stop  fragment end in seconds
 * @return a buffer of (stop - start) seconds of samples at 44.1 kHz
 */
public static float[] getAudioBuffer(File file,double start,double stop){
    final int sampleRate = 44100;
    int sampleStart = (int) Math.round(sampleRate * start);
    int sampleStop = (int) Math.round(sampleRate * stop);
    int diff = sampleStop - sampleStart;
    final float[] audioBuffer = new float[diff];
    AudioDispatcher d = AudioDispatcherFactory.fromPipe(file.getAbsolutePath(), sampleRate, diff, 0);
    d.skip(start);
    d.addAudioProcessor(new AudioProcessor() {
        boolean filled = false;

        @Override
        public void processingFinished() {
        }

        @Override
        public boolean process(AudioEvent audioEvent) {
            if (!filled) {
                float[] source = audioEvent.getFloatBuffer();
                // Bounded copy: never write past the requested fragment length.
                System.arraycopy(source, 0, audioBuffer, 0, Math.min(source.length, audioBuffer.length));
                filled = true;
            }
            // Only one buffer is needed; stop the processing chain.
            return false;
        }
    });
    d.run();
    return audioBuffer;
}