This article collects typical usage examples of the Java class be.tarsos.dsp.AudioEvent. If you are unsure what the AudioEvent class does or how to use it, the examples below should help. AudioEvent belongs to the be.tarsos.dsp package; fifteen code examples are shown, ordered by popularity.
Example 1: handlePitch

import be.tarsos.dsp.AudioEvent; // import the required package/class

public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
    if (pitchDetectionResult.getPitch() != -1) {
        double timeStamp = audioEvent.getTimeStamp();
        float pitch = pitchDetectionResult.getPitch();
        float probability = pitchDetectionResult.getProbability();
        double rms = audioEvent.getRMS() * 100;
        // No trailing \n needed: println already appends a newline.
        String message = String.format("Pitch detected at %.2fs: %.2fHz ( %.2f probability, RMS: %.5f )",
                timeStamp, pitch, probability, rms);
        System.out.println(message);
        String addMe1;
        // For low-confidence detections, repeat the previous pitch instead of the new estimate.
        if (probability < 0.5 && Pitch.pitches.getItemCount() > 2) {
            addMe1 = Pitch.pitches.getItem(Pitch.pitches.getItemCount() - 1);
        } else {
            addMe1 = String.valueOf(pitch);
        }
        String addMe2 = String.valueOf(timeStamp);
        Pitch.pitches.add(addMe1);
        Pitch.time.add(addMe2);
    }
}
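A handler like this only runs once it is attached to a dispatcher. Below is a minimal sketch (not part of the original example) of the typical wiring; the 44100/2048/0 capture parameters and the FFT_YIN algorithm choice are assumptions.

import javax.sound.sampled.LineUnavailableException;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
import be.tarsos.dsp.pitch.PitchProcessor;
import be.tarsos.dsp.pitch.PitchProcessor.PitchEstimationAlgorithm;

public class PitchFromMic {
    public static void main(String[] args) throws LineUnavailableException {
        // Capture from the default microphone: 44.1 kHz, 2048-sample buffers, no overlap.
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 2048, 0);
        // The PitchProcessor forwards every detection to a PitchDetectionHandler
        // such as the handlePitch implementation above.
        dispatcher.addAudioProcessor(new PitchProcessor(
                PitchEstimationAlgorithm.FFT_YIN, 44100, 2048,
                (result, audioEvent) -> System.out.printf(
                        "%.2fs: %.2f Hz%n", audioEvent.getTimeStamp(), result.getPitch())));
        dispatcher.run(); // blocks; use new Thread(dispatcher).start() to run in the background
    }
}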
Example 2: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] currentMagnitudes = eventPointProcessor.getMagnitudes().clone();
    // For visualization purposes: store the new max value, or decay the running max.
    float currentMaxValue = max(currentMagnitudes);
    if (currentMaxValue > runningMaxMagnitude) {
        runningMaxMagnitude = currentMaxValue;
    } else {
        runningMaxMagnitude = 0.9999f * runningMaxMagnitude;
    }
    normalize(currentMagnitudes);
    magnitudes.put((float) audioEvent.getTimeStamp(), currentMagnitudes);
    return true;
}
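The decay trick here (adopt a new peak immediately, let an old peak fade by a factor of 0.9999 per frame) keeps the normalization stable between frames. Since getMagnitudes() comes from an event-point processor not shown here, the following is a self-contained sketch of the same idea applied to raw samples instead:

import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;

public class RunningPeakTracker implements AudioProcessor {
    private float runningMax = 0;

    @Override
    public boolean process(AudioEvent audioEvent) {
        float currentMax = 0;
        for (float sample : audioEvent.getFloatBuffer()) {
            currentMax = Math.max(currentMax, Math.abs(sample));
        }
        // Adopt a new peak immediately; otherwise let the old peak decay slowly.
        runningMax = Math.max(currentMax, 0.9999f * runningMax);
        return true;
    }

    @Override
    public void processingFinished() {
    }
}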
Example 3: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] fftData = audioEvent.getFloatBuffer().clone();
    Arrays.fill(zeroPaddedData, 0);
    System.arraycopy(fftData, 0, zeroPaddedData, fftData.length / 2, fftData.length);
    fft.forwardTransform(zeroPaddedData);
    fft.multiply(zeroPaddedData, zeroPaddedInvesedQuery);
    fft.backwardsTransform(zeroPaddedData);
    float maxVal = -100000;
    int maxIndex = 0;
    for (int i = 0; i < zeroPaddedData.length; i++) {
        if (zeroPaddedData[i] > maxVal) {
            maxVal = zeroPaddedData[i];
            maxIndex = i;
        }
    }
    // maxIndex counts interleaved FFT values, hence maxIndex / 2 for the lag in samples.
    float time = (float) (audioEvent.getTimeStamp() - audioEvent.getBufferSize() / audioEvent.getSampleRate()
            + maxIndex / 2 / audioEvent.getSampleRate() + 0.005);
    handler.handleCrossCorrelation((float) audioEvent.getTimeStamp(), time, maxVal);
    return true;
}
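The snippet assumes zeroPaddedInvesedQuery already holds the spectrum of a time-reversed, zero-padded copy of the query: multiplying the two spectra and transforming back yields the cross-correlation. A sketch of that setup follows, reusing the be.tarsos.dsp.util.fft.FFT class from the example; the exact reversal and padding layout are assumptions, not the original code.

import be.tarsos.dsp.util.fft.FFT;

public class CrossCorrelationSetup {
    final FFT fft;
    final float[] zeroPaddedInvesedQuery;

    CrossCorrelationSetup(float[] query) {
        // Zero-pad to twice the query length so circular correlation behaves linearly.
        zeroPaddedInvesedQuery = new float[query.length * 2];
        // Time-reverse the query: correlating is convolving with the reversed signal.
        for (int i = 0; i < query.length; i++) {
            zeroPaddedInvesedQuery[query.length / 2 + i] = query[query.length - 1 - i];
        }
        // Transform once up front; process() then only multiplies spectra.
        fft = new FFT(zeroPaddedInvesedQuery.length);
        fft.forwardTransform(zeroPaddedInvesedQuery);
    }
}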
Example 4: monitor

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public void monitor(String query, final int maxNumberOfResults, Set<Integer> avoid,
        final QueryResultHandler handler) {
    int samplerate = Config.getInt(Key.NCTEQ_SAMPLE_RATE);
    int size = Config.getInt(Key.MONITOR_STEP_SIZE) * samplerate;
    int overlap = Config.getInt(Key.MONITOR_OVERLAP) * samplerate;
    final ConstantQ constantQ = createConstantQ();
    AudioDispatcher d = AudioDispatcherFactory.fromPipe(query, samplerate, size, overlap);
    d.addAudioProcessor(new AudioProcessor() {
        @Override
        public boolean process(AudioEvent audioEvent) {
            double timeStamp = audioEvent.getTimeStamp() - Config.getInt(Key.MONITOR_OVERLAP);
            processMonitorQuery(audioEvent.getFloatBuffer().clone(), maxNumberOfResults, handler, timeStamp, constantQ);
            return true;
        }

        @Override
        public void processingFinished() {
        }
    });
    d.run();
}
Example 5: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    // Don't do anything until the fade-out has been triggered.
    if (isFadeOut) {
        if (firstTime == -1) {
            firstTime = audioEvent.getTimeStamp();
        }
        // Decrease the gain according to the time elapsed since the fade-out started.
        time = audioEvent.getTimeStamp() - firstTime;
        gp.setGain(1 - time / duration);
        gp.process(audioEvent);
    }
    return true;
}
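This snippet relies on fields declared elsewhere in its class (isFadeOut, firstTime, time, duration and a GainProcessor gp). Below is a minimal self-contained version; the trigger method and the gain clamp are additions of this sketch, not the original (note the original would drive the gain negative once time exceeds duration). The same skeleton applies to the fade-in variant in the next example.

import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.GainProcessor;

public class FadeOutSketch implements AudioProcessor {
    private final double duration; // fade length in seconds
    private final GainProcessor gp = new GainProcessor(1.0);
    private boolean isFadeOut = false;
    private double firstTime = -1;

    public FadeOutSketch(double duration) {
        this.duration = duration;
    }

    // Call this to start the fade-out (hypothetical trigger, not from the original).
    public void startFadeOut() {
        isFadeOut = true;
    }

    @Override
    public boolean process(AudioEvent audioEvent) {
        if (isFadeOut) {
            if (firstTime == -1) {
                firstTime = audioEvent.getTimeStamp();
            }
            double time = audioEvent.getTimeStamp() - firstTime;
            // Clamp at zero so the gain never goes negative after the fade completes.
            gp.setGain(Math.max(0, 1 - time / duration));
            gp.process(audioEvent);
        }
        return true;
    }

    @Override
    public void processingFinished() {
    }
}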
Example 6: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    // Don't do anything after the end of the fade-in.
    if (fadingIn) {
        if (firstTime == -1) {
            firstTime = audioEvent.getTimeStamp();
        }
        // Increase the gain according to the time elapsed since the fade-in started.
        time = audioEvent.getTimeStamp() - firstTime;
        gp.setGain(time / duration);
        gp.process(audioEvent);
        if (time > duration) {
            fadingIn = false;
        }
    }
    return true;
}
Example 7: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] audioFloatBuffer = audioEvent.getFloatBuffer();
    int overlap = audioEvent.getOverlap();
    for (int i = overlap; i < audioFloatBuffer.length; i++) {
        if (position >= echoBuffer.length) {
            position = 0;
        }
        // The output is the input plus the decayed echo.
        audioFloatBuffer[i] = audioFloatBuffer[i] + echoBuffer[position] * decay;
        // Store the sample in the buffer.
        echoBuffer[position] = audioFloatBuffer[i];
        position++;
    }
    applyNewEchoLength();
    return true;
}
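TarsosDSP ships a ready-made delay effect built around exactly this kind of circular buffer. A usage sketch follows; treat the DelayEffect constructor arguments (echo length in seconds, decay, sample rate) as assumptions.

import javax.sound.sampled.LineUnavailableException;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.effects.DelayEffect;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
import be.tarsos.dsp.io.jvm.AudioPlayer;

public class EchoDemo {
    public static void main(String[] args) throws LineUnavailableException {
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 1024, 0);
        // A 300 ms echo, each repeat at half the previous amplitude.
        dispatcher.addAudioProcessor(new DelayEffect(0.3, 0.5, 44100));
        dispatcher.addAudioProcessor(new AudioPlayer(dispatcher.getFormat()));
        dispatcher.run();
    }
}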
Example 8: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] audioBuffer = audioEvent.getFloatBuffer();
    float[] sortBuffer = new float[audioBuffer.length];
    transform.forwardTrans(audioBuffer);
    for (int i = 0; i < sortBuffer.length; i++) {
        sortBuffer[i] = Math.abs(audioBuffer[i]);
    }
    Arrays.sort(sortBuffer);
    // Zero every coefficient whose magnitude falls at or below the
    // 'compression'-th smallest magnitude, keeping only the largest ones.
    double threshold = sortBuffer[compression];
    for (int i = 0; i < audioBuffer.length; i++) {
        if (Math.abs(audioBuffer[i]) <= threshold) {
            audioBuffer[i] = 0;
        }
    }
    return true;
}
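This coder keeps only the wavelet coefficients above the chosen threshold; reconstruction on the receiving side applies the inverse transform. A sketch of that decoding step follows, assuming the forward transform came from one of TarsosDSP's lifting-scheme wavelets with a matching inverseTrans method (class and method names are assumptions).

import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.wavelet.lift.HaarWavelet;

public class HaarDecoderSketch implements AudioProcessor {
    private final HaarWavelet transform = new HaarWavelet();

    @Override
    public boolean process(AudioEvent audioEvent) {
        // Undo the forward transform applied by the coder, in place.
        transform.inverseTrans(audioEvent.getFloatBuffer());
        return true;
    }

    @Override
    public void processingFinished() {
    }
}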
Example 9: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] audioBuffer = audioEvent.getFloatBuffer();
    float[] sortBuffer = new float[audioBuffer.length];
    transform.transform(audioBuffer);
    for (int i = 0; i < sortBuffer.length; i++) {
        sortBuffer[i] = Math.abs(audioBuffer[i]);
    }
    Arrays.sort(sortBuffer);
    double threshold = sortBuffer[compression];
    for (int i = 0; i < audioBuffer.length; i++) {
        if (Math.abs(audioBuffer[i]) <= threshold) {
            audioBuffer[i] = 0;
        }
    }
    return true;
}
Example 10: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[] audioFloatBuffer = audioEvent.getFloatBuffer();
    for (int i = audioEvent.getOverlap(); i < audioFloatBuffer.length; i++) {
        // Shift the input history.
        System.arraycopy(in, 0, in, 1, in.length - 1);
        in[0] = audioFloatBuffer[i];
        // Calculate y from the a and b coefficients and the input and output histories.
        float y = 0;
        for (int j = 0; j < a.length; j++) {
            y += a[j] * in[j];
        }
        for (int j = 0; j < b.length; j++) {
            y += b[j] * out[j];
        }
        // Shift the output history.
        System.arraycopy(out, 0, out, 1, out.length - 1);
        out[0] = y;
        audioFloatBuffer[i] = y;
    }
    return true;
}
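The a and b coefficient arrays (feed-forward and feed-back) are supplied by concrete IIRFilter subclasses, several of which ship with TarsosDSP. A usage sketch, assuming the LowPassFS(frequency, sampleRate) constructor:

import javax.sound.sampled.LineUnavailableException;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.filters.LowPassFS;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
import be.tarsos.dsp.io.jvm.AudioPlayer;

public class LowPassDemo {
    public static void main(String[] args) throws LineUnavailableException {
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 1024, 0);
        // LowPassFS is an IIRFilter subclass that fills in the a/b coefficients
        // for a low-pass with a ~1 kHz cutoff at a 44.1 kHz sample rate.
        dispatcher.addAudioProcessor(new LowPassFS(1000, 44100));
        dispatcher.addAudioProcessor(new AudioPlayer(dispatcher.getFormat()));
        dispatcher.run();
    }
}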
Example 11: startDispatch

import be.tarsos.dsp.AudioEvent; // import the required package/class

private void startDispatch() {
    dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
    uiThread = new Handler();
    PitchDetectionHandler pdh = (PitchDetectionResult result, AudioEvent audioEvent) -> uiThread.post(() -> {
        final float pitchInHz = result.getPitch();
        int pitch = pitchInHz > 0 ? (int) pitchInHz : 1;
        if (pitch > 1 && mConnected) {
            // On a sufficiently large jump in pitch, switch the LEDs to a random color.
            if ((pitch - lastPitch) >= sensitive * 10) {
                Random random = new Random();
                byte[] rgb = getLedBytes(random.nextInt(600000000) + 50000);
                controlLed(rgb);
            }
            if (minPitch > pitch) {
                minPitch = pitch;
            }
        }
        lastPitch = pitch;
    });
    processor = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pdh);
    dispatcher.addAudioProcessor(processor);
    listeningThread = new Thread(dispatcher);
    listeningThread.start();
}
Example 12: checkMicrophone

import be.tarsos.dsp.AudioEvent; // import the required package/class

private void checkMicrophone() {
    AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
    PitchDetectionHandler pdh = new PitchDetectionHandler() {
        @Override
        public void handlePitch(PitchDetectionResult result, AudioEvent e) {
            final float pitchInHz = result.getPitch();
            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    if (pitchInHz != -1) {
                        System.out.println(pitchInHz);
                    }
                    if (pitchInHz <= 18500 && pitchInHz >= 17500) {
                        System.err.println("Pitch Richtig");
                    }
                }
            });
        }
    };
    AudioProcessor p = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pdh);
    dispatcher.addAudioProcessor(p);
    new Thread(dispatcher, "Audio Dispatcher").start();
}
Example 13: process

import be.tarsos.dsp.AudioEvent; // import the required package/class

@Override
public boolean process(AudioEvent audioEvent) {
    float[][] input = {audioEvent.getFloatBuffer()};
    rbs.process(input, false);
    int availableSamples = rbs.available();
    // Busy-wait until the stretcher has produced output for this block.
    while (availableSamples == 0) {
        try {
            Thread.sleep(1);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        availableSamples = rbs.available();
    }
    float[][] output = {new float[availableSamples]};
    rbs.retrieve(output);
    audioEvent.setFloatBuffer(output[0]);
    return true;
}
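The rbs field is a RubberBandStretcher from the separate rubberband JNI wrapper, constructed elsewhere in the class. A sketch of a plausible construction follows; the option flag and the two ratios are assumptions, not the original configuration.

import com.breakfastquay.rubberband.RubberBandStretcher;

public class StretcherSetup {
    // 44.1 kHz mono, real-time (block-by-block) mode,
    // stretched to 1.5x the original duration, pitch unchanged.
    final RubberBandStretcher rbs = new RubberBandStretcher(
            44100, 1,
            RubberBandStretcher.OptionProcessRealTime,
            1.5, 1.0);
}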
Example 14: newPitchDetectionHandler

import be.tarsos.dsp.AudioEvent; // import the required package/class

/**
 * A pitch detection handler that stores {@link SoundEvent}s.
 */
protected VoiceDetection newPitchDetectionHandler() {
    VoiceDetection prs = new VoiceDetection(settings.format.getSampleRate(), settings) {
        @Override
        public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
            float frequency = (float) computeFrequency(pitchDetectionResult);
            float amplitude = computeAmplitude(audioEvent);
            double timestamp = audioEvent.getTimeStamp();
            // Pitch event.
            SoundEvent pitch = SoundEvent.pitch((float) timestamp, frequency);
            pitch.confidence = pitchDetectionResult.getProbability();
            pitchEvents.add(pitch);
            // Amplitude event.
            ampliEvents.add(SoundEvent.amplitude((float) timestamp, amplitude));
        }
    };
    return prs;
}
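computeFrequency and computeAmplitude belong to the surrounding class and are not shown. Plausible one-line bodies (guesses for illustration, not the original code) would read the pitch from the detection result and the amplitude from the event's RMS:

// Hypothetical bodies for the helpers used above; the originals are not shown.
private double computeFrequency(PitchDetectionResult result) {
    return result.getPitch();
}

private float computeAmplitude(AudioEvent audioEvent) {
    return (float) audioEvent.getRMS();
}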
Example 15: extractInfoFromAudio

import be.tarsos.dsp.AudioEvent; // import the required package/class

public void extractInfoFromAudio(final Component componentToRepaint) {
    int samplerate = Config.getInt(Key.NFFT_SAMPLE_RATE);
    int size = Config.getInt(Key.NFFT_SIZE);
    int overlap = size - Config.getInt(Key.NFFT_STEP_SIZE);
    StopWatch w = new StopWatch();
    w.start();
    d = AudioDispatcherFactory.fromPipe(audioFile.getAbsolutePath(), samplerate, size, overlap);
    eventPointProcessor = new QIFFTEventPointProcessor(size, overlap, samplerate, 4);
    d.addAudioProcessor(eventPointProcessor);
    d.addAudioProcessor(this);
    d.addAudioProcessor(new AudioProcessor() {
        @Override
        public void processingFinished() {
            // Repaint on the EDT once the whole file has been processed.
            SwingUtilities.invokeLater(new Runnable() {
                @Override
                public void run() {
                    componentToRepaint.repaint();
                }
            });
            if (referenceFileInfo != null) {
                referenceFileInfo.setMatchingFingerprints(matchingPrints);
            }
        }

        @Override
        public boolean process(AudioEvent audioEvent) {
            return true;
        }
    });
    new Thread(d).start();
}