This article collects typical usage examples of the Java class org.openimaj.audio.SampleChunk. If you are wondering what SampleChunk is for or how to use it, the examples here should help.
The SampleChunk class belongs to the org.openimaj.audio package. Fifteen selected code examples of the class are presented below.
Example 1: PowerCepstrumVis
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * Construct the visualisation and render the power cepstrum of the given
 * audio stream.
 *
 * @param as The audio stream to process
 * @throws Exception If the stream cannot be processed
 */
public PowerCepstrumVis( AudioStream as ) throws Exception
{
    FImage img = new FImage( 1000, 600 );
    PowerCepstrumTransform pct = new PowerCepstrumTransform();
    SampleChunk sc = null;
    while( (sc = as.nextSampleChunk()) != null )
    {
        // Calculate the power cepstrum of this chunk
        pct.process( sc );
        float[][] c = pct.getLastCepstrum();

        // Draw the cepstrum values into the right-most column,
        // scroll the image left and redisplay it
        for( int i = 0; i < c[0].length; i++ )
            img.setPixel( img.getWidth()-1, i, c[0][i]/50f );
        img.shiftLeftInplace();
        DisplayUtilities.displayName( img, "Power Cepstrum" );
    }
}
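The constructor above pulls the chunks itself, so launching the visualisation only needs an AudioStream. Below is a minimal, hedged sketch of a driver; the file name is invented, and XuggleAudio is used for decoding as in the later examples.

public static void main( final String[] args ) throws Exception
{
    // Decode a local file (hypothetical path) and hand it to the visualiser,
    // which loops over nextSampleChunk() until the stream is exhausted.
    final XuggleAudio audio = new XuggleAudio( new File( "audio.wav" ) );
    new PowerCepstrumVis( audio );
}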
Example 2: train
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * Train the annotator on the given streams. The streams are annotated with
 * the appropriate annotation, and sample chunks (and therefore buffers) are
 * gathered from the streams into batches to train the annotator.
 *
 * @param streams
 *            The annotated streams
 */
public void train(final List<IndependentPair<AudioStream, ANNOTATION>> streams)
{
    // Convert all the incoming streams into AnnotatedObject instances,
    // where each sample buffer read from a stream is paired with that
    // stream's annotation.
    final List<Annotated<SampleBuffer, ANNOTATION>> list = new ArrayList<Annotated<SampleBuffer, ANNOTATION>>();
    for (final IndependentPair<AudioStream, ANNOTATION> stream : streams)
    {
        SampleChunk sc = null;
        while ((sc = stream.firstObject().nextSampleChunk()) != null)
        {
            final SampleBuffer sb = sc.getSampleBuffer();
            final AnnotatedObject<SampleBuffer, ANNOTATION> a = AnnotatedObject.create(sb, stream.secondObject());
            list.add(a);
        }
    }

    // Train the annotator on the collected, annotated buffers
    this.annotator.train(list);
}
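A hedged sketch of how the training list might be assembled before calling train(): the file names and the String labels are invented for illustration, IndependentPair.pair(...) is assumed to be the usual OpenIMAJ factory method, and `trainer` stands for an instance of the enclosing annotator class parameterised with String annotations.

final List<IndependentPair<AudioStream, String>> streams =
        new ArrayList<IndependentPair<AudioStream, String>>();
streams.add( IndependentPair.pair( (AudioStream)new XuggleAudio( new File( "speech.wav" ) ), "speech" ) );
streams.add( IndependentPair.pair( (AudioStream)new XuggleAudio( new File( "music.wav" ) ), "music" ) );

// Train on the annotated streams
trainer.train( streams );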
Example 3: main
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * Main method
 *
 * @param args
 *            command-line args (not used)
 * @throws InterruptedException
 */
public static void main(final String[] args) throws InterruptedException
{
    // Construct a new audio spectrogram visualisation
    final AudioSpectrogram aw = new AudioSpectrogram(440, 600);
    aw.showWindow("Spectrogram");

    // Start a sound grabber that will grab from your default microphone
    final JavaSoundAudioGrabber jsag = new JavaSoundAudioGrabber(new AudioFormat(16, 44.1, 1));
    new Thread(jsag).start();

    // Wait until the grabber has started (sometimes it takes a while)
    while (jsag.isStopped())
        Thread.sleep(50);

    // Then send each of the sample chunks to the visualisation
    SampleChunk sc = null;
    while ((sc = jsag.nextSampleChunk()) != null)
        aw.setData(sc);
}
Example 4: main
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * Main method.
 *
 * @param args Command line args (not used)
 * @throws MalformedURLException
 */
public static void main( final String[] args ) throws MalformedURLException
{
    // Construct a new audio waveform visualisation
    final AudioWaveform aw = new AudioWaveform( 400, 400 );
    aw.showWindow( "Waveform" );

    // Open a URL to the sine wave sweep. If you have downloaded
    // this file you should use a new File(<filename>) here.
    final XuggleAudio jsag = new XuggleAudio(
            new URL( "http://www.audiocheck.net/download.php?" +
                    "filename=Audio/audiocheck.net_sweep20-20klin.wav" ) );

    // Then send each of the sample chunks to the visualisation
    SampleChunk sc = null;
    while( (sc = jsag.nextSampleChunk()) != null )
        aw.setData( sc.getSampleBuffer() );
}
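As the comment in the example notes, a downloaded copy of the sweep can be opened with the File-based constructor instead. A short hedged variant follows, reusing the aw visualisation from above; the local file name is invented.

// Same visualisation, but decoding a local copy of the sweep file
final XuggleAudio local = new XuggleAudio( new File( "audiocheck.net_sweep20-20klin.wav" ) );
SampleChunk sc = null;
while( (sc = local.nextSampleChunk()) != null )
    aw.setData( sc.getSampleBuffer() );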
Example 5: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * Processes a single sample chunk: calculates the FFT, gets the magnitudes,
 * copies the format (if it's the first chunk), and then goes on to update the image.
 *
 * @param s The sample chunk to process
 */
public void process( final SampleChunk s )
{
    // Process the FFT
    this.fftp.process( s.getSampleBuffer() );

    // Get the magnitudes to show in the spectrogram
    final float[] f = this.fftp.getNormalisedMagnitudes( 1f/Integer.MAX_VALUE )[0];

    // Store the format of this sample chunk if we don't have one yet.
    // This allows us to continue to draw the frequency bands on the image
    // (if it's configured to do that).
    if( this.audioFormat == null )
        this.audioFormat = s.getFormat().clone();

    // Store this FFT into the data member. Note this calls a method in this class.
    this.setData( f );
}
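The same FFT-magnitude pattern can be used outside the visualisation. The sketch below assumes the fftp field above is an org.openimaj.audio.analysis.FourierTransform with a no-argument constructor; only the calls that appear in the example are taken as given, and the file name is invented.

final XuggleAudio xa = new XuggleAudio( new File( "music.wav" ) );
final FourierTransform fft = new FourierTransform();
SampleChunk sc = null;
while( (sc = xa.nextSampleChunk()) != null )
{
    // FFT of this chunk, then magnitudes normalised against the integer sample scale
    fft.process( sc.getSampleBuffer() );
    final float[] mags = fft.getNormalisedMagnitudes( 1f/Integer.MAX_VALUE )[0];
    System.out.println( "Chunk with " + mags.length + " frequency bins" );
}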
Example 6: nextSampleChunk
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.AudioStream#nextSampleChunk()
 */
@Override
public SampleChunk nextSampleChunk()
{
    final Oscillator o = this.oscillator;

    if( !this.noteOn )
        return null;

    // o = Oscillator.NONE;

    final SampleChunk x = o.getSampleChunk( this.sampleChunkLength,
            this.currentTime, this.frequency, this.gain, this.format );

    this.applyADSREnvelope( x.getSampleBuffer() );

    this.currentTime += x.getSampleBuffer().size() /
            (this.format.getSampleRateKHz()*1000d);
    this.currentTimeMS = this.currentTime * 1000d;

    return x;
}
Example 7: getSampleChunk
import org.openimaj.audio.SampleChunk; // import the required package/class
@Override
public SampleChunk getSampleChunk( final int length, final double time,
        final double freq, final int gain, final AudioFormat format )
{
    // Work out how many samples there are per wave at this frequency
    final double samplesPerWave = format.getSampleRateKHz()*1000d/freq;

    // Phase offset in radians: (f*t)-floor(f*t) is the fractional part of the
    // number of waves completed at this point in time (assuming the first
    // wave starts at a phase of zero).
    final double p = 2*Math.PI*((freq*time)-Math.floor(freq*time));

    // Create an appropriate sample buffer
    final SampleBuffer sb = SampleBufferFactory.createSampleBuffer(
            format, length );

    // Fill it with a sine wave
    final double z = 2*Math.PI/samplesPerWave;
    for( int i = 0; i < length; i++ )
        sb.set( i, (float)(Math.sin( i*z+p )*gain) );

    return sb.getSampleChunk();
}
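The same arithmetic can be used on its own to synthesise a test tone. A hedged, standalone sketch follows: the 440 Hz / 44.1 kHz numbers are illustrative, and the gain of Integer.MAX_VALUE/2 assumes sample values sit on the signed-integer scale, as the 1f/Integer.MAX_VALUE normalisation in Example 5 suggests.

final AudioFormat format = new AudioFormat( 16, 44.1, 1 );
final int length = 1024;
final double freq = 440, time = 0, gain = Integer.MAX_VALUE / 2d;

// Samples per wave and starting phase, exactly as in the example above
final double samplesPerWave = format.getSampleRateKHz()*1000d / freq;
final double p = 2*Math.PI*((freq*time) - Math.floor(freq*time));

final SampleBuffer sb = SampleBufferFactory.createSampleBuffer( format, length );
for( int i = 0; i < length; i++ )
    sb.set( i, (float)(Math.sin( i * 2*Math.PI/samplesPerWave + p ) * gain) );

final SampleChunk chunk = sb.getSampleChunk();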
Example 8: read
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.io.ObjectReader#read(java.lang.Object)
 */
@Override
public List<SampleBuffer> read( final InputStream stream ) throws IOException
{
    // Open the stream.
    final XuggleAudio xa = new XuggleAudio( stream );

    // Set up a chunker that will deliver samples in one-second chunks.
    final int nSamplesInOneSecond = (int)(xa.getFormat().getSampleRateKHz() * 1000);
    final FixedSizeSampleAudioProcessor f = new FixedSizeSampleAudioProcessor(
            xa, nSamplesInOneSecond );

    // Set up our output list
    final List<SampleBuffer> buffers = new ArrayList<SampleBuffer>();

    // Now read the audio until we're done
    SampleChunk sc = null;
    while( (sc = f.nextSampleChunk()) != null )
        buffers.add( sc.getSampleBuffer() );

    System.out.println( "Got "+buffers.size()+" one-second sample buffers." );
    return buffers;
}
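The FixedSizeSampleAudioProcessor re-chunking pattern works for any window length: since getSampleRateKHz() is samples per millisecond, multiplying it by a duration in milliseconds gives the chunk size. A hedged sketch with roughly 10 ms chunks (the file name is invented):

final XuggleAudio xa = new XuggleAudio( new File( "speech.wav" ) );

// 10 ms worth of samples: kHz is samples per millisecond
final int nSamplesPer10ms = (int)(xa.getFormat().getSampleRateKHz() * 10);
final FixedSizeSampleAudioProcessor chunker =
        new FixedSizeSampleAudioProcessor( xa, nSamplesPer10ms );

SampleChunk sc = null;
while( (sc = chunker.nextSampleChunk()) != null )
    System.out.println( "Chunk of " + sc.getSampleBuffer().size() + " samples" );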
Example 9: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    if( sample.getFormat().getNBits() != this.outputFormat.getNBits() )
        throw new IllegalArgumentException( "The number of bits in the " +
                "output format is not the same as the sample chunk. Use a " +
                "bit-depth converter first before using the sample-rate " +
                "converter." );

    if( sample.getFormat().getNumChannels() != this.outputFormat.getNumChannels() )
        throw new IllegalArgumentException( "The number of channels in the " +
                "output format is not the same as the sample chunk. Use a " +
                "channel converter first before using the sample-rate " +
                "converter." );

    // Nothing to do if the sample rate already matches
    if( sample.getFormat().getSampleRateKHz() == this.outputFormat.getSampleRateKHz() )
        return sample;

    // Resample and carry the original start timecode over to the new chunk
    final SampleChunk sc = this.sampleConverter.process( sample, this.outputFormat );
    sc.setStartTimecode( sample.getStartTimecode() );
    return sc;
}
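A hedged sketch of driving such a converter over a whole stream: the example does not show how the converter is constructed, so `rateConverter` and `audioStream` are left as placeholders here; only the process(SampleChunk) call above and the pull-model loop from the other examples are assumed.

SampleChunk in = null;
while( (in = audioStream.nextSampleChunk()) != null )
{
    // `rateConverter` stands for an instance of the enclosing converter class
    final SampleChunk out = rateConverter.process( in );
    // `out` now has the target sample rate, with bit depth and channel
    // count unchanged (mismatches would have thrown above)
}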
Example 10: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    if( sample.getFormat().getSampleRateKHz() != this.outputFormat.getSampleRateKHz() )
        throw new IllegalArgumentException( "The sample rate of the " +
                "output format is not the same as the sample chunk. Use a " +
                "sample-rate converter first before using the bit-depth " +
                "converter." );

    if( sample.getFormat().getNumChannels() != this.outputFormat.getNumChannels() )
        throw new IllegalArgumentException( "The number of channels in the " +
                "output format is not the same as the sample chunk. Use a " +
                "channel converter first before using the bit-depth " +
                "converter." );

    // Nothing to do if the bit depth already matches
    if( sample.getFormat().getNBits() == this.outputFormat.getNBits() )
        return sample;

    // Convert the bit depth and carry the original start timecode over
    final SampleChunk sc = this.bitDepthConverter.process( sample, this.outputFormat );
    sc.setStartTimecode( sample.getStartTimecode() );
    return sc;
}
Example 11: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk samples )
{
    // Detect beats. Note that we stop as soon as we detect a beat.
    this.beatDetected = false;
    final SampleBuffer sb = samples.getSampleBuffer();
    int i = 0;
    for( ; i < sb.size(); i++ )
    {
        if( this.beatDetected = this.processSample( sb.get(i) ) )
            break;
    }

    // Work out when the beat occurred: the chunk's start time plus the offset
    // of sample i. getSampleRateKHz() is samples per millisecond, so dividing
    // the sample index by it gives the offset in milliseconds.
    if( this.beatDetected() )
        this.beatTimecode.setTimecodeInMilliseconds( (long)(
                samples.getStartTimecode().getTimecodeInMilliseconds() +
                i / this.format.getSampleRateKHz() ) );

    // We return the samples unaltered
    return samples;
}
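A hedged driver loop for a detector like this: `detector` stands for an instance of the enclosing class (its construction is not shown in the example), `audioStream` is any AudioStream, and beatDetected() is the accessor used in the code above.

SampleChunk sc = null;
while( (sc = audioStream.nextSampleChunk()) != null )
{
    detector.process( sc );
    if( detector.beatDetected() )
        System.out.println( "Beat detected in the chunk starting at " +
                sc.getStartTimecode() );
}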
Example 12: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    final SampleBuffer sb = sample.getSampleBuffer();
    final int nc = sb.getFormat().getNumChannels();

    // Apply the pre-emphasis recurrence y[n] = x[n] - factor * x[n-1]
    // independently to each channel. Samples are interleaved, so sample s
    // of channel c lives at index s*nc + c.
    for( int c = 0; c < nc; c++ )
    {
        float previous = sb.get( c );
        for( int s = 1; s < sb.size()/nc; s++ )
        {
            final float v = sb.get( s*nc + c );
            sb.set( s*nc + c, (float)(v - this.factor * previous) );
            previous = v;
        }
    }

    return sample;
}
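To make the effect of the recurrence concrete, here is a standalone illustration on a plain float array. The factor 0.97 is a commonly used value and is assumed here; it is not taken from the example.

final float factor = 0.97f;
final float[] x = { 0.0f, 0.5f, 0.5f, 0.5f };
final float[] y = new float[x.length];

y[0] = x[0];
for( int n = 1; n < x.length; n++ )
    y[n] = x[n] - factor * x[n-1];

// y ≈ { 0.0, 0.5, 0.015, 0.015 }: the step (high-frequency content) passes
// through, while the flat portion (low-frequency content) is heavily attenuated.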
Example 13: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.FixedSizeSampleAudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    final SampleBuffer sb = sample.getSampleBuffer();
    final int nc = sample.getFormat().getNumChannels();

    // Convolve the filter coefficients with the window of samples for each
    // channel. Samples are interleaved, so sample i of channel c lives at
    // index i*nc + c; the result is written back into the channel's first slot.
    for( int c = 0; c < nc; c++ )
    {
        float acc = 0;
        for( int i = 0; i < this.coefficients.length; i++ )
            acc += sb.get( i*nc + c ) * this.coefficients[i];
        sb.set( c, acc );
    }

    return sample;
}
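The inner loop is a plain dot product between the coefficient vector and the sample window. A standalone illustration with invented 4-tap moving-average coefficients:

final float[] coefficients = { 0.25f, 0.25f, 0.25f, 0.25f };
final float[] window = { 0.1f, 0.2f, 0.3f, 0.4f };

float acc = 0;
for( int i = 0; i < coefficients.length; i++ )
    acc += window[i] * coefficients[i];

// acc ≈ 0.25f, the mean of the four window samples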
Example 14: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    // Run the chunk through the filter bank and compare its output power with
    // that of the previous chunk; a jump larger than the threshold is an onset.
    this.filterBank.process( sample );
    final double p = this.filterBank.getOutputPower();
    final double delta = Math.abs( this.previousPower - p );

    if( delta > this.threshold )
        this.onsetDetected = true;

    this.previousPower = p;
    return sample;
}
Example 15: process
import org.openimaj.audio.SampleChunk; // import the required package/class
/**
 * {@inheritDoc}
 * @see org.openimaj.audio.processor.AudioProcessor#process(org.openimaj.audio.SampleChunk)
 */
@Override
public SampleChunk process( final SampleChunk sample ) throws Exception
{
    if( this.filters == null )
    {
        this.format = sample.getFormat();
        this.setupFilters();
    }

    this.outputPower = 0;
    for( final FeedForwardCombFilter filter : this.filters )
    {
        // Process the sample with each filter
        filter.process( sample );
        this.outputPower += filter.getOutputPower();
    }

    return sample;
}