本文整理汇总了Java中ddf.minim.Minim类的典型用法代码示例。如果您正苦于以下问题:Java Minim类的具体用法?Java Minim怎么用?Java Minim使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Minim类属于ddf.minim包,在下文中一共展示了Minim类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: JSBufferedSampleRecorder
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Constructs a JSBufferedSampleRecorder that expects audio in the given
 * AudioFormat and which will buffer recorded samples in memory before
 * saving them to a file with the given name.
 *
 * @param sys        the JSMinim instance used for system-level services
 * @param fileName   the name of the file to save to (not including the extension)
 * @param fileType   the AudioFileFormat.Type to write the file as
 * @param fileFormat the AudioFormat you want to record in
 * @param bufferSize the size of a single recording buffer; each channel
 *                   buffer is pre-allocated to hold 10 buffers' worth
 */
JSBufferedSampleRecorder(JSMinim sys,
                         String fileName,
                         AudioFileFormat.Type fileType,
                         AudioFormat fileFormat,
                         int bufferSize)
{
  name = fileName;
  type = fileType;
  format = fileFormat;
  // 20 is just an initial capacity hint for the list of buffers
  buffers = new ArrayList<FloatBuffer>(20);
  left = FloatBuffer.allocate(bufferSize*10);
  // only allocate a right channel when the format is actually stereo
  if ( format.getChannels() == Minim.STEREO )
  {
    right = FloatBuffer.allocate(bufferSize*10);
  }
  else
  {
    right = null;
  }
  system = sys;
}
示例2: process
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Decodes the most recently read raw bytes into sample frames, runs the
 * attached effect over each channel, and re-encodes the processed frames
 * back into the raw byte array.
 */
private synchronized void process()
{
  synchronized ( buffer )
  {
    // number of complete sample frames contained in the raw bytes
    final int frames = rawBytes.length / format.getFrameSize();
    buffer.setSamplesFromBytes( rawBytes, 0, format, 0, frames );
    // dispatch to the effect with however many channels we have
    final int channels = buffer.getChannelCount();
    if ( channels == Minim.STEREO )
    {
      effect.process( buffer.getChannel( 0 ), buffer.getChannel( 1 ) );
    }
    else if ( channels == Minim.MONO )
    {
      effect.process( buffer.getChannel( 0 ) );
    }
    // write the processed samples back over the raw bytes
    buffer.convertToByteArray( rawBytes, 0, format );
  }
}
示例3: save
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Finishes the recording process by closing the file and returns a
 * stream that can be used to play back what was just recorded.
 *
 * @return an AudioRecordingStream over the freshly written file
 */
public AudioRecordingStream save()
{
  try
  {
    aos.close();
  }
  catch (IOException e)
  {
    Minim.error("AudioRecorder.save: An error occurred when trying to save the file:\n"
                + e.getMessage());
  }
  final String path = filePath();
  AudioInputStream ais = system.getAudioInputStream(path);
  SourceDataLine sdl = system.getSourceDataLine(ais.getFormat(), 1024);
  // frame-based duration is reliable here because the recording will
  // always be in a raw format (WAV, AU, etc).
  long lengthMillis = AudioUtils.frames2Millis(ais.getFrameLength(), format);
  BasicMetaData meta = new BasicMetaData(path, lengthMillis, ais.getFrameLength());
  return new JSPCMAudioRecordingStream(system, meta, ais, sdl, 1024);
}
示例4: samples
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Receives a buffer of samples and, when currently recording, appends
 * them to the output stream; otherwise the samples are discarded.
 *
 * @param samp the samples to record
 */
public void samples(float[] samp)
{
  if ( !recording )
  {
    return;
  }
  // stage the samples in the float buffer, then serialize to bytes
  System.arraycopy(samp, 0, fsb.getChannel(0), 0, samp.length);
  final byte[] encoded = fsb.convertToByteArray(format);
  try
  {
    aos.write(encoded, 0, encoded.length);
  }
  catch (IOException e)
  {
    Minim.error("AudioRecorder: An error occurred while trying to write to the file:\n" +
                e.getMessage() );
  }
}
示例5: forward
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Performs a forward transform on a timeSize()-length window of values
 * in <code>buffer</code>, starting at <code>startAt</code>.
 *
 * @param buffer
 *          float[]: the buffer to analyze; there must be at least
 *          timeSize() samples between startAt and the end of the buffer
 * @param startAt
 *          int: the index to start at in the buffer. If there are not
 *          enough samples remaining, an error is issued and the
 *          operation is not performed.
 */
public void forward(float[] buffer, int startAt)
{
  final int available = buffer.length - startAt;
  if ( available < timeSize )
  {
    Minim.error( "FourierTransform.forward: not enough samples in the buffer between " +
                 startAt + " and " + buffer.length + " to perform a transform."
               );
    return;
  }
  // transform a private copy so the caller's buffer is left untouched
  float[] window = new float[timeSize];
  System.arraycopy(buffer, startAt, window, 0, window.length);
  forward(window);
}
示例6: scaleBand
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Scales the amplitude of the i-th frequency band by a non-negative
 * factor, keeping the complex spectrum conjugate-symmetric.
 *
 * @param i the index of the band to scale
 * @param s the scale factor; negative values are rejected with an error
 */
public void scaleBand(int i, float s)
{
  if (s < 0)
  {
    Minim.error("Can't scale a frequency band by a negative value.");
    return;
  }
  real[i] = real[i] * s;
  imag[i] = imag[i] * s;
  spectrum[i] = spectrum[i] * s;
  // mirror into the conjugate band, except at DC and the Nyquist bin
  final boolean hasMirror = (i != 0) && (i != timeSize / 2);
  if (hasMirror)
  {
    real[timeSize - i] = real[i];
    imag[timeSize - i] = -imag[i];
  }
}
示例7: setBand
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Sets the amplitude of the i-th frequency band to a, preserving the
 * band's existing phase and the conjugate symmetry of the spectrum.
 *
 * @param i the index of the band to set
 * @param a the new amplitude; negative values are rejected with an error
 */
public void setBand(int i, float a)
{
  if (a < 0)
  {
    Minim.error("Can't set a frequency band to a negative value.");
    return;
  }
  if (real[i] == 0 && imag[i] == 0)
  {
    // band is currently silent: no phase to preserve, so put all the
    // energy in the real component
    real[i] = a;
    spectrum[i] = a;
  }
  else
  {
    // normalize the complex value to unit magnitude (preserving phase),
    // then rescale it to the new amplitude
    real[i] /= spectrum[i];
    imag[i] /= spectrum[i];
    spectrum[i] = a;
    real[i] *= spectrum[i];
    imag[i] *= spectrum[i];
  }
  // mirror into the conjugate band, except at DC (0) and Nyquist (timeSize/2)
  if (i != 0 && i != timeSize / 2)
  {
    real[timeSize - i] = real[i];
    imag[timeSize - i] = -imag[i];
  }
}
示例8: forward
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Performs a forward FFT on <code>buffer</code>, which must contain
 * exactly timeSize() samples, and fills the spectrum with the resulting
 * band amplitudes.
 *
 * @param buffer the samples to transform; windowed in place
 */
public void forward(float[] buffer)
{
  if (buffer.length == timeSize)
  {
    doWindow(buffer);             // apply the analysis window in place
    bitReverseSamples(buffer, 0); // load real/imag in bit-reversed order
    fft();                        // run the transform
    fillSpectrum();               // compute band amplitudes from the result
  }
  else
  {
    Minim
        .error("FFT.forward: The length of the passed sample buffer must be equal to timeSize().");
  }
}
示例9: inverse
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Performs an inverse FFT, writing the reconstructed time-domain samples
 * into <code>buffer</code>, scaled by 1/timeSize().
 *
 * @param buffer the array to fill; must not be longer than the transform size
 */
public void inverse(float[] buffer)
{
  // NOTE(review): this check only rejects buffers LONGER than the
  // transform; a shorter buffer receives a truncated result even though
  // the error message says the lengths must be equal — confirm intent.
  if (buffer.length > real.length)
  {
    Minim
        .error("FFT.inverse: the passed array's length must equal FFT.timeSize().");
    return;
  }
  // conjugate the spectrum so the forward machinery computes the inverse
  for (int i = 0; i < timeSize; i++)
  {
    imag[i] *= -1;
  }
  bitReverseComplex();
  fft();
  // copy the result in real into buffer, scaling as we do
  for (int i = 0; i < buffer.length; i++)
  {
    buffer[i] = real[i] / real.length;
  }
}
示例10: forward
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Performs a direct discrete Fourier transform on <code>samples</code>,
 * which must contain exactly timeSize() values, then fills the spectrum
 * with the resulting band amplitudes.
 *
 * @param samples the samples to transform; windowed in place
 */
public void forward(float[] samples)
{
  if (samples.length != timeSize)
  {
    Minim
        .error("DFT.forward: The length of the passed sample buffer must be equal to DFT.timeSize().");
    return;
  }
  doWindow(samples);
  final int n = samples.length;
  // compute each band up to (and including) the Nyquist bin
  for (int band = 0; band <= n / 2; band++)
  {
    float re = 0.0f;
    float im = 0.0f;
    for (int t = 0; t < n; t++)
    {
      re += samples[t] * cos(t * band);
      im += samples[t] * -sin(t * band);
    }
    real[band] = re;
    imag[band] = im;
  }
  fillSpectrum();
}
示例11: Start
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Repeatedly loads the same audio file to exercise Minim's file loading,
 * stopping early and reporting if a load ever fails.
 *
 * @param args args[0] is the folder to load files from,
 *             args[1] is the file to load repeatedly
 */
void Start(String[] args)
{
  fileFolder = args[0];
  minim = new Minim(this);
  for (int attempt = 0; attempt < 17; ++attempt)
  {
    AudioPlayer player = minim.loadFile( args[1] );
    if ( player == null )
    {
      System.out.println("File loading failed on attempt " + attempt);
      break;
    }
  }
  minim.stop();
}
示例12: Start
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Loads and plays the audio file named by args[1], blocking until
 * playback completes.
 *
 * @param args args[0] is the folder to load files from,
 *             args[1] is the file to play
 */
void Start(String[] args)
{
  fileFolder = args[0];
  minim = new Minim(this);
  AudioPlayer player = minim.loadFile( args[1] );
  if ( player != null )
  {
    player.play();
    // Poll for completion. The original spun with `continue`, burning a
    // full core for the duration of the file; sleeping briefly between
    // checks is just as responsive for this purpose.
    while( player.isPlaying() )
    {
      try
      {
        Thread.sleep(10);
      }
      catch (InterruptedException e)
      {
        // preserve the interrupt status and stop waiting
        Thread.currentThread().interrupt();
        break;
      }
    }
  }
}
示例13: uGenerate
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Generates one frame of bit-crushed output. Sample-rate reduction:
 * each captured input frame is held for sampleRate()/bitRate output
 * frames. Bit-depth reduction: amplitudes are quantized to bitRes bits.
 *
 * @param out the output frame to fill, one value per channel
 */
@Override
protected void uGenerate(float[] out)
{
  // time to capture a fresh frame from the incoming audio?
  if ( sampleCounter <= 0 )
  {
    if ( audio.getLastValues().length != channelCount() )
    {
      Minim.error( "BitCrush audio has " + audio.getLastValues().length + " channels and sampledFrame has " + channelCount() );
    }
    System.arraycopy( audio.getLastValues(), 0, sampledFrame, 0, channelCount() );
    // hold this frame for sampleRate/bitRate output frames;
    // Math.max guards against a zero or negative bit rate
    sampleCounter = (int)(sampleRate() / Math.max(bitRate.getLastValue(),1));
  }
  // quantize each channel to bitRes bits
  final int res = 1 << (int)bitRes.getLastValue();
  for( int i = 0; i < out.length; ++i )
  {
    int samp = (int)(res * sampledFrame[i]);
    out[i] = (float)samp/res;
  }
  --sampleCounter;
}
示例14: forward
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Performs a forward transform on a timeSize()-length window of values
 * in <code>buffer</code>, starting at <code>startAt</code>.
 *
 * @param buffer
 *            float[]: the buffer to analyze; there must be at least
 *            timeSize() samples between the starting index and the end
 *            of the buffer
 * @param startAt
 *            int: the index to start at in the buffer. If there are not
 *            enough samples remaining, an error is issued and the
 *            operation is not performed.
 */
public void forward(float[] buffer, int startAt) {
    if (buffer.length - startAt < timeSize) {
        Minim.error("FourierTransform.forward: not enough samples in the buffer between "
                + startAt
                + " and "
                + buffer.length
                + " to perform a transform.");
        return;
    }
    // transform a private copy so the caller's data is left untouched
    final float[] frame = new float[timeSize];
    System.arraycopy(buffer, startAt, frame, 0, timeSize);
    forward(frame);
}
示例15: detectMode
import ddf.minim.Minim; //导入依赖的package包/类
/**
 * Selects the beat detection algorithm. An unrecognized value is
 * reported as an error and treated as BeatDetect.SOUND_ENERGY.
 * Resources are swapped only when the mode actually changes.
 *
 * @param algo
 *            int: either BeatDetect.SOUND_ENERGY or BeatDetect.FREQ_ENERGY
 *
 * @related BeatDetect
 */
public void detectMode(int algo) {
    if (algo < 0 || algo > 1) {
        Minim.error("Unrecognized detect mode, defaulting to SOUND_ENERGY.");
        algo = SOUND_ENERGY;
    }
    // flattened from nested ifs: each branch fires only on a real switch
    if (algo == SOUND_ENERGY && algorithm == FREQ_ENERGY) {
        releaseFEResources();
        initSEResources();
        initGraphs();
        algorithm = SOUND_ENERGY;
    } else if (algo != SOUND_ENERGY && algorithm == SOUND_ENERGY) {
        releaseSEResources();
        initFEResources();
        initGraphs();
        algorithm = FREQ_ENERGY;
    }
}