This article collects typical usage examples of the Java method org.puredata.core.PdBase.computeAudio. If you are unsure what PdBase.computeAudio does, how to use it, or where to find concrete examples, the curated samples below should help. You can also read more about the enclosing class, org.puredata.core.PdBase.
The following shows 4 code examples of PdBase.computeAudio, sorted by popularity by default.
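Before the examples, here is a minimal sketch of the pattern they all share: open a patch, configure audio with PdBase.openAudio, enable DSP with PdBase.computeAudio(true), then render blocks with PdBase.process. The patch file name is only a placeholder, and the statements are assumed to sit in a method that declares throws IOException.

import java.io.File;
import org.puredata.core.PdBase;

int handle = PdBase.openPatch(new File("test.pd")); // placeholder patch file
PdBase.openAudio(0, 2, 44100);   // no inputs, stereo output, 44.1 kHz
PdBase.computeAudio(true);       // switch DSP on (same effect as "pd dsp 1")
short[] out = new short[PdBase.blockSize() * 2];    // one Pd tick of stereo samples
PdBase.process(1, new short[0], out);               // render a single tick (64 frames)
PdBase.computeAudio(false);      // switch DSP off again
PdBase.closePatch(handle);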
Example 1: bake
import org.puredata.core.PdBase; // import the package/class the method depends on
/**
* Bake a patch to a WAV file.
* @param patch the patch to bake
* @param wav the WAV file to write
* @param channels number of output channels (1 for mono, 2 for stereo; more than 2 is allowed)
* @param sampleRate sample rate used by Pd
* @param time baking duration in seconds
* @throws IOException if the patch cannot be opened or the WAV file cannot be written
*/
public static void bake(File patch, File wav, int channels, int sampleRate, float time) throws IOException {
// Disable Pd audio: does nothing if Pd is already initialized.
PdConfiguration.disabled = true;
// Pause audio.
// This is a no-op in headless mode, but it forces the Pd static
// initializer to run (loads the native library).
Pd.audio.pause();
int handle = PdBase.openPatch(patch);
PdBase.openAudio(0, channels, sampleRate);
PdBase.computeAudio(true);
int frames = (int)(time * sampleRate);
int samples = frames * channels;
short [] data = new short[samples];
int ticks = frames / PdBase.blockSize();
PdBase.process(ticks, new short[]{}, data); // no input channels, so the input buffer can be empty
PdBase.closePatch(handle);
// save as 16-bit little-endian PCM WAV
byte [] buf = new byte[data.length * 2];
for(int i=0 ; i<data.length ; i++){
buf[i*2+0] = (byte)(data[i] & 0xFF);
buf[i*2+1] = (byte)((data[i] >> 8) & 0xFF);
}
ByteArrayInputStream stream = new ByteArrayInputStream(buf);
AudioFormat format = new AudioFormat(sampleRate, 16, channels, true, false);
AudioInputStream audioStream = new AudioInputStream(stream, format, frames); // length is in sample frames, not samples
AudioSystem.write(audioStream, Type.WAVE, wav);
// resume audio
Pd.audio.resume();
}
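A hypothetical call to bake (the file names are placeholders): render 5 seconds of stereo audio at 44.1 kHz into a WAV file.

File patch = new File("synth.pd"); // placeholder patch
File wav = new File("synth.wav");  // placeholder output file
bake(patch, wav, 2, 44100, 5f);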
Example 2: PdModule
import org.puredata.core.PdBase; // import the package/class the method depends on
private PdModule(int sampleRate, int inputChannels, int outputChannels, Notification notification) {
super(notification);
this.sampleRate = sampleRate;
this.inputChannels = inputChannels;
this.outputChannels = outputChannels;
pdInitAudio(inputChannels, outputChannels, sampleRate); // configure libpd audio
PdBase.computeAudio(true); // switch DSP on
}
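The constructor only switches DSP on; a matching teardown is not part of the example, but a sketch of one (an assumption, not taken from the class) would disable DSP and release libpd's native resources:

// assumed counterpart to the constructor above, not part of the original class
public void release() {
PdBase.computeAudio(false); // stop DSP processing
PdBase.release();           // free libpd's native resources
}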
Example 3: start
import org.puredata.core.PdBase; // import the package/class the method depends on
/**
* Start the baking process.
* @param listener receives progress notifications during processing.
*/
public void start(final BakingListener listener)
{
if(bakingThread != null){
throw new GdxRuntimeException("start should only be called once.");
}
bakingThread = new Thread(new Runnable() {
@Override
public void run() {
int total = pendingBakings.size;
int count = 0;
dispatchProgress(listener, 0);
Pd.audio.pause();
while(pendingBakings.size > 0){
Baking baking = pendingBakings.pop();
PdPatch patchToBake = openPatch(baking.patchFile);
PdBase.openAudio(0, 1, baking.sampleRate); // TODO: support stereo as well as mono?
PdBase.computeAudio(true);
int frames = (int)(baking.time * baking.sampleRate);
int samples = frames;
baking.data = new float[samples];
int ticks = samples / PdBase.blockSize();
int perr = PdBase.process(ticks, new float[]{}, baking.data);
if(perr != 0) Gdx.app.error("Pd", "process error: " + perr);
closePatch(patchToBake);
if(PdConfiguration.remoteEnabled){
Gdx.app.error("PdBaking", "Warning : enable to retrieve array size in remote mode, assume destination array is big enough");
}else{
int size = Pd.audio.arraySize(baking.array);
if(baking.data.length > size){
Gdx.app.error("PdBaking", "Warning: destination array " + baking.array + " is too short (" + size + ") for baked data (" + baking.data.length + " samples)");
}else if(baking.data.length < size){
Gdx.app.error("PdBaking", "Warning: destination array " + baking.array + " (" + size + ") is bigger than baked data (" + baking.data.length + "), clearing the remainder to prevent a dirty buffer.");
float [] nullData = new float[size - baking.data.length];
Pd.audio.writeArray(baking.array, baking.data.length, nullData, 0, nullData.length);
}
}
Pd.audio.writeArray(baking.array, 0, baking.data, 0, baking.data.length);
baked.put(baking.array, baking);
count++;
dispatchProgress(listener, 100 * (float) count / (float) total);
}
Pd.audio.resume();
dispatchComplete(listener);
}
}, "PdAudioBakery");
bakingThread.start();
}
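A call to start might look like the sketch below; the bakery instance and the BakingListener callback names (onProgress, onComplete) are assumptions for illustration, not taken from the example.

bakery.start(new BakingListener() {
@Override
public void onProgress(float percent) { // hypothetical callback name
Gdx.app.log("PdBaking", "baking progress: " + percent + "%");
}
@Override
public void onComplete() { // hypothetical callback name
Gdx.app.log("PdBaking", "baking complete");
}
});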
Example 4: run
import org.puredata.core.PdBase; // import the package/class the method depends on
@Override
public void run()
{
int ticks = Math.max(1, config.bufferSize / PdBase.blockSize());
short [] inBuffer = new short[config.bufferSize * config.inputChannels];
short [] outBuffer = new short[config.bufferSize * config.outputChannels];
AudioDevice device = createAudioDevice();
AudioRecorder recorder = null;
if(config.inputChannels > 0){
recorder = Gdx.audio.newAudioRecorder(config.sampleRate, config.inputChannels < 2); // second argument: isMono
}
PdBase.openAudio(config.inputChannels, config.outputChannels, config.sampleRate);
PdBase.computeAudio(true);
final Runnable pollRunnable = new Runnable() {
@Override
public void run() {
PdBase.pollPdMessageQueue();
PdBase.pollMidiQueue();
requirePolling = true; // allow the audio thread to post the next poll
}
};
long nanoDuration = (long)(1e9 * (double)config.bufferSize / (double)config.sampleRate);
long realTime = System.nanoTime();
long logicTime = realTime;
while(processing){
if(recorder != null){
recorder.read(inBuffer, 0, inBuffer.length);
}
realTime = System.nanoTime();
long waitTime = (logicTime - realTime) / 1000000;
if(waitTime > 0){
try {
Thread.sleep(waitTime);
} catch (InterruptedException e) {
// ignored: keep the audio loop running
}
}else{
logicTime = realTime;
}
logicTime += nanoDuration;
PdBase.process(ticks, inBuffer, outBuffer);
device.writeSamples(outBuffer, 0, outBuffer.length);
if(requirePolling){
// post at most one poll at a time; pollRunnable sets the flag back to true when it has run
requirePolling = false;
Gdx.app.postRunnable(pollRunnable);
}
}
device.dispose();
if(recorder != null){
recorder.dispose();
}
}
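For reference, the pacing math in the loop above with assumed example values: a 512-frame buffer at 44100 Hz works out as follows.

int bufferSize = 512, sampleRate = 44100;                   // assumed example values
int ticks = Math.max(1, bufferSize / PdBase.blockSize());   // 512 / 64 = 8 Pd ticks per buffer
long nanoDuration = (long)(1e9 * (double)bufferSize / (double)sampleRate);
// nanoDuration ≈ 11609977 ns, i.e. one buffer roughly every 11.6 ms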