This article collects typical C++ code examples of the mydsp::init method. If you have been wondering how to use C++ mydsp::init in practice, the curated examples below may help. You can also explore further usage examples of the mydsp class that this method belongs to.
The following shows four code examples of the mydsp::init method, ordered by popularity by default.
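Before the full examples, here is a minimal sketch of the usual call sequence around mydsp::init: instantiate the Faust-generated class, call init with the sample rate, then call compute on per-channel buffers. The header name "mydsp.h", the block size, and the assumption that the DSP has no inputs are illustrative only.

// Minimal sketch of the typical mydsp::init call sequence.
// Assumptions: "mydsp.h" is the Faust-generated header and defines FAUSTFLOAT;
// the sample rate and block size below are illustrative.
#include <vector>
#include "mydsp.h"

int main()
{
    const int kSampleRate = 44100;
    const int kBlock = 64;

    mydsp DSP;
    DSP.init(kSampleRate);  // must be called before compute(); resets internal state

    // one non-interleaved buffer per output channel
    std::vector<std::vector<FAUSTFLOAT> > out(DSP.getNumOutputs(),
                                              std::vector<FAUSTFLOAT>(kBlock));
    std::vector<FAUSTFLOAT*> outPtrs;
    for (size_t c = 0; c < out.size(); c++) outPtrs.push_back(out[c].data());

    DSP.compute(kBlock, 0, outPtrs.data());  // assumes a DSP with no inputs
    return 0;
}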
Example 1: main
int main(int argc, char *argv[])
{
    float fnbsamples;

    CMDUI* interface = new CMDUI(argc, argv);
    DSP.buildUserInterface(interface);
    interface->addOption("-n", &fnbsamples, 16, 0.0, 100000000.0);

    if (DSP.getNumInputs() > 0) {
        fprintf(stderr, "no inputs allowed\n");
        exit(1);
    }

    // init signal processor and the user interface values
    DSP.init(44100);

    // modify the UI values according to the command line options
    interface->process_command();

    int nouts = DSP.getNumOutputs();
    channels chan(kFrames, nouts);

    int nbsamples = int(fnbsamples);
    while (nbsamples > kFrames) {
        DSP.compute(kFrames, 0, chan.buffers());
        for (int i = 0; i < kFrames; i++) {
            for (int c = 0; c < nouts; c++) {
                printf("%8f\t", chan.buffers()[c][i]);
            }
            cout << endl;
        }
        nbsamples -= kFrames;
    }

    DSP.compute(nbsamples, 0, chan.buffers());
    for (int i = 0; i < nbsamples; i++) {
        for (int c = 0; c < nouts; c++) {
            printf("%8f\t", chan.buffers()[c][i]);
        }
        cout << endl;
    }
    return 0;
}
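The DSP, kFrames and channels identifiers above come from the Faust architecture files that surround this excerpt. As a point of reference only, a minimal stand-in for the channels helper (the real implementation shipped with Faust may differ) could look like this:

// Minimal stand-in for the Faust "channels" helper used above (assumed interface):
// owns one zero-initialized FAUSTFLOAT buffer per channel and exposes them via buffers().
class channels {
    int fNumChannels;
    FAUSTFLOAT** fBuffers;
public:
    channels(int frames, int numChannels) : fNumChannels(numChannels) {
        fBuffers = new FAUSTFLOAT*[numChannels];
        for (int c = 0; c < numChannels; c++) fBuffers[c] = new FAUSTFLOAT[frames]();
    }
    ~channels() {
        for (int c = 0; c < fNumChannels; c++) delete[] fBuffers[c];
        delete[] fBuffers;
    }
    FAUSTFLOAT** buffers() { return fBuffers; }
};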
Example 2: init
/*
 * init(samplingRate, bufferFrames)
 * Initializes the audio engine and the DSP code
 * with samplingRate and bufferFrames.
 * This method also looks for the [style:poly]
 * metadata in the Faust code and, based on it,
 * decides whether to create a polyphonic object.
 * init should be called before start.
 */
bool init(int samplingRate, int bufferSize) {
    DSP.init(samplingRate);
    inChanNumb = DSP.getNumInputs();
    outChanNumb = DSP.getNumOutputs();

    // configuring the UI
    DSP.buildUserInterface(&mapUI);
    DSP.buildUserInterface(&json);
    jsonString = json.JSON();

    if (jsonString.find("keyboard") != std::string::npos ||
        jsonString.find("poly") != std::string::npos) {
        polyMax = 4;
        DSPpoly = new mydsp_poly(polyMax, true);
        DSPpoly->init(samplingRate);
    } else {
        polyMax = 0;
    }

    return (fAudioDevice.Open(((polyMax > 0) ? DSPpoly : &DSP),
                              inChanNumb, outChanNumb, bufferSize, samplingRate) == 0);
}
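Once init returns true, the mapUI and DSPpoly objects built above can be used to drive the running DSP. The sketch below assumes the standard Faust MapUI::setParamValue and mydsp_poly keyOn/keyOff interfaces; the parameter path and MIDI values are illustrative only.

// Sketch only: drive the DSP after a successful init() (assumed Faust APIs).
void playExample() {
    if (polyMax > 0) {
        DSPpoly->keyOn(0, 60, 100);   // start a voice: MIDI channel 0, note 60, velocity 100
        DSPpoly->keyOff(0, 60);       // release the same note
    } else {
        mapUI.setParamValue("/mydsp/freq", 440.0f);  // set a parameter by its path
    }
}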
Example 3: init
/*
 * init(samplingRate, bufferFrames)
 * Initializes the audio engine and the DSP code
 * with samplingRate and bufferFrames.
 * This method also looks for the [style:poly]
 * metadata in the Faust code and, based on it,
 * decides whether to create a polyphonic object.
 * init should be called before start.
 */
void init(int samplingRate, int bufferFrames) {
    // configuring global variables
    SR = samplingRate;
    bufferSize = bufferFrames;
    vecSamps = bufferSize;

    DSP.init(SR);
    inChanNumb = DSP.getNumInputs();
    outChanNumb = DSP.getNumOutputs();

    // configuring the UI
    DSP.buildUserInterface(&mapUI);
    DSP.buildUserInterface(&json);
    jsonString = json.JSON();

    if (jsonString.find("keyboard") != std::string::npos ||
        jsonString.find("poly") != std::string::npos) {
        polyMax = 4;
        polyCoef = 1.0f / polyMax;
        DSPpoly = new mydsp_poly(SR, bufferSize, polyMax);
    } else {
        polyMax = 0;
    }

    // allocating memory for output channels
    bufferout = new float*[outChanNumb];
    for (int i = 0; i < outChanNumb; i++) {
        bufferout[i] = new float[vecSamps];
    }

    // allocating memory for input channels
    if (inChanNumb >= 1) {
        bufferin = new float*[inChanNumb];
        for (int i = 0; i < inChanNumb; i++) {
            bufferin[i] = new float[vecSamps];
        }
    }
}
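The buffers allocated here need a matching teardown when the engine is shut down. A sketch of that counterpart (the real architecture file may name and place it differently) follows:

// Sketch of the matching cleanup for the buffers allocated in init() above.
void destroy() {
    for (int i = 0; i < outChanNumb; i++) delete[] bufferout[i];
    delete[] bufferout;
    if (inChanNumb >= 1) {
        for (int i = 0; i < inChanNumb; i++) delete[] bufferin[i];
        delete[] bufferin;
    }
    if (polyMax > 0) delete DSPpoly;   // free the polyphonic wrapper if one was created
}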
Example 4: main
int main(int argc, char *argv[])
{
    SNDFILE* in_sf;
    SNDFILE* out_sf;
    SF_INFO in_info;
    SF_INFO out_info;
    unsigned int nAppend = 0; // number of frames to append beyond input file

    if (argc < 3) {
        fprintf(stderr, "*** USAGE: %s input_soundfile output_soundfile\n", argv[0]);
        exit(1);
    }

    nAppend = loptrm(&argc, argv, "--continue", "-c", 0);

    CMDUI* interface = new CMDUI(argc, argv);
    DSP.buildUserInterface(interface);
    interface->process_command();

    // open input file
    in_info.format = 0;
    in_sf = sf_open(interface->input_file(), SFM_READ, &in_info);
    if (in_sf == NULL) {
        fprintf(stderr, "*** Input file not found.\n");
        sf_perror(in_sf);
        exit(1);
    }

    // open output file
    out_info = in_info;
    out_info.format = in_info.format;
    out_info.channels = DSP.getNumOutputs();
    out_sf = sf_open(interface->output_file(), SFM_WRITE, &out_info);
    if (out_sf == NULL) {
        fprintf(stderr, "*** Cannot write output file.\n");
        sf_perror(out_sf);
        exit(1);
    }

    // create separator and interleaver
    Separator sep(kFrames, in_info.channels, DSP.getNumInputs());
    Interleaver ilv(kFrames, DSP.getNumOutputs());

    // init signal processor
    DSP.init(in_info.samplerate);
    //DSP.buildUserInterface(interface);
    interface->process_init();

    // process all samples
    int nbf;
    do {
        nbf = READ_SAMPLE(in_sf, sep.input(), kFrames);
        sep.separate();
        DSP.compute(nbf, sep.outputs(), ilv.inputs());
        ilv.interleave();
        sf_writef_float(out_sf, ilv.output(), nbf);
        //sf_write_raw(out_sf, ilv.output(), nbf);
    } while (nbf == kFrames);
    sf_close(in_sf);

    // compute tail, if any
    if (nAppend > 0) {
        FAUSTFLOAT* input = (FAUSTFLOAT*)calloc(nAppend * DSP.getNumInputs(), sizeof(FAUSTFLOAT));
        FAUSTFLOAT* inputs[1] = { input };
        Interleaver ailv(nAppend, DSP.getNumOutputs());
        DSP.compute(nAppend, inputs, ailv.inputs());
        ailv.interleave();
        sf_writef_float(out_sf, ailv.output(), nAppend);
    }
    sf_close(out_sf);
}
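loptrm, CMDUI, Separator and Interleaver are helpers from the Faust sndfile architecture. As an illustration of what loptrm does here (consume a --continue/-c option from the argument list and return its value), a hypothetical equivalent is sketched below; the real implementation may differ.

#include <cstdlib>
#include <cstring>

// Sketch of a loptrm-style helper: find "--continue N" or "-c N" in argv,
// remove the pair from the argument list, and return N (or defaultValue).
static unsigned int loptrm(int* argc, char* argv[], const char* longname,
                           const char* shortname, unsigned int defaultValue)
{
    for (int i = 1; i < *argc - 1; i++) {
        if (strcmp(argv[i], longname) == 0 || strcmp(argv[i], shortname) == 0) {
            unsigned int value = (unsigned int)atoi(argv[i + 1]);
            for (int j = i; j + 2 < *argc; j++) argv[j] = argv[j + 2];  // close the gap
            *argc -= 2;
            return value;
        }
    }
    return defaultValue;
}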