本文整理汇总了C++中 parameters::device_id 方法的典型用法代码示例。如果您正苦于以下问题:C++ parameters::device_id 方法的具体用法?C++ parameters::device_id 怎么用?C++ parameters::device_id 使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 parameters 的用法示例。
在下文中一共展示了parameters::device_id方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run
/// Opens the audio capture device selected by @p param and configures a
/// mono, interleaved, 32-bit-float PortAudio input stream at the device's
/// default sample rate. The actual recording/rendering pipeline is still
/// commented out (work in progress); the method currently only validates
/// that the device can be opened and parameterized.
///
/// @param param  command-line parameters; only device_id() is read here.
/// @return 0 always (errors are reported to stdout, not via the return code).
int cochleo_stream::run(const parameters& param)
{
    // Frames delivered per PortAudio callback invocation.
    static constexpr std::size_t frames_per_buffer = 64;
    try
    {
        // RAII guard: initializes PortAudio and terminates it when this
        // scope exits (even on exception).
        portaudio::AutoSystem autoSys;
        portaudio::System& sys = portaudio::System::instance();

        std::cout << "===================================================\n\n";

        // Look up the capture device requested by the user.
        auto& device = sys.deviceByIndex(param.device_id());
        BOOST_LOG_TRIVIAL(info) << "Opening device " << device.name();

        // Mono capture, interleaved samples, default (0.0) suggested latency,
        // no host-API-specific stream info.
        portaudio::DirectionSpecificStreamParameters input_parameters(
            device, 1, portaudio::FLOAT32, true, 0.0, nullptr);

        // Input-only stream: no output direction, device default sample rate.
        portaudio::StreamParameters stream_params(
            input_parameters,
            portaudio::DirectionSpecificStreamParameters::null(),
            device.defaultSampleRate(), frames_per_buffer, paNoFlag);

        // TODO: wire up the capture callback and start the stream.
        // signal_renderer renderer(stream.sampleRate(), 5);
        // portaudio::MemFunCallbackStream<signal_renderer> stream(stream_params, renderer, &signal_renderer::callback);
        // BOOST_LOG_TRIVIAL(info) << "Starting 5 s recording";
        // stream.start();
        // stream.stop();
        // stream.close();
        // sys.terminate();
    }
    catch (const portaudio::PaException& e)
    {
        std::cout << "A PortAudio error occurred: " << e.paErrorText() << '\n';
    }
    catch (const portaudio::PaCppException& e)
    {
        std::cout << "A PortAudioCpp error occurred: " << e.what() << '\n';
    }
    catch (const std::exception& e)
    {
        std::cout << "A generic exception occurred: " << e.what() << '\n';
    }
    catch (...)
    {
        std::cout << "An unknown exception occurred.\n";
    }

    // TODO: gammatone filterbank + cochleogram rendering pipeline, kept for
    // reference until the stream callback above is implemented.
    // BOOST_LOG_TRIVIAL(info) << "Input file is " << params.input() << ", " << audio.to_string();
    // // initialize filterbank
    // filterbank fb(audio.sample_frequency(),
    //               params.low_frequency(),
    //               params.high_frequency(),
    //               params.nb_channels());
    // BOOST_LOG_TRIVIAL(info) << "Gammatone filtering on "
    //                         << params.nb_channels() << " channels from "
    //                         << (int)fb.begin()->center_frequency() << "Hz to "
    //                         << (int)fb.rbegin()->center_frequency() << "Hz";
    // // declare the resulting multichannel cochleogram.
    // // cochleogram[i][j] where i is audio channel, j is the
    // // corresponding cochleogram
    // std::vector<std::vector<double> > cochleogram(audio.nb_channels(),
    //                                               std::vector<double>(xsize*ysize));
    // // xaxis is time, yaxis are center frequencies
    // const auto xaxis = gammatone::detail::linspace(0.0, audio.duration(), audio.size());
    // const auto yaxis = fb.center_frequency();
    // const auto xsize = xaxis.size();
    // const auto ysize = yaxis.size();
    // // process separatly on each audio channel
    // for(size_t i=0; i<audio.nb_channels(); i++)
    // {
    //     fb.reset();
    //     fb.compute(xsize,ysize,audio.channel(i).data(),cochleogram[i].data());
    //     if(params.normalize()) normalization(xsize,ysize,cochleogram[i].data());
    // }
    // BOOST_LOG_TRIVIAL(info) << "Entering " << renderer::vtk_version();
    // renderer vtk(16/9.0);
    // vtk.render(xaxis,yaxis,cochleogram[0].data());

    return 0;
}