本文整理汇总了C++中QTime::elapsed方法的典型用法代码示例。如果您正苦于以下问题:C++ QTime::elapsed方法的具体用法?C++ QTime::elapsed怎么用?C++ QTime::elapsed使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类QTime
的用法示例。
在下文中一共展示了QTime::elapsed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: draw
void draw() {
// Attempting to draw before we're visible and have a valid size will
// produce GL errors.
if (!isVisible() || _size.width() <= 0 || _size.height() <= 0) {
return;
}
makeCurrent();
gpu::Batch batch;
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLORS, { 0.0f, 0.0f, 0.0f, 1.0f });
batch.clearDepthFramebuffer(1e4);
batch.setViewportTransform({ 0, 0, _size.width() * devicePixelRatio(), _size.height() * devicePixelRatio() });
batch.setProjectionTransform(_projectionMatrix);
float t = _time.elapsed() * 1e-3f;
glm::vec3 unitscale { 1.0f };
glm::vec3 up { 0.0f, 1.0f, 0.0f };
float distance = 3.0f;
glm::vec3 camera_position{ distance * sinf(t), 0.0f, distance * cosf(t) };
static const vec3 camera_focus(0);
static const vec3 camera_up(0, 1, 0);
glm::mat4 camera = glm::inverse(glm::lookAt(camera_position, camera_focus, up));
batch.setViewTransform(camera);
batch.setPipeline(_pipeline);
batch.setModelTransform(Transform());
auto geometryCache = DependencyManager::get<GeometryCache>();
// Render grid on xz plane (not the optimal way to do things, but w/e)
// Note: GeometryCache::renderGrid will *not* work, as it is apparenly unaffected by batch rotations and renders xy only
{
static const std::string GRID_INSTANCE = "Grid";
static auto compactColor1 = toCompactColor(vec4{ 0.35f, 0.25f, 0.15f, 1.0f });
static auto compactColor2 = toCompactColor(vec4{ 0.15f, 0.25f, 0.35f, 1.0f });
static std::vector<glm::mat4> transforms;
static gpu::BufferPointer colorBuffer;
if (!transforms.empty()) {
transforms.reserve(200);
colorBuffer = std::make_shared<gpu::Buffer>();
for (int i = 0; i < 100; ++i) {
{
glm::mat4 transform = glm::translate(mat4(), vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor1);
}
{
glm::mat4 transform = glm::mat4_cast(quat(vec3(0, PI / 2.0f, 0)));
transform = glm::translate(transform, vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor2);
}
}
}
auto pipeline = geometryCache->getSimplePipeline();
for (auto& transform : transforms) {
batch.setModelTransform(transform);
batch.setupNamedCalls(GRID_INSTANCE, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
batch.setViewTransform(camera);
batch.setPipeline(_pipeline);
geometryCache->renderWireShapeInstances(batch, GeometryCache::Line, data.count(), colorBuffer);
});
}
}
{
static const size_t ITEM_COUNT = 1000;
static const float SHAPE_INTERVAL = (PI * 2.0f) / ITEM_COUNT;
static const float ITEM_INTERVAL = SHAPE_INTERVAL / TYPE_COUNT;
static const gpu::Element POSITION_ELEMENT{ gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element NORMAL_ELEMENT{ gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element COLOR_ELEMENT{ gpu::VEC4, gpu::NUINT8, gpu::RGBA };
static const gpu::Element TRANSFORM_ELEMENT{ gpu::MAT4, gpu::FLOAT, gpu::XYZW };
static std::vector<Transform> transforms;
static std::vector<vec4> colors;
static gpu::BufferPointer indirectBuffer;
static gpu::BufferPointer transformBuffer;
static gpu::BufferPointer colorBuffer;
static gpu::BufferView colorView;
static gpu::BufferView instanceXfmView;
if (!transformBuffer) {
transformBuffer = std::make_shared<gpu::Buffer>();
colorBuffer = std::make_shared<gpu::Buffer>();
indirectBuffer = std::make_shared<gpu::Buffer>();
static const float ITEM_RADIUS = 20;
static const vec3 ITEM_TRANSLATION{ 0, 0, -ITEM_RADIUS };
for (size_t i = 0; i < TYPE_COUNT; ++i) {
GeometryCache::Shape shape = SHAPE[i];
GeometryCache::ShapeData shapeData = geometryCache->_shapes[shape];
{
//.........这里部分代码省略.........
示例2: RunReal
/** \fn PreviewGenerator::RunReal(void)
 *  \brief This call creates a preview without starting a new thread.
 *
 *  Depending on the bits set in m_mode the preview is generated locally
 *  (LocalPreviewRun()) and/or remotely on the backend (RemotePreviewRun()).
 *  Whatever the outcome, a PREVIEW_SUCCESS or PREVIEW_FAILED MythEvent is
 *  posted to the registered listener, if any.
 *
 *  \return true if a preview image was generated, false otherwise.
 */
bool PreviewGenerator::RunReal(void)
{
QString msg;
// Snapshot the wall clock so the success messages below can report both
// the start time (toString) and the generation duration (elapsed()).
QTime tm = QTime::currentTime();
bool ok = false;
bool is_local = IsLocal();
// Remote-capable mode but the file is not local: log and fall through
// with ok == false ("Could not access recording" is NOT set here).
if (!is_local && !!(m_mode & kRemote))
{
LOG(VB_GENERAL, LOG_ERR, LOC +
QString("RunReal() file not local: '%1'")
.arg(m_pathname));
}
// Neither local nor remote bit set: invalid mode.
else if (!(m_mode & kLocal) && !(m_mode & kRemote))
{
LOG(VB_GENERAL, LOG_ERR, LOC +
QString("RunReal() Preview of '%1' failed "
"because mode was invalid 0x%2")
.arg(m_pathname).arg((int)m_mode,0,16));
}
// NOTE: LocalPreviewRun() is executed as a side effect of evaluating this
// condition; the branch ordering here is deliberate.
else if (!!(m_mode & kLocal) && LocalPreviewRun())
{
ok = true;
msg = QString("Generated on %1 in %2 seconds, starting at %3")
.arg(gCoreContext->GetHostName())
.arg(tm.elapsed()*0.001)
.arg(tm.toString(Qt::ISODate));
}
// Local generation failed or was not requested; try the backend.
else if (!!(m_mode & kRemote))
{
if (is_local && (m_mode & kLocal))
{
LOG(VB_GENERAL, LOG_WARNING, LOC + "Failed to save preview."
"\n\t\t\tYou may need to check user and group ownership on"
"\n\t\t\tyour frontend and backend for quicker previews.\n"
"\n\t\t\tAttempting to regenerate preview on backend.\n");
}
ok = RemotePreviewRun();
if (ok)
{
msg = QString("Generated remotely in %1 seconds, starting at %2")
.arg(tm.elapsed()*0.001)
.arg(tm.toString(Qt::ISODate));
}
else
{
msg = "Remote preview failed";
}
}
else
{
msg = "Could not access recording";
}
// Notify the listener (if registered) under the preview lock.
QMutexLocker locker(&m_previewLock);
if (m_listener)
{
// Default output name: recording pathname + ".png" when none was given.
QString output_fn = m_outFileName.isEmpty() ?
(m_programInfo.GetPathname()+".png") : m_outFileName;
QDateTime dt;
if (ok)
{
// Last-modified timestamp of the generated file travels with the event.
QFileInfo fi(output_fn);
if (fi.exists())
dt = fi.lastModified();
}
QString message = (ok) ? "PREVIEW_SUCCESS" : "PREVIEW_FAILED";
QStringList list;
list.push_back(QString::number(m_programInfo.GetRecordingID()));
list.push_back(output_fn);
list.push_back(msg);
list.push_back(dt.isValid()?dt.toUTC().toString(Qt::ISODate):"");
list.push_back(m_token);
QCoreApplication::postEvent(m_listener, new MythEvent(message, list));
}
return ok;
}
示例3: xmas
// The lines below are excerpts from Qt's reference-documentation snippet
// file for QTime / QDateTime. The "//! [n]" markers delimit independent
// examples, so identifiers (e.g. time1) may legitimately be redeclared
// between snippets — these fragments are not meant to compile as one unit.
QTime time = QTime::fromString("1.30", "m.s");
// time is 00:01:30.000
//! [8]
//! [9]
// isValid() range-checks each component (seconds must be 0..59).
QTime::isValid(21, 10, 30); // returns true
QTime::isValid(22, 5, 62); // returns false
//! [9]
//! [10]
// Typical stopwatch use of QTime: start(), do the work, then elapsed()
// reports the milliseconds since start() was called.
QTime t;
t.start();
some_lengthy_task();
qDebug("Time elapsed: %d ms", t.elapsed());
//! [10]
//! [11]
QDateTime now = QDateTime::currentDateTime();
QDateTime xmas(QDate(now.date().year(), 12, 25), QTime(0, 0));
qDebug("There are %d seconds to Christmas", now.secsTo(xmas));
//! [11]
//! [12]
// fromString() with a custom format: format characters that do not form a
// recognized field are matched literally.
QTime time1 = QTime::fromString("131", "HHh");
// time1 is 13:00:00
// NOTE(review): time1 is redeclared because this line belongs to a
// separate documentation snippet from the one above.
QTime time1 = QTime::fromString("1apA", "1amAM");
// time1 is 01:00:00
示例4: processFile
void MainWindow::processFile() {
if (generalLock == false) {
generalLock = true;
} else {
return;
}
QTime timer;
if (!fileLoaded) {
mainWindow.statusLabel->setText("No file loaded");
return;
} else {
QString pattern = mainWindow.patternTextEdit->toPlainText();
if (pattern == "") {
mainWindow.statusLabel->setText("No pattern given");
} else {
QString output = "";
QList<int> results;
int elapsedTime;
mainWindow.statusLabel->setText("Processing file contents...");
QCoreApplication::processEvents();
if (mainWindow.chooseAlgCombo->currentText() == "Brute Force") {
BruteForce bf= BruteForce();
timer.start();
results = bf.find(pattern, pattern.size(), textFileContents, textFileContents.size(), mainWindow.progressBar);
elapsedTime = timer.elapsed();
}
else if (mainWindow.chooseAlgCombo->currentText() == "Morris-Pratt") {
MorrisPratt mp = MorrisPratt();
timer.start();
results = mp.find(pattern, pattern.size(), textFileContents, textFileContents.size(), mainWindow.progressBar);
elapsedTime = timer.elapsed();
}
else if (mainWindow.chooseAlgCombo->currentText() == "Boyer-Moore") {
BoyerMoore bm = BoyerMoore();
timer.start();
results = bm.find(pattern, pattern.size(), textFileContents, textFileContents.size(), mainWindow.progressBar);
elapsedTime = timer.elapsed();
}
else if (mainWindow.chooseAlgCombo->currentText() == "TwoWay") {
TwoWay tw = TwoWay();
timer.start();
results = tw.find(pattern, pattern.size(), textFileContents, textFileContents.size(), mainWindow.progressBar);
elapsedTime = timer.elapsed();
}
else {
mainWindow.statusLabel->setText("Algorithm not supported");
return;
}
mainWindow.statusLabel->setText("Preparing results...");
QCoreApplication::processEvents();
output = processOutput(results, elapsedTime);
chartWidget->addResult(getNameShortcut(mainWindow.chooseAlgCombo->currentText()), "", pattern, elapsedTime, textFileContents.size() / 1024);
chartWidget->repaint();
mainWindow.statusLabel->setText("Processing finished");
mainWindow.consoleBrowser->setText(output);
}
}
generalLock = false;
}
示例5: armAdios
void Worker::armAdios()
{
QTime t;
int count=0;
while(rbAdios->isChecked())
{
if (count>1)
break;
count++;
try {
armrocio->setPose(RoboCompArmrocio::hola1);
}
catch (Ice::Exception e ) {
qDebug()<<"Error talking to Arm"<<e.what();
}
t.start();
while(t.elapsed()<4000)
{
if(!armrocio->isMoving())
{
break;
}
QApplication::processEvents();
usleep(1000);
}
if(count == 1)
{
try
{
QString s (QString::fromUtf8("<prosody range=\"x-high\"> Adiós.</prosody> <emphasis level=\"moderate\"> Cuídate mucho. Nos vemos el próximo día.</emphasis>" ));
speech->say( s.toStdString(),true);
// armrocio->setPose(RoboCompArmrocio::adios);
}
catch (Ice::Exception e)
{
qDebug()<<"Error talking to ArmRocio: "<<e.what();
}
}
try
{
armrocio->setPose(RoboCompArmrocio::hola2);
}
catch (Ice::Exception e ) {
qDebug()<<"Error talking to Arm"<<e.what();
}
t.restart();
while(t.elapsed()<4000)
{
if(!armrocio->isMoving())
{
break;
}
QApplication::processEvents();
usleep(1000);
}
}
try
{
armrocio->setPose(RoboCompArmrocio::reposo);
}
catch (Ice::Exception e ) {
qDebug()<<"Error talking to Arm"<<e.what();
}
}
示例6: main
/**
 * Tiny path-tracer driver: renders a w x h image by shooting jittered
 * camera rays through each pixel, averaging radiance() samples, and writes
 * the result as an ASCII PPM file.
 *
 * Fixes:
 *  - `iterations` (progress counter) was incremented concurrently by all
 *    OpenMP threads without synchronization — a data race; the increment
 *    is now atomic. The percentage printed from worker threads remains an
 *    approximate, unsynchronized read, which is acceptable for a status
 *    display.
 *  - The per-pixel accumulator `r` is now explicitly zero-initialized
 *    (glm's default constructor does not guarantee zeroing).
 */
int main (int, char **)
{
    int w = 768, h = 768, iterations = 0;
    std::vector<glm::vec3> colors(w * h, glm::vec3{0.f, 0.f, 0.f});
    Ray cam {{50, 52, 295.6}, glm::normalize(glm::vec3{0, -0.042612, -1})}; // cam pos, dir
    float near = 1.f;
    float far = 10000.f;
    // Screen -> clip transform: viewport scale * half-pixel offset *
    // perspective projection * camera view. Its inverse unprojects pixels.
    glm::mat4 camera =
        glm::scale(glm::mat4(1.f), glm::vec3(float(w), float(h), 1.f))
        * glm::translate(glm::mat4(1.f), glm::vec3(0.5, 0.5, 0.f))
        * glm::perspective(float(54.5f * pi / 180.f), float(w) / float(h), near, far)
        * glm::lookAt(cam.origin, cam.origin + cam.direction, glm::vec3(0, 1, 0))
        ;
    glm::mat4 screenToRay = glm::inverse(camera);
    QTime t;
    t.start();
#pragma omp parallel for
    for (int y = 0; y < h; y++)
    {
        // Approximate progress readout (unsynchronized read, display only).
        std::cerr << "\rRendering: " << 100 * iterations / ((w-1)*(h-1)) << "%";
        for (unsigned short x = 0; x < w; x++)
        {
            glm::vec3 r{0.f, 0.f, 0.f};  // fixed: explicit zero init
            float smoothies = 5.f;       // samples per pixel
            for(int smooths = 0; smooths < smoothies; ++smooths)
            {
                // Gaussian-style sub-pixel jitter for anti-aliasing.
                float u = random_u();
                //float v = random_u();
                float R = sqrt(-2*log(u));
                //float R2 = sqrt(-2*log(v));
                float xDecal = R * cos(2*pi*u)*.5;
                float yDecal = R * sin(2*pi*u)*.5;
                // Unproject the jittered pixel at the near (z=0) and far
                // (z=1) planes to build a world-space ray.
                glm::vec4 p0 = screenToRay * glm::vec4{float(x)+xDecal-.5, float(h - y )+ yDecal-.5, 0.f, 1.f};
                glm::vec4 p1 = screenToRay * glm::vec4{float(x)+xDecal-.5, float(h - y )+ yDecal-.5, 1.f, 1.f};
                glm::vec3 pp0 = glm::vec3(p0 / p0.w);
                glm::vec3 pp1 = glm::vec3(p1 / p1.w);
                glm::vec3 d = glm::normalize(pp1 - pp0);
                r += radiance (Ray{pp0, d});
            }
            r/=smoothies;
            colors[y * w + x] = colors[y * w + x]*0.25f + glm::clamp(r, glm::vec3(0.f, 0.f, 0.f), glm::vec3(1.f, 1.f, 1.f));// * 0.25f;
#pragma omp atomic
            ++iterations;  // fixed: atomic update (was a data race)
        }
    }
    {
        // Write the image as plain ASCII PPM (P3).
        std::fstream f("C:\\Users\\etu\\Desktop\\image6.ppm", std::fstream::out);
        f << "P3\n" << w << " " << h << std::endl << "255" << std::endl;
        for (auto c : colors)
            f << toInt(c.x) << " " << toInt(c.y) << " " << toInt(c.z) << " ";
    }
    std::cout << std::endl << "Rendered in " << t.elapsed()/1000. << "s." << std::endl;
    return 0;
}
示例7: benchMixer
QString VdpauWidget::benchMixer()
{
QString directoryName(dataDirectory);
directoryName.append("mpghd.dat");
MPEGDecoder *d = new MPEGDecoder( vc, directoryName );
if ( !d->init() ) {
delete d;
return "Can't initialize MPEG decoder!";
}
if ( mixerWidth!=d->width || mixerHeight!=d->height )
createMixer( d->width, d->height );
QList< VdpVideoSurface > list = d->getOrderedFrames();
VdpStatus st = vc->vdp_output_surface_create( vc->vdpDevice, VDP_RGBA_FORMAT_B8G8R8A8, d->width, d->height, &mixerSurface );
if ( st != VDP_STATUS_OK ) {
delete d;
return "FATAL: Can't create mixer output surface !!\n";
}
int i, loop=0;
VdpRect vid_source = { 0, 0, d->width, d->height };
setSkipChroma( 0 );
// weave
setDeinterlace( DEINT_BOB );
QTime t;
int e;
t.start();
while ( t.elapsed() < MIXERLOOP ) {
for ( i=1; i<NUMSURFACES-1; ++i ) {
vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME,
0, 0, list.at(i), 0, 0, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
}
++loop;
}
e = t.elapsed();
int n = (NUMSURFACES-2)*loop;
benchMixerResult = QString("MIXER WEAVE (%1x%2): %3 frames/s\n").arg(d->width).arg(d->height).arg(n*1000/e);
// bob
loop = 0;
t.start();
while ( t.elapsed() < MIXERLOOP ) {
for ( i=1; i<NUMSURFACES-1; ++i ) {
st = vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD,
0, 0, list.at(i), 0, 0, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
if ( st != VDP_STATUS_OK )
fprintf( stderr, "vdp_video_mixer_render failed: %s\n", vc->vdp_get_error_string( st ) );
st = vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD,
0, 0, list.at(i), 0, 0, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
if ( st != VDP_STATUS_OK )
fprintf( stderr, "vdp_video_mixer_render failed: %s\n", vc->vdp_get_error_string( st ) );
}
loop += 2;
}
e = t.elapsed();
n = (NUMSURFACES-2)*loop;
benchMixerResult += QString("MIXER BOB (%1x%2): %3 fields/s\n").arg(d->width).arg(d->height).arg(n*1000/e);
VdpVideoSurface past[2];
VdpVideoSurface future[1];
// temporal
setDeinterlace( DEINT_TEMPORAL );
loop = 0;
t.start();
while ( t.elapsed() < MIXERLOOP ) {
for ( i=1; i<NUMSURFACES-1; ++i ) {
past[1] = past[0] = list.at(i-1);
future[0] = list.at(i);
st = vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD,
2, past, list.at(i), 1, future, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
if ( st != VDP_STATUS_OK )
fprintf( stderr, "vdp_video_mixer_render failed: %s\n", vc->vdp_get_error_string( st ) );
past[0] = list.at(i);
future[0] = list.at(i+1);
st = vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD,
2, past, list.at(i), 1, future, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
if ( st != VDP_STATUS_OK )
fprintf( stderr, "vdp_video_mixer_render failed: %s\n", vc->vdp_get_error_string( st ) );
}
loop += 2;
}
e = t.elapsed();
n = (NUMSURFACES-2)*loop;
benchMixerResult += QString("MIXER TEMPORAL (%1x%2): %3 fields/s\n").arg(d->width).arg(d->height).arg(n*1000/e);
// temporal + ivtc
setIvtc( 1 );
loop = 0;
t.start();
while ( t.elapsed() < MIXERLOOP ) {
for ( i=1; i<NUMSURFACES-1; ++i ) {
past[1] = past[0] = list.at(i-1);
future[0] = list.at(i);
st = vc->vdp_video_mixer_render( mixer, VDP_INVALID_HANDLE, 0, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD,
2, past, list.at(i), 1, future, &vid_source, mixerSurface, &vid_source, &vid_source, 0, NULL );
if ( st != VDP_STATUS_OK )
//.........这里部分代码省略.........
示例8: benchMT
// next 2 functions are the 2 threads.
// the first one (the main thread) runs the decoder
// the second one runs the mixer
// Benchmarks MPEG decoding while a second thread simultaneously runs the
// video mixer; returns a human-readable result string (or an error text).
QString VdpauWidget::benchMT()
{
    // First decoder: its ordered frames feed the mixer thread.
    QString path(dataDirectory);
    path.append("mpghd.dat");
    MPEGDecoder *firstDecoder = new MPEGDecoder( vc, path );
    if ( !firstDecoder->init() ) {
        delete firstDecoder;
        return "Can't initialize MPEG decoder (1)!";
    }
    // RGBA surface the mixer renders into.
    VdpStatus status = vc->vdp_output_surface_create( vc->vdpDevice, VDP_RGBA_FORMAT_B8G8R8A8, firstDecoder->width, firstDecoder->height, &mixerSurface );
    if ( status != VDP_STATUS_OK ) {
        delete firstDecoder;
        return "FATAL: Can't create mixer output surface !!\n";
    }
    if ( mixerWidth!=firstDecoder->width || mixerHeight!=firstDecoder->height )
        createMixer( firstDecoder->width, firstDecoder->height );
    setDeinterlace( DEINT_TEMPORAL );
    // getOrderedFrames() hands over 22 decoded surfaces in display order and
    // destroys the decoder's internal state ...
    VdpauThread mixerThread( vc, firstDecoder->getOrderedFrames(), mixer, mixerSurface, firstDecoder->width, firstDecoder->height );
    // ... so the decoding benchmark needs a brand-new decoder.
    path.clear();
    path.append(dataDirectory);
    path.append("mpghd.dat");
    MPEGDecoder *secondDecoder = new MPEGDecoder( vc, path );
    if ( !secondDecoder->init( true ) ) {
        delete secondDecoder;
        delete firstDecoder;
        vc->vdp_output_surface_destroy( mixerSurface );
        return "Can't initialize MPEG decoder (2)!";
    }
    mixerThread.running = true;
    // Launch the mixer thread, then decode as fast as possible in this one.
    mixerThread.start();
    int frames = 0;
    QTime stopwatch;
    stopwatch.start();
    // Decoder loop: decode frames (25 in rotation) for MIXERLOOP ms.
    while ( stopwatch.elapsed() < MIXERLOOP ) {
        secondDecoder->getNextFrame();
        ++frames;
    }
    int elapsedMs = stopwatch.elapsed();
    mixerThread.running = false;
    // Wait for the mixer thread to finish before collecting its result.
    mixerThread.wait();
    benchMTResult = QString("MULTITHREADED MPEG DECODING (%1x%2): %3 frames/s\n").arg(secondDecoder->width).arg(secondDecoder->height).arg(frames*1000/elapsedMs);
    benchMTResult += mixerThread.result;
    delete secondDecoder;
    delete firstDecoder;
    vc->vdp_output_surface_destroy( mixerSurface );
    return benchMTResult;
}
示例9: event
/*! Viewer-wide Qt event hook.
    - Stores the current tool on (non-auto-repeat) key presses and shortcut
      overrides, and restores it on key release when this viewer has focus.
    - Claims ShortcutOverride events so the active Type tool receives raw
      key input instead of application shortcuts.
    - Throttles MouseMove events to at most one per 10 ms.
    Fix: C-style casts to QKeyEvent* replaced with static_cast (the event
    type has already been checked, so the downcast is known-safe).
*/
bool SceneViewer::event(QEvent *e) {
  if (e->type() == QEvent::ShortcutOverride || e->type() == QEvent::KeyPress) {
    // Auto-repeated presses must not re-store the tool.
    if (!static_cast<QKeyEvent *>(e)->isAutoRepeat()) {
      TApp::instance()->getCurrentTool()->storeTool();
    }
  }
  if (e->type() == QEvent::ShortcutOverride) {
    TTool *tool = TApp::instance()->getCurrentTool()->getTool();
    if (tool && tool->isEnabled() && tool->getName() == T_Type &&
        tool->isActive())
      e->accept();
    // NOTE(review): every ShortcutOverride returns true (handled) here,
    // even when the event was not accepted — preserved as in the original;
    // confirm this is intentional.
    return true;
  }
  if (e->type() == QEvent::KeyRelease) {
    if (!static_cast<QKeyEvent *>(e)->isAutoRepeat()) {
      QWidget *focusWidget = QApplication::focusWidget();
      if (focusWidget == 0 ||
          QString(focusWidget->metaObject()->className()) == "SceneViewer")
        TApp::instance()->getCurrentTool()->restoreTool();
    }
  }
  // Discard too frequent move events: drop a MouseMove arriving less than
  // 10 ms after the previous press/move.
  // NOTE(review): if a MouseMove arrives before any MouseButtonPress,
  // `clock` has never been start()ed and elapsed() is meaningless — verify
  // a press always precedes moves here.
  static QTime clock;
  if (e->type() == QEvent::MouseButtonPress)
    clock.start();
  else if (e->type() == QEvent::MouseMove) {
    if (clock.elapsed() < 10) {
      e->accept();
      return true;
    }
    clock.start();
  }
  /* (debug tracing of input events — intentionally kept disabled)
  switch(e->type())
  {
  case QEvent::Enter:
  qDebug() << "************************** Enter";
  break;
  case QEvent::Leave:
  qDebug() << "************************** Leave";
  break;
  case QEvent::TabletPress:
  qDebug() << "************************** TabletPress" << m_pressure;
  break;
  case QEvent::TabletMove:
  qDebug() << "************************** TabletMove";
  break;
  case QEvent::TabletRelease:
  qDebug() << "************************** TabletRelease";
  break;
  case QEvent::MouseButtonPress:
  qDebug() << "**************************MouseButtonPress" << m_pressure << " "
  << m_tabletEvent;
  break;
  case QEvent::MouseMove:
  qDebug() << "**************************MouseMove" << m_pressure;
  break;
  case QEvent::MouseButtonRelease:
  qDebug() << "**************************MouseButtonRelease";
  break;
  case QEvent::MouseButtonDblClick:
  qDebug() << "============================== MouseButtonDblClick";
  break;
  }
  */
  return QGLWidget::event(e);
}
示例10: test_deserialise_timing
// Timing benchmark for AdapterTimeSeriesDataSet::deserialise(): builds a
// chunk of synthetic UDP packets, deserialises it once to warm up, then
// times a second deserialise() call and prints the elapsed milliseconds.
void AdapterTimeSeriesDataSetTest::test_deserialise_timing()
{
try {
// Create configuration node.
_fixedSizePackets = "false";
_config = _configXml(_fixedSizePackets, _dataBitSize,
_udpPacketsPerIteration, _samplesPerPacket,
_outputChannelsPerSubband, _subbandsPerPacket, _nRawPolarisations);
typedef TYPES::i16complex i16c;
// Construct the adapter.
AdapterTimeSeriesDataSet adapter(_config);
// Construct a data blob to adapt into.
TimeSeriesDataSetC32 timeSeries;
unsigned nTimes = (_udpPacketsPerIteration * _samplesPerPacket);
unsigned nTimeBlocks = nTimes / _outputChannelsPerSubband;
unsigned nData = _subbandsPerPacket * _nRawPolarisations * _samplesPerPacket;
// Payload size: nData complex samples, _dataBitSize bits per component,
// hence the *2 (real + imaginary) and /8 (bits -> bytes).
size_t packetSize = sizeof(UDPPacket::Header) + (nData * _dataBitSize * 2) / 8;
size_t chunkSize = packetSize * _udpPacketsPerIteration;
// Configure the adapter setting the data blob, chunk size and service data.
adapter.config(&timeSeries, chunkSize, QHash<QString, DataBlob*>());
// Create and fill UDP packets.
std::vector<UDPPacket> packets(_udpPacketsPerIteration);
unsigned index = 0;
for (unsigned i = 0; i < _udpPacketsPerIteration; ++i)
{
// Fill in the header; values are offset by the packet index so each
// packet's header is distinguishable when inspected.
packets[i].header.version = uint8_t(0 + i);
packets[i].header.sourceInfo = uint8_t(1 + i);
packets[i].header.configuration = uint16_t(_dataBitSize);
packets[i].header.station = uint16_t(3 + i);
packets[i].header.nrBeamlets = uint8_t(4 + i);
packets[i].header.nrBlocks = uint8_t(5 + i);
packets[i].header.timestamp = uint32_t(6 + i);
packets[i].header.blockSequenceNumber = uint32_t(7 + i);
// Fill in the data: samples ordered as time -> subband -> polarisation,
// with a running counter in the real part and the packet index in the
// imaginary part.
for (unsigned ii = 0, t = 0; t < _samplesPerPacket; ++t) {
for (unsigned c = 0; c < _subbandsPerPacket; ++c) {
for (unsigned p = 0; p < _nRawPolarisations; ++p) {
i16c* data = reinterpret_cast<i16c*>(packets[i].data);
index = _nRawPolarisations * (t * _subbandsPerPacket + c) + p;
data[index] = i16c(ii++, i);
}
}
}
}
// Stick the chunk of packets into an QIODevice (buffer).
// First pass: warm-up deserialise (buffer scoped so it is released).
{
QBuffer buffer;
buffer.setData(reinterpret_cast<char*>(&packets[0]), chunkSize);
buffer.open(QBuffer::ReadOnly);
adapter.deserialise(&buffer);
}
// Second pass: the timed deserialise.
QBuffer buffer;
buffer.setData(reinterpret_cast<char*>(&packets[0]), chunkSize);
buffer.open(QBuffer::ReadOnly);
QTime timer;
timer.start();
adapter.deserialise(&buffer);
int elapsed = timer.elapsed();
// Report the timing (extra detail only in verbose mode).
// std::cout << timeSeries.timeSeries(0) <<
cout << endl;
cout << "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" << endl;
cout << "[AdapterTimeSeriesDataSet]: deserialise() " << endl;
cout << "- nChan = " << _outputChannelsPerSubband << endl << endl;
if (_verbose) {
cout << "- nBlocks = " << nTimeBlocks << endl;
cout << "- nSubbands = " << _subbandsPerPacket << endl;
cout << "- nPols = " << _nRawPolarisations << endl;
cout << "- nTimes = " << nTimes << endl;
}
cout << "* Elapsed = " << elapsed << " ms." << endl;
cout << "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" << endl;
}
catch (const QString& err) {
// Convert project-style QString exceptions into CppUnit failures.
CPPUNIT_FAIL(err.toStdString().data());
}
}
示例11: tryAcquireWithTimeout
void tst_QSemaphore::tryAcquireWithTimeout()
{
QFETCH(int, timeout);
QSemaphore semaphore;
QTime time;
QCOMPARE(semaphore.available(), 0);
semaphore.release();
QCOMPARE(semaphore.available(), 1);
time.start();
QVERIFY(!semaphore.tryAcquire(2, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 1);
semaphore.release();
QCOMPARE(semaphore.available(), 2);
time.start();
QVERIFY(!semaphore.tryAcquire(3, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 2);
semaphore.release(10);
QCOMPARE(semaphore.available(), 12);
time.start();
QVERIFY(!semaphore.tryAcquire(100, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 12);
semaphore.release(10);
QCOMPARE(semaphore.available(), 22);
time.start();
QVERIFY(!semaphore.tryAcquire(100, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 22);
time.start();
QVERIFY(semaphore.tryAcquire(1, timeout));
QVERIFY(time.elapsed() <= timeout);
QCOMPARE(semaphore.available(), 21);
time.start();
QVERIFY(semaphore.tryAcquire(1, timeout));
QVERIFY(time.elapsed() <= timeout);
QCOMPARE(semaphore.available(), 20);
time.start();
QVERIFY(semaphore.tryAcquire(10, timeout));
QVERIFY(time.elapsed() <= timeout);
QCOMPARE(semaphore.available(), 10);
time.start();
QVERIFY(semaphore.tryAcquire(10, timeout));
QVERIFY(time.elapsed() <= timeout);
QCOMPARE(semaphore.available(), 0);
// should not be able to acquire more
time.start();
QVERIFY(!semaphore.tryAcquire(1, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 0);
time.start();
QVERIFY(!semaphore.tryAcquire(1, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 0);
time.start();
QVERIFY(!semaphore.tryAcquire(10, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 0);
time.start();
QVERIFY(!semaphore.tryAcquire(10, timeout));
QVERIFY(time.elapsed() >= timeout);
QCOMPARE(semaphore.available(), 0);
}
示例12: run
//.........这里部分代码省略.........
QTime t;
t.start();
_mutex.lock();
if( _localCheckOnly ) {
csync_set_local_only( csync, true );
}
_mutex.unlock();
if( csync_init(csync) < 0 ) {
CSYNC_ERROR_CODE err = csync_errno();
QString errStr;
switch( err ) {
case CSYNC_ERR_LOCK:
errStr = tr("CSync failed to create a lock file.");
break;
case CSYNC_ERR_STATEDB_LOAD:
errStr = tr("CSync failed to load the state db.");
break;
case CSYNC_ERR_MODULE:
errStr = tr("CSync failed to load the ownCloud module.");
break;
case CSYNC_ERR_TIMESKEW:
errStr = tr("The system time between the local machine and the server differs "
"too much. Please use a time syncronization service (ntp) on both machines.");
break;
case CSYNC_ERR_FILESYSTEM:
errStr = tr("CSync could not detect the filesystem type.");
break;
case CSYNC_ERR_TREE:
errStr = tr("CSync got an error while processing internal trees.");
break;
default:
errStr = tr("An internal error number %1 happend.").arg( (int) err );
}
qDebug() << " #### ERROR String emitted: " << errStr;
emit csyncError(errStr);
goto cleanup;
}
qDebug() << "############################################################### >>";
if( csync_update(csync) < 0 ) {
emit csyncError(tr("CSync Update failed."));
goto cleanup;
}
qDebug() << "<<###############################################################";
csync_set_userdata(csync, wStats);
walkTime.start();
if( csync_walk_local_tree(csync, &checkPermissions, 0) < 0 ) {
qDebug() << "Error in treewalk.";
if( wStats->errorType == WALK_ERROR_DIR_PERMS ) {
emit csyncError(tr("The local filesystem has directories which are write protected.\n"
"That prevents ownCloud from successful syncing.\n"
"Please make sure that all directories are writeable."));
} else if( wStats->errorType == WALK_ERROR_WALK ) {
emit csyncError(tr("CSync encountered an error while examining the file system.\n"
"Syncing is not possible."));
} else if( wStats->errorType == WALK_ERROR_INSTRUCTIONS ) {
emit csyncError(tr("CSync update generated a strange instruction.\n"
"Please write a bug report."));
}
emit csyncError(tr("Local filesystem problems. Better disable Syncing and check."));
goto cleanup;
}
qDebug() << " ..... Local walk finished: " << walkTime.elapsed();
// emit the treewalk results. Do not touch the wStats after this.
emit treeWalkResult(wStats);
_mutex.lock();
if( _localCheckOnly ) {
_mutex.unlock();
// we have to go out here as its local check only.
goto cleanup;
} else {
_mutex.unlock();
// check if we can write all over.
if( csync_reconcile(csync) < 0 ) {
emit csyncError(tr("CSync reconcile failed."));
goto cleanup;
}
if( csync_propagate(csync) < 0 ) {
emit csyncError(tr("CSync propagate failed."));
goto cleanup;
}
}
cleanup:
csync_destroy(csync);
/*
* Attention: do not delete the wStat memory here. it is deleted in the
* slot catching the signel treeWalkResult because this thread can faster
* die than the slot has read out the data.
*/
qDebug() << "CSync run took " << t.elapsed() << " Milliseconds";
}
示例13: painter
// Compositing slot: takes the freshly rendered image, draws the grid
// vector graphics over it, then the contour vector graphics (in image
// coordinates via a screen transform), and schedules a repaint.
void
ImageViewController::imageAndGridDoneSlot(
QImage image,
Carta::Lib::VectorGraphics::VGList gridVG,
Carta::Lib::VectorGraphics::VGList contourVG,
ServiceSync::JobId /*jobId*/ )
{
/// \todo we should make sure the jobId matches the last submitted job, otherwise
/// we are wasting CPU rendering old job...
// qDebug() << "imageAndGridDoneSlot" << jobId << "xyz";
m_renderBuffer = image;
// draw the grid over top
QTime t;
t.restart();
QPainter painter( & m_renderBuffer );
painter.setRenderHint( QPainter::Antialiasing, true );
Carta::Lib::VectorGraphics::VGListQPainterRenderer vgRenderer;
if ( ! vgRenderer.render( gridVG, painter ) ) {
qWarning() << "could not render grid vector graphics";
}
qDebug() << "Grid VG rendered in" << t.elapsed() / 1000.0 << "sec" << "xyz";
t.restart();
// Build a QTransform mapping image coordinates to screen coordinates so
// the contour polylines (defined in image space) can be painted directly.
{
QPen lineColor( QColor( "red" ), 1 );
lineColor.setCosmetic( true );
painter.setPen( lineColor );
// where does 0.5, 0.5 map to?
QPointF p1 = m_renderService-> img2screen( { 0.5, 0.5 }
);
// where does 1.5, 1.5 map to?
QPointF p2 = m_renderService-> img2screen( { 1.5, 1.5 }
);
// The two sample points give the per-axis scale; the remainder of the
// matrix is a pure scale+translate (no projection, no shear).
QTransform tf;
double m11 = p2.x() - p1.x();
double m22 = p2.y() - p1.y();
double m33 = 1; // no projection
double m13 = 0; // no projection
double m23 = 0; // no projection
double m12 = 0; // no shearing
double m21 = 0; // no shearing
double m31 = p1.x() - m11 * 0.5;
double m32 = p1.y() - m22 * 0.5;
tf.setMatrix( m11, m12, m13, m21, m22, m23, m31, m32, m33 );
painter.setTransform( tf );
}
if ( ! vgRenderer.render( contourVG, painter ) ) {
qWarning() << "could not render contour vector graphics";
}
qDebug() << "Contour VG rendered in" << t.elapsed() / 1000.0 << "sec" << "xyz";
// (superseded manual contour painting, kept for reference)
// // paint contours
// QPen lineColor( QColor( "red" ), 1 );
// lineColor.setCosmetic( true );
// painter.setPen( lineColor );
// // where does 0.5, 0.5 map to?
// QPointF p1 = m_renderService-> img2screen( { 0.5, 0.5 }
// );
// // where does 1.5, 1.5 map to?
// QPointF p2 = m_renderService-> img2screen( { 1.5, 1.5 }
// );
// QTransform tf;
// double m11 = p2.x() - p1.x();
// double m22 = p2.y() - p1.y();
// double m33 = 1; // no projection
// double m13 = 0; // no projection
// double m23 = 0; // no projection
// double m12 = 0; // no shearing
// double m21 = 0; // no shearing
// double m31 = p1.x() - m11 * 0.5;
// double m32 = p1.y() - m22 * 0.5;
// tf.setMatrix( m11, m12, m13, m21, m22, m23, m31, m32, m33 );
// painter.setTransform( tf );
// for ( size_t k = 0 ; k < m_contours.size() ; ++k ) {
// std::vector < QPolygonF > con = m_contours[k];
// for ( size_t i = 0 ; i < con.size() ; ++i ) {
// QPolygonF & poly = con[i];
// painter.drawPolyline( poly );
// }
// }
// schedule a repaint with the connector
m_connector-> refreshView( this );
} // imageAndGridDoneSlot
示例14: on_btIdentify_pressed
void NBioBSP_IndexSearch::on_btIdentify_pressed()
{
NBioAPI_UINT32 nDataCount = 0;
//NBioAPI Search DB Count.
NBioAPI_GetDataCountFromIndexSearchDB(m_hNBioBSP, &nDataCount);
ui->pgbarSearch->setRange(0, nDataCount);
ui->pgbarSearch->setValue(0);
m_bStopFlag = false;
NBioAPI_RETURN nRet = NBioAPI_OpenDevice(m_hNBioBSP, NBioAPI_DEVICE_ID_AUTO);
if (NBioAPIERROR_NONE != nRet) {
QString szError;
szError.sprintf("NBioAPI_OpenDevice error: %04X", nRet);
ui->labelStatus->setText(szError);
}
else {
NBioAPI_FIR_HANDLE hFIR;
//NBioAPI Capture
nRet = NBioAPI_Capture(m_hNBioBSP, NBioAPI_FIR_PURPOSE_IDENTIFY, &hFIR, NBioAPI_USE_DEFAULT_TIMEOUT, NULL, NULL);
if (NBioAPIERROR_NONE == nRet) {
NBioAPI_INPUT_FIR inputFIR;
inputFIR.Form = NBioAPI_FIR_FORM_HANDLE;
inputFIR.InputFIR.FIRinBSP = &hFIR;
NBioAPI_INDEXSEARCH_FP_INFO infoFp;
NBioAPI_INDEXSEARCH_CALLBACK_INFO_0 callbackInfo0;
callbackInfo0.CallBackType = 0;
callbackInfo0.CallBackFunction = MyIndexSearchCallBack;
callbackInfo0.UserCallBackParam = this;
//Search DB
QTime t;
ui->btStop->setEnabled(true);
pSearchModel->removeRows(0, pSearchModel->rowCount(QModelIndex()), QModelIndex());
t.start();
nRet = NBioAPI_IdentifyDataFromIndexSearchDB(m_hNBioBSP, &inputFIR, 5, &infoFp, &callbackInfo0);
int nElapsed = t.elapsed();
ui->btStop->setEnabled(false);
ui->labelSearchTime->setText(QString("%1.%2 sec").arg(nElapsed / 1000).arg(nElapsed % 1000));
if (NBioAPIERROR_NONE != nRet) {
if (NBioAPIERROR_INDEXSEARCH_IDENTIFY_STOP != nRet)
QMessageBox::warning(this, "NBioBSP_IndexSearch", "Failed to identify fingerprint data from DB!");
}
else {
int nIndex = 0;
pSearchModel->insertRow(nIndex, QModelIndex());
pSearchModel->setData(pSearchModel->index(nIndex, 0, QModelIndex()), infoFp.ID);
pSearchModel->setData(pSearchModel->index(nIndex, 1, QModelIndex()), infoFp.FingerID);
pSearchModel->setData(pSearchModel->index(nIndex, 2, QModelIndex()), infoFp.SampleNumber);
pSearchModel->setData(pSearchModel->index(nIndex, 3, QModelIndex()), "-");
}
}
NBioAPI_FreeFIRHandle(m_hNBioBSP, hFIR);
NBioAPI_CloseDevice(m_hNBioBSP, NBioAPI_DEVICE_ID_AUTO);
}
if (ui->pgbarSearch->maximum() == 0)
ui->pgbarSearch->setRange(0, 100);
}
示例15: internalSearchPath
//.........这里部分代码省略.........
#ifdef CATCHCHALLENGER_EXTRA_CHECK
extraControlOnData(pathToGo.left,CatchChallenger::Orientation_left);
extraControlOnData(pathToGo.right,CatchChallenger::Orientation_right);
extraControlOnData(pathToGo.top,CatchChallenger::Orientation_top);
extraControlOnData(pathToGo.bottom,CatchChallenger::Orientation_bottom);
#endif
simplifiedMapList[current_map].pathToGo[coord]=pathToGo;
}
if(destination_map==current_map && tempPoint.x==destination_x && tempPoint.y==destination_y)
{
tryCancel=false;
std::vector<std::pair<CatchChallenger::Orientation,uint8_t/*step number*/> > returnedVar;
if(returnedVar.empty() || pathToGo.bottom.size()<returnedVar.size())
if(!pathToGo.bottom.empty())
returnedVar=pathToGo.bottom;
if(returnedVar.empty() || pathToGo.top.size()<returnedVar.size())
if(!pathToGo.top.empty())
returnedVar=pathToGo.top;
if(returnedVar.empty() || pathToGo.right.size()<returnedVar.size())
if(!pathToGo.right.empty())
returnedVar=pathToGo.right;
if(returnedVar.empty() || pathToGo.left.size()<returnedVar.size())
if(!pathToGo.left.empty())
returnedVar=pathToGo.left;
if(!returnedVar.empty())
{
if(returnedVar.back().second<=1)
{
qDebug() << "Bug due for last step";
return;
}
else
{
qDebug() << "Path result into " << time.elapsed() << "ms";
returnedVar.back().second--;
emit result(current_map,x,y,returnedVar);
return;
}
}
else
{
returnedVar.clear();
qDebug() << "Bug due to resolved path is empty";
return;
}
}
//revers resolv
//add to point to parse
{
//if the right case have been parsed
coord=std::pair<uint8_t,uint8_t>(tempPoint.x+1,tempPoint.y);
if(simplifiedMapForPathFinding.pathToGo.find(coord)==simplifiedMapForPathFinding.pathToGo.cend())
{
MapPointToParse newPoint=tempPoint;
newPoint.x++;
if(newPoint.x<simplifiedMapForPathFinding.width)
if(PathFinding::canGoOn(simplifiedMapForPathFinding,newPoint.x,newPoint.y) || (destination_map==current_map && newPoint.x==destination_x && newPoint.y==destination_y))
{
std::pair<uint8_t,uint8_t> point(newPoint.x,newPoint.y);
if(simplifiedMapForPathFinding.pointQueued.find(point)==simplifiedMapForPathFinding.pointQueued.cend())
{
simplifiedMapList[current_map].pointQueued.insert(point);
mapPointToParseList.push_back(newPoint);
}
}
}