本文整理汇总了C++中outputs函数的典型用法代码示例。如果您正苦于以下问题:C++ outputs函数的具体用法?C++ outputs怎么用?C++ outputs使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了outputs函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: rt_OneStep
/* Function: rtOneStep ========================================================
*
* Abstract:
* Perform one step of the model.
*/
static void rt_OneStep(RT_MODEL *S)
{
real_T tnext;
/***********************************************
* Check and see if error status has been set *
***********************************************/
if (rtmGetErrorStatus(S) != NULL) {
GBLbuf.stopExecutionFlag = 1;
return;
}
/* enable interrupts here */
tnext = rt_SimGetNextSampleHit();
rtsiSetSolverStopTime(rtmGetRTWSolverInfo(S),tnext);
outputs(S, 0);
rtExtModeSingleTaskUpload(S);
update(S, 0);
rt_SimUpdateDiscreteTaskSampleHits(rtmGetNumSampleTimes(S),
rtmGetTimingData(S),
rtmGetSampleHitPtr(S),
rtmGetTPtr(S));
if (rtmGetSampleTime(S,0) == CONTINUOUS_SAMPLE_TIME) {
rt_UpdateContinuousStates(S);
}
rtExtModeCheckEndTrigger();
} /* end rtOneStep */
示例2: QString
// Build an HTML status fragment for the given output line.
// For invalidLine(), report the "no outputs" help text (only when no
// outputs exist); for a valid index, report the device name, an OK
// message and any device-specific additional info. Always closes the
// BODY/HTML tags the caller's page opened.
QString DMXUSB::outputInfo(quint32 output)
{
    QString info;

    if (output == QLCIOPlugin::invalidLine())
    {
        if (m_outputs.size() == 0)
        {
            info.append(QString("<BR><B>%1</B>").arg(tr("No output support available.")));
            info.append(QString("<P>"));
            info.append(tr("Make sure that you have your hardware firmly plugged in. "
                           "NOTE: FTDI VCP interface is not supported by this plugin."));
            info.append(QString("</P>"));
        }
    }
    else if (output < quint32(m_outputs.size()))
    {
        info.append(QString("<H3>%1</H3>").arg(outputs()[output]));
        info.append(QString("<P>"));
        info.append(tr("Device is operating correctly."));
        info.append(QString("</P>"));
        const QString extra = m_outputs[output]->additionalInfo();
        if (extra.isEmpty() == false)
            info.append(extra);
    }

    info.append(QString("</BODY>"));
    info.append(QString("</HTML>"));
    return info;
}
示例3: get
void Views::copyView(const std::string& viewname,
const std::string& copyname)
{
vpz::View view = get(viewname);
vpz::View copy = get(viewname);
copy.setName(copyname);
std::string copyoutputname;
int number = 1;
do {
copyoutputname = view.output() + "_";
copyoutputname += boost::lexical_cast< std::string >(number);
++number;
}while (outputs().exist(copyoutputname));
copyOutput(view.output(), copyoutputname);
switch (copy.type()) {
case vpz::View::TIMED:
addTimedView(copy.name(), copy.timestep(), copyoutputname);
break;
case vpz::View::EVENT:
addEventView(copy.name(), copyoutputname);
break;
case vpz::View::FINISH:
addFinishView(copy.name(), copyoutputname);
break;
}
}
示例4: outputs
// Back-propagate this convolutional layer's error terms onto its inputs.
// Returns a volume shaped like Excitations (z = feature map, then y, x)
// in which each entry accumulates a contribution from every output
// position whose window (recorded in Steps) covered it.
dmatrix3 ConvLayer::backpropagation() const
{
// Gradient accumulator, zero-initialized, same shape as Excitations.
dmatrix3 outputs(Excitations.size(), dmatrix2
(Excitations[0].size(), dvec
(Excitations[0][0].size(), 0.0)));
ivec step;
step.reserve(4);
int index;
// index flattens the (y, x) output position; Steps is indexed by it.
for(int z=0;z<Errors.size();z++) {
index = 0;
for(int y=0;y<Errors[0].size();y++) {
for(int x=0;x<Errors[0][0].size();x++, index++) {
// Steps[index] = {i_begin, i_end, j_begin, j_end}: the input
// window that produced output (y, x).
step = Steps[index];
for(int i=step[0];i<step[1];i++) {
for(int j=step[2];j<step[3];j++) {
// NOTE(review): sigmoid_p is applied to the PRODUCT
// excitation * error. The usual chain rule would be
// sigmoid_p(Excitations[z][i][j]) * Errors[z][y][x]
// -- confirm the parenthesization is intentional.
outputs[z][i][j] += sigmoid_p(
Excitations[z][i][j] *
Errors[z][y][x]);
}
}
}
}
}
return outputs;
}
示例5: ensure_space_for_latent_data
// The imputation method is a "collapsed Gibbs sampler" that integrates out
// latent data from preceding layers (i.e. preceding nodes are activated
// probabilistically), but conditions on the latent data from the current
// layer and the layer above.
void GFFPS::impute_hidden_layer_outputs(RNG &rng) {
int number_of_hidden_layers = model_->number_of_hidden_layers();
if (number_of_hidden_layers == 0) return;
ensure_space_for_latent_data();
clear_latent_data();
std::vector<Vector> allocation_probs =
model_->activation_probability_workspace();
std::vector<Vector> complementary_allocation_probs = allocation_probs;
std::vector<Vector> workspace = allocation_probs;
for (int i = 0; i < model_->dat().size(); ++i) {
const Ptr<RegressionData> &data_point(model_->dat()[i]);
Nnet::HiddenNodeValues &outputs(imputed_hidden_layer_outputs_[i]);
model_->fill_activation_probabilities(data_point->x(), allocation_probs);
impute_terminal_layer_inputs(rng, data_point->y(), outputs.back(),
allocation_probs.back(),
complementary_allocation_probs.back());
for (int layer = number_of_hidden_layers - 1; layer > 0; --layer) {
// This for-loop intentionally skips layer 0, because the inputs to the
// first hidden layer are the observed predictors.
imputers_[layer].impute_inputs(
rng,
outputs,
allocation_probs[layer - 1],
complementary_allocation_probs[layer - 1],
workspace[layer - 1]);
}
imputers_[0].store_initial_layer_latent_data(outputs[0], data_point);
}
}
示例6: qIdsForNodes
// Snapshot this render target's state (the ids of its attached outputs)
// into a creation change for the backend node.
Qt3DCore::QNodeCreatedChangeBasePtr QRenderTarget::createNodeCreationChange() const
{
    auto change = Qt3DCore::QNodeCreatedChangePtr<QRenderTargetData>::create(this);
    QRenderTargetData &payload = change->data;
    payload.outputIds = qIdsForNodes(outputs());
    return change;
}
示例7: eval
void eval(int num, array **arrays)
{
std::vector<af_array> outputs(num);
for (int i = 0; i < num; i++) {
outputs[i] = arrays[i]->get();
}
AF_THROW(af_eval_multiple(num, &outputs[0]));
}
示例8: rt_context_update
// Propagate a real-time context update through this node: first every
// input port, then every output port, and finally the node's own hook.
void node::rt_context_update (rt_process_context& ctx)
{
    auto&& ins = inputs ();
    for (auto& port : ins)
        port.rt_context_update (ctx);

    auto&& outs = outputs ();
    for (auto& port : outs)
        port.rt_context_update (ctx);

    rt_on_context_update (ctx);
}
示例9: outputs
/* Parse a level-8 expression: a left-associative chain of additive
 * ('+' / '-') operators over level-9 subexpressions. A non-additive
 * lookahead character is pushed back and the tree built so far returned. */
static EORB_CPP_node *read_expr_8 (void)
{
    EORB_CPP_node *left;
    EORB_CPP_node *right;
    char op;
#ifdef DEBUG_EXPR
    if (debugging)
    {
        outputs("~E8:");
    }
#endif
    left = read_expr_9();
    for (;;)
    {
        op = getnhsexpand();
        if ((op == '+') || (op == '-'))
        {
#ifdef DEBUG_EXPR
            if (debugging)
            {
                outputc(op);
            }
#endif
            right = read_expr_9();
            left = newnode(left, op, right);
        }
        else
        {
#ifdef DEBUG_EXPR
            if (debugging)
            {
                outputs("~");
            }
#endif
            Push(op);
            return (left);
        }
    }
}
示例10: updateCached
// Recompute the cached fluid quantities: run the fluid free-energy
// evaluation (which also fills Adiel and Adiel_rhoExplicitTilde via the
// Outputs struct), then refresh Ntilde from the resulting densities N.
void updateCached()
{ ScalarFieldArray N;
FluidMixture::Outputs outputs(&N, 0, &Adiel_rhoExplicitTilde, 0, &Adiel);
fluidMixture->getFreeEnergy(outputs); //Fluid free energy including coupling
Ntilde.resize(N.size());
// J: presumably the real-to-reciprocal-space transform applied per site
// density -- confirm against the project's operator definitions.
for(unsigned i=0; i<N.size(); i++)
Ntilde[i] = J(N[i]);
}
示例11: gradient
// Accumulate into gradient_vector the gradient of the loss averaged over
// every (input, target) pair in `view`: forward-propagate each input,
// then back-propagate its error, scaled by 1/N.
// TODO: Check concept for InputIterator.
void gradient(View& view, const Eigen::VectorXd& parameters, Eigen::VectorXd& gradient_vector)
{
    const double inv_sample_count = 1. / view.size();
    gradient_vector.setZero();
    for (unsigned int sample = 0; sample < view.size(); sample++) {
        forward_propagation(parameters, view.first(sample), outputs());
        back_propagation_error(parameters, view.first(sample), outputs(),
                               view.second(sample), gradient_vector,
                               inv_sample_count);
    }
}
示例12: split
//! Split a list of signals across n buses: bus b receives input (b mod nlines),
//! cycling over the inputs when there are more buses than input lines.
//! \param inputs  the signals to distribute
//! \param nbus    the number of output buses
//! \return a list of nbus signals (default-constructed when `inputs` is empty)
siglist split(const siglist& inputs, int nbus)
{
    // Guard negative bus counts and empty input: the original code did
    // `b % nlines` with nlines == 0, which is undefined behavior.
    siglist outputs(nbus > 0 ? nbus : 0);
    const int nlines = (int)inputs.size();
    if (nlines == 0) return outputs;
    for (int b = 0; b < nbus; b++) {
        outputs[b] = inputs[b % nlines];
    }
    return outputs;
}
示例13: main
// Entry-point task (NXC/RobotC style): feeds three values through the
// inputs / processing / outputs stages.
task main()
{
int myval2 = 2; // defined as local to task main()(preferred)
int myval3 = 3;
// NOTE(review): myval1 is not declared in this task -- presumably a
// global defined elsewhere in the program; confirm.
inputs(myval1);
processing(myval2);
outputs(myval3);
}
示例14: assert
// Single feedforward pass: treat _weights as a row-major matrix with one
// row per output node and return the matrix-vector product with `inputs`.
std::vector<double> nevil::basic_feedforward_nn::update(const std::vector<double> &inputs)
{
  assert ((_num_input_nodes == inputs.size())
    && "Error: matrix size and input size don't match!");
  std::vector<double> activations(_num_output_nodes, 0);
  for (std::size_t out = 0; out < _num_output_nodes; ++out)
  {
    // Hoist the start of this output's weight row out of the inner loop.
    const std::size_t row_offset = out * _num_input_nodes;
    for (std::size_t in = 0; in < _num_input_nodes; ++in)
      activations[out] += _weights[row_offset + in] * inputs[in];
  }
  return activations;
}
示例15: check
// Compute the per-instance error terms of the mean squared error: for each
// training instance, the Euclidean distance between the network's outputs
// and the targets, with the whole vector scaled by 1/sqrt(N) so that the
// sum of squared terms equals the mean squared error.
Vector<double> MeanSquaredError::calculate_terms(void) const {
// Control sentence
#ifndef NDEBUG
check();
#endif
// Neural network stuff
const MultilayerPerceptron* multilayer_perceptron_pointer =
neural_network_pointer->get_multilayer_perceptron_pointer();
const unsigned inputs_number =
multilayer_perceptron_pointer->get_inputs_number();
const unsigned outputs_number =
multilayer_perceptron_pointer->get_outputs_number();
// Data set stuff
const Instances& instances = data_set_pointer->get_instances();
const unsigned training_instances_number =
instances.count_training_instances_number();
// Mean squared error stuff
Vector<double> performance_terms(training_instances_number);
// Work vectors reused across instances.
Vector<double> inputs(inputs_number);
Vector<double> outputs(outputs_number);
Vector<double> targets(outputs_number);
for (unsigned i = 0; i < training_instances_number; i++) {
// Input vector
inputs = data_set_pointer->get_training_input_instance(i);
// Output vector
outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
// Target vector
targets = data_set_pointer->get_training_target_instance(i);
// Error: Euclidean distance between outputs and targets
performance_terms[i] = outputs.calculate_distance(targets);
}
return (performance_terms / sqrt((double)training_instances_number));
}