本文整理汇总了C++中Parts类的典型用法代码示例。如果您正苦于以下问题:C++ Parts类的具体用法?C++ Parts怎么用?C++ Parts使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Parts类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: layout2d
// C ABI entry point: lays out `num` rectangles on a single stock sheet
// of sheet_x by sheet_y, accounting for a saw/cut thickness of cut_size.
// Returns 1 and stores a newly built Layout into *res on success;
// returns 0 (leaving *res untouched) when no layout could be produced.
extern "C" int DLLEXPORT layout2d(
	LayoutRect * layout_rects,
	unsigned int num,
	scalar sheet_x,
	scalar sheet_y,
	scalar cut_size,
	Layout ** res)
{
	Sheet sheet;
	sheet.size[0] = sheet_x;
	sheet.size[1] = sheet_y;

	// Convert the flat C array into the internal Parts collection.
	Parts parts;
	for (unsigned int idx = 0; idx < num; ++idx)
	{
		const LayoutRect &r = layout_rects[idx];
		parts.push_back(Part(r.size[0], r.size[1], r.can_rotate != 0, r.amount));
	}

	// The single stock sheet, expressed as a Part as the generator expects.
	Parts sheets;
	sheets.push_back(Part(sheet.size[0], sheet.size[1]));

	ResultsGenerator generator;
	generator.put_SawThickness(cut_size);
	generator.Begin(parts, sheets);

	Result outer_result;
	if (!generator.NextResult(outer_result))
		return 0;

	*res = _make_raskroy_layout(&outer_result.raskroy, cut_size, sheet);
	return 1;
}
示例2: file
void Twitter::testUploadImage()
{
this->setConsumerKey("consumer_key");
this->setConsumerSecret("consumer_secret");
this->setOauthToken("oauth_token");
this->setOauthTokenSecret("oauth_token_secret");
QFile file("test.png");
file.open(QIODevice::ReadOnly);
Part imagePart;
imagePart.contentDisposition = "name=\"media[]\"; filename=\"1234.png\"";
imagePart.contentType = "application/octet-stream";
imagePart.data = file.readAll();
Part statusPart;
statusPart.contentDisposition = "name=\"status\"";
statusPart.contentType = "";
statusPart.data = "Abrakadabra";
Parts parts;
parts.insert("image", imagePart);
parts.insert("status", statusPart);
QByteArray data = Helper::buildMultipartBody(this->mutipartBoundary(), parts);
resource("https://upload.twitter.com/1/statuses/update_with_media.json", "POST", Params(), data);
}
示例3: assert
void Joint::build(const Vector3d &v, const Rotation &r, bool dynamics)
{
if (m_bodyNum != BODY_NUM) { return; }
assert(m_world);
m_joint = createJoint(m_bodies[0], m_bodies[1]);
Parts *parts = (Parts*) dBodyGetData(m_bodies[1]);
assert(parts);
double x, y, z;
parts->givePosition(x, y, z);
m_rotv.set(x, y, z);
m_rotv -= m_anchor;
Vector3d av = m_anchor;
av.rotate(r);
av += v;
applyAnchor(av.x(), av.y(), av.z());
if (m_fixed) {
dJointSetFixed(m_joint);
}
if (dynamics) {
m_jfb = new dJointFeedback;
dJointSetFeedback(m_joint, m_jfb);
}
}
示例4: printf
void SimObjBase::dump()
{
printf("Agent(%d) : %s\n", id(), name());
for (AttrM::const_iterator i=m_attrs.begin(); i!=m_attrs.end(); i++) {
std::cout << i->second->toString()<< std::endl;
}
PartsIterator *itr = getPartsIterator();
Parts *p = NULL;
while (itr && (p = itr->next()) != NULL) {
p->dump();
}
delete itr;
}
示例5: Begin
// Prepares the generator for a new run: rebuilds the per-dimension size
// lookup tables from `parts` (s == 0 / s == 1 index the two dimensions)
// and records the available sheets.
void ResultsGenerator::Begin(Parts &parts, const Parts &sheets)
{
	m_remains.clear();
	for (auto s = 0; s <= 1; s++)
	{
		m_sizes[s].clear();
		for (auto pPart = parts.begin(); pPart != parts.end(); pPart++)
			m_sizes[s].AddPart(*pPart, s);

		// Order from big to small.  std::greater_equal is NOT a valid
		// comparator for std::sort: it is not a strict weak ordering
		// (comp(a, a) must be false), which makes the sort undefined
		// behavior.  The lambdas below derive the strict "greater than"
		// relation from the very same operator>= (a > b  <=>  !(b >= a)),
		// so they compile wherever the original code did.
		std::sort(m_sizes[s].begin(), m_sizes[s].end(),
			[](const Size &a, const Size &b) { return !(b >= a); });
		for (auto pSize = m_sizes[s].begin(); pSize != m_sizes[s].end(); pSize++)
		{
			std::sort(pSize->other_sizes.begin(), pSize->other_sizes.end(),
				[](const OtherSize &a, const OtherSize &b) { return !(b >= a); });
			// Set pointer to the smallest size.
			pSize->other_sizes.SetMin();
		}
	}
	m_sheets = sheets;
}
示例6: getPartsIterator
// Renders every part of this entity into the given GL context.  Each
// part draws inside its own matrix scope so its transform cannot leak
// into the next part.
void SimObjBase::draw(GLContext &c)
{
	PartsIterator *partsIt = getPartsIterator();
	if (partsIt) {
		for (Parts *part = partsIt->next(); part != NULL; part = partsIt->next()) {
			glPushMatrix();
			part->draw(c);
			glPopMatrix();
		}
	}
	delete partsIt;
	/*
	printf("Agent(%d) : %s\n", id(), name());
	printf("(%f, %f, %f)\n", x(), y(), z());
	printf("\t : (%f, %f, %f, %f)\n", r00(), r01(), r02(), r03());
	printf("\t : (%f, %f, %f, %f)\n", r10(), r11(), r12(), r13());
	printf("\t : (%f, %f, %f, %f)\n", r20(), r21(), r22(), r23());
	for (PartsM::iterator i=m_parts.begin(); i!=m_parts.end(); i++) {
	Parts *p = i->second;
	p->dump();
	}
	*/
}
示例7:
// Filters `candidates` in place, keeping only candidates whose part
// detections are depth-consistent: for every child/parent part pair,
// when both regions have valid (positive) median depth, the depth gap
// must not exceed norm(anchor) * zfactor.  A candidate that survives
// the walk all the way down to part 1 is kept.
// NOTE(review): the template header (template <typename T>) is assumed
// to precede this definition outside this excerpt.
// NOTE(review): assumes parts.nparts(c) >= 2 — with nparts == 0 the
// unsigned `nparts-1` wraps to UINT_MAX, and with nparts == 1 the loop
// never runs and the candidate is silently dropped; confirm upstream.
void SearchSpacePruning<T>::filterCandidatesByDepth(Parts& parts, vectorCandidate& candidates, const Mat& depth, const float zfactor) {
vectorCandidate new_candidates;
const unsigned int N = candidates.size();
for (unsigned int n = 0; n < N; ++n) {
// the mixture component this candidate was detected with
const unsigned int c = candidates[n].component();
const unsigned int nparts = parts.nparts(c);
const vector<Rect>& boxes = candidates[n].parts();
// walk from the last part down to part 1 (part 0 is the root)
for (unsigned int p = nparts-1; p >= 1; --p) {
ComponentPart part = parts.component(c,p);
Point anchor = part.anchor(0);
Rect child = boxes[part.self()];
Rect parent = boxes[part.parent().self()];
// median depth over the child and parent bounding boxes
T cmed_depth = Math::median<T>(depth(child));
T pmed_depth = Math::median<T>(depth(parent));
if (cmed_depth > 0 && pmed_depth > 0) {
// depth gap larger than the scaled anchor distance: reject candidate
if (abs(cmed_depth-pmed_depth) > norm(anchor)*zfactor) break;
}
// reached the last pair without breaking: candidate passes
if (p == 1) new_candidates.push_back(candidates[n]);
}
}
candidates = new_candidates;
}
示例8: getObj
// Periodic action callback: logs the agent's position, poses the right
// arm (elbow fixed at 90 degrees, shoulder advancing 45 degrees per
// call), and logs the pose of the upper and lower right arm parts.
// Returns the delay in seconds until the next invocation.
double AgentController::onAction(ActionEvent &evt)
{
	try {
		// Shoulder angle advances by 45 degrees on every callback.
		static int deg = 0;

		SimObj *self = getObj(myname());
		Vector3d position;
		self->getPosition(position);
		LOG_MSG(("pos = (%f, %f, %f)", position.x(), position.y(), position.z()));

		self->setJointAngle("R_ELBOW", DEG2RAD(90));
		self->setJointAngle("R_SHOULDER", DEG2RAD(deg));

		// Log position and orientation of the upper right arm.
		Parts *upperArm = self->getParts("RU_ARM");
		if (upperArm) {
			const double *pos = upperArm->getPosition();
			LOG_MSG(("RU_ARM(%f, %f, %f)", pos[0], pos[1], pos[2]));
			const double *q = upperArm->getQuaternion();
			LOG_MSG((" (%f, %f, %f, %f", q[0], q[1], q[2], q[3]));
		}

		// Log position and orientation of the lower right arm.
		Parts *lowerArm = self->getParts("RL_ARM");
		if (lowerArm) {
			const double *pos = lowerArm->getPosition();
			LOG_MSG(("RL_ARM(%f, %f, %f)", pos[0], pos[1], pos[2]));
			const double *q = lowerArm->getQuaternion();
			LOG_MSG((" (%f, %f, %f, %f", q[0], q[1], q[2], q[3]));
		}

		deg += 45;
	} catch(SimObj::Exception &) {
		// Deliberately ignored: a failed query simply skips this cycle.
		;
	}
	return 0.1;
}
示例9: getAttachedParts
// Returns the transitive closure of parts attached to `part`: its
// direct attachments followed by everything attached to them,
// recursively.
//
// Fix: the original range-for iterated `parts` while inserting into
// `parts` inside the loop body — for a vector-backed container that
// invalidates the cached iterators (undefined behavior), and for a
// node-based container it makes the loop revisit freshly appended
// elements and expand them again.  We now iterate over an immutable
// snapshot of the direct attachments only.
//
// NOTE(review): assumes the attachment graph is acyclic; a cycle would
// still recurse without bound — confirm with the container's invariants.
PartContainer::Parts PartContainer::getAllAttachedParts(Part* part) const
{
	const Parts direct = getAttachedParts(part);
	Parts all = direct;
	for (Part* child : direct)
	{
		Parts transitive = getAllAttachedParts(child);
		all.insert(all.end(), transitive.begin(), transitive.end());
	}
	return all;
}
示例10: sizeof
/**
 * @brief Serializes the entity into a binary representation
 * @param n Receives the size of the serialized data
 * @return Pointer to the serialized data
 */
char *SimObjBase::toBinary(int &n)
{
const int RESIZE = 1000;
static int bufsize = RESIZE;
static char *buf = new char[bufsize];
char *p = buf;
p += sizeof(DataLengthType); // skip datalen
BINARY_SET_DATA_L_INCR(p, Id, id());
BINARY_SET_STRING_INCR(p, name());
BINARY_SET_STRING_INCR(p, classname());
short attached = isAttached();
BINARY_SET_DATA_S_INCR(p, short, attached);
BINARY_SET_DATA_L_INCR(p, Operation, m_ops);
DataOffsetType ao_offset = p - buf;
BINARY_SET_DATA_S_INCR(p, DataOffsetType, 0); // skip attr offset
DataOffsetType bo_offset = p - buf;
BINARY_SET_DATA_S_INCR(p, DataOffsetType, 0); // skip body offset
// attrs
{
DataOffsetType attrs_offset = p - buf;
BINARY_SET_DATA_S(buf + ao_offset, DataOffsetType, attrs_offset);
BINARY_SET_DATA_S_INCR(p, DataLengthType, 0); // skip attrs size
DataLengthType attrssize = sizeof(DataLengthType);
for (AttrM::iterator i=m_attrs.begin(); i!=m_attrs.end(); i++) {
int head = p - buf;
Attribute *attr = i->second;
int nn;
char *pp = attr->toBinary(nn);
if (head + nn >= bufsize) {
int newsize = bufsize + RESIZE;
char *newbuf = new char[newsize];
memcpy(newbuf, buf, head);
delete buf;
buf = newbuf;
bufsize = newsize;
p = buf + head;
}
memcpy(p, pp, nn);
p += nn;
attrssize += nn;
}
BINARY_SET_DATA_S(buf + attrs_offset, DataLengthType, attrssize);
}
// set body offset value
{
DataOffsetType body_offset = p - buf;
BINARY_SET_DATA_S(buf + bo_offset, DataOffsetType, body_offset);
// body
BINARY_SET_DATA_S_INCR(p, DataLengthType, 0); // skip body size
DataLengthType bodysize = sizeof(DataLengthType);
//for (PartsM::iterator i=m_parts.begin(); i!=m_parts.end(); i++) {
PartsIterator *itr = getPartsIterator();
Parts *parts = NULL;
while (itr && (parts = itr->next()) != NULL) {
if (parts->isBlind()) { continue; }
int head = p - buf;
//Parts *parts = i->second;
// added by sekikawa (2007/12/4)
#ifdef TAKU_TEST
parts->calcAbsPos(this);
#endif
int nn;
char *pp = parts->toBinary(nn);
if (head + nn >= bufsize) {
int newsize = bufsize + RESIZE;
char *newbuf = new char[newsize];
memcpy(newbuf, buf, head);
delete buf;
buf = newbuf;
bufsize = newsize;
p = buf + head;
}
memcpy(p, pp, nn);
p += nn;
bodysize += nn;
}
BINARY_SET_DATA_S(buf + body_offset, DataLengthType, bodysize);
delete itr;
}
//.........这里部分代码省略.........
示例11: xv
// Back-traces the dynamic-programming tables to recover candidate
// detections.  For every scale and component, root locations whose
// score exceeds thresh_ are expanded: each part's (x, y, mixture) is
// looked up from its parent's via the argmin tables Ix/Iy/Ik, a scaled
// bounding rectangle is computed per part, and the assembled Candidate
// is appended to `candidates`.
// NOTE(review): the template header (template <typename T>) is assumed
// to precede this definition outside this excerpt.
// NOTE(review): assumes part 0 is always the root and that parents are
// visited before children in the p-loop (parts.component ordering).
void DynamicProgram<T>::argmin(Parts& parts, const vector2DMat& rootv, const vector2DMat& rooti, const vectorf scales, const vector4DMat& Ix, const vector4DMat& Iy, const vector4DMat& Ik, vectorCandidate& candidates) {
// for each scale, and each component, traverse back down the tree to retrieve the part positions
int nscales = scales.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int n = 0; n < nscales; ++n) {
T scale = scales[n];
for (int c = 0; c < parts.ncomponents(); ++c) {
// get the scores and indices for this tree of parts
const vector2DMat& Iknc = Ik[n][c];
const vector2DMat& Ixnc = Ix[n][c];
const vector2DMat& Iync = Iy[n][c];
int nparts = parts.nparts(c);
// threshold the root score
Mat over_thresh = rootv[n][c] > thresh_;
Mat rootmix = rooti[n][c];
vectorPoint inds;
find(over_thresh, inds);
// one candidate per above-threshold root location
for (int i = 0; i < inds.size(); ++i) {
Candidate candidate;
// per-part x, y and mixture indices, filled in root-to-leaf order
vectori xv(nparts);
vectori yv(nparts);
vectori mv(nparts);
for (int p = 0; p < nparts; ++p) {
ComponentPart part = parts.component(c, p);
// calculate the child's points from the parent's points
int x, y, m;
if (part.isRoot()) {
x = xv[0] = inds[i].x;
y = yv[0] = inds[i].y;
m = mv[0] = rootmix.at<int>(inds[i]);
} else {
// parent was already resolved (idx < p), so its tables are valid
int idx = part.parent().self();
x = xv[idx];
y = yv[idx];
m = mv[idx];
xv[p] = Ixnc[p][m].at<int>(y,x);
yv[p] = Iync[p][m].at<int>(y,x);
mv[p] = Iknc[p][m].at<int>(y,x);
}
// calculate the bounding rectangle and add it to the Candidate
Point ptwo = Point(2,2);
Point pone = Point(1,1);
Point xy1 = (Point(xv[p],yv[p])-ptwo)*scale;
Point xy2 = xy1 + Point(part.xsize(m), part.ysize(m))*scale - pone;
// only the root part carries the detection score; children score 0
if (part.isRoot()) candidate.addPart(Rect(xy1, xy2), rootv[n][c].at<T>(inds[i]));
else candidate.addPart(Rect(xy1, xy2), 0.0);
}
#ifdef _OPENMP
#pragma omp critical(addcandidate)
#endif
// candidates is shared across the OpenMP scale loop; serialize pushes
{
candidates.push_back(candidate);
}
}
}
}
}
示例12: ncscores
void DynamicProgram<T>::min(Parts& parts, vector2DMat& scores, vector4DMat& Ix, vector4DMat& Iy, vector4DMat& Ik, vector2DMat& rootv, vector2DMat& rooti) {
// initialize the outputs, preallocate vectors to make them thread safe
// TODO: better initialisation of Ix, Iy, Ik
const int nscales = scores.size();
const int ncomponents = parts.ncomponents();
Ix.resize(nscales, vector3DMat(ncomponents));
Iy.resize(nscales, vector3DMat(ncomponents));
Ik.resize(nscales, vector3DMat(ncomponents));
rootv.resize(nscales, vectorMat(ncomponents));
rooti.resize(nscales, vectorMat(ncomponents));
// for each scale, and each component, update the scores through message passing
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int nc = 0; nc < nscales*ncomponents; ++nc) {
// calculate the inner loop variables from the dual variables
const int n = floor(nc / ncomponents);
const int c = nc % ncomponents;
// allocate the inner loop variables
Ix[n][c].resize(parts.nparts(c));
Iy[n][c].resize(parts.nparts(c));
Ik[n][c].resize(parts.nparts(c));
vectorMat ncscores(scores[n].size());
for (int p = parts.nparts(c)-1; p > 0; --p) {
// get the component part (which may have multiple mixtures associated with it)
ComponentPart cpart = parts.component(c, p);
int nmixtures = cpart.nmixtures();
Ix[n][c][p].resize(nmixtures);
Iy[n][c][p].resize(nmixtures);
Ik[n][c][p].resize(nmixtures);
// intermediate results for mixtures of this part
vectorMat scoresp;
vectorMat Ixp;
vectorMat Iyp;
for (int m = 0; m < nmixtures; ++m) {
// raw score outputs
Mat score_in, score_dt, Ix_dt, Iy_dt;
if (cpart.score(ncscores, m).empty()) {
score_in = cpart.score(scores[n], m);
} else {
score_in = cpart.score(ncscores, m);
}
// get the anchor position
Point anchor = cpart.anchor(m);
// compute the distance transform
distanceTransform(score_in, cpart.defw(m), anchor, score_dt, Ix_dt, Iy_dt);
scoresp.push_back(score_dt);
Ixp.push_back(Ix_dt);
Iyp.push_back(Iy_dt);
//cout << score_dt(Range(0,10), Range(0,10)) << endl;
// calculate a valid region of interest for the scores
/*
int X = score_in.cols;
int Y = score_in.rows;
int xmin = std::max(std::min(anchor.x, X), 0);
int ymin = std::max(std::min(anchor.y, Y), 0);
int xmax = std::min(std::max(anchor.x+X, 0), X);
int ymax = std::min(std::max(anchor.y+Y, 0), Y);
int xoff = std::max(-anchor.x, 0);
int yoff = std::max(-anchor.y, 0);
// shift the score by the Part's offset from its parent
Mat scorem = -numeric_limits<T>::infinity() * Mat::ones(score_dt.size(), score_dt.type());
Mat Ixm = Mat::zeros(Ix_dt.size(), Ix_dt.type());
Mat Iym = Mat::zeros(Iy_dt.size(), Iy_dt.type());
if (xoff < X && yoff < Y && (ymax - ymin) > 0 && (xmax - xmin) > 0) {
Mat score_dt_range = score_dt(Range(ymin, ymax), Range(xmin, xmax));
Mat score_range = scorem(Range(yoff, yoff+ymax-ymin), Range(xoff, xoff+xmax-xmin));
Mat Ix_dt_range = Ix_dt(Range(ymin, ymax), Range(xmin, xmax));
Mat Ixm_range = Ixm(Range(yoff, yoff+ymax-ymin), Range(xoff, xoff+xmax-xmin));
Mat Iy_dt_range = Iy_dt(Range(ymin, ymax), Range(xmin, xmax));
Mat Iym_range = Iym(Range(yoff, yoff+ymax-ymin), Range(xoff, xoff+xmax-xmin));
score_dt_range.copyTo(score_range);
Ix_dt_range.copyTo(Ixm_range);
Iy_dt_range.copyTo(Iym_range);
}
// push the scores onto the intermediate vectors
scoresp.push_back(scorem);
Ixp.push_back(Ixm);
Iyp.push_back(Iym);
*/
}
nmixtures = cpart.parent().nmixtures();
for (int m = 0; m < nmixtures; ++m) {
vectorMat weighted;
// weight each of the child scores
//.........这里部分代码省略.........
示例13: CreateParts
void Pipe::TrainEpoch(int epoch) {
Instance *instance;
Parts *parts = CreateParts();
Features *features = CreateFeatures();
vector<double> scores;
vector<double> gold_outputs;
vector<double> predicted_outputs;
double total_cost = 0.0;
double total_loss = 0.0;
double eta;
int num_instances = instances_.size();
double lambda = 1.0/(options_->GetRegularizationConstant() *
(static_cast<double>(num_instances)));
timeval start, end;
gettimeofday(&start, NULL);
int time_decoding = 0;
int time_scores = 0;
int num_mistakes = 0;
LOG(INFO) << " Iteration #" << epoch + 1;
dictionary_->StopGrowth();
for (int i = 0; i < instances_.size(); i++) {
int t = num_instances * epoch + i;
instance = instances_[i];
MakeParts(instance, parts, &gold_outputs);
MakeFeatures(instance, parts, features);
// If using only supported features, must remove the unsupported ones.
// This is necessary not to mess up the computation of the squared norm
// of the feature difference vector in MIRA.
if (options_->only_supported_features()) {
RemoveUnsupportedFeatures(instance, parts, features);
}
timeval start_scores, end_scores;
gettimeofday(&start_scores, NULL);
ComputeScores(instance, parts, features, &scores);
gettimeofday(&end_scores, NULL);
time_scores += diff_ms(end_scores, start_scores);
if (options_->GetTrainingAlgorithm() == "perceptron" ||
options_->GetTrainingAlgorithm() == "mira" ) {
timeval start_decoding, end_decoding;
gettimeofday(&start_decoding, NULL);
decoder_->Decode(instance, parts, scores, &predicted_outputs);
gettimeofday(&end_decoding, NULL);
time_decoding += diff_ms(end_decoding, start_decoding);
if (options_->GetTrainingAlgorithm() == "perceptron") {
for (int r = 0; r < parts->size(); ++r) {
if (!NEARLY_EQ_TOL(gold_outputs[r], predicted_outputs[r], 1e-6)) {
++num_mistakes;
}
}
eta = 1.0;
} else {
CHECK(false) << "Plain mira is not implemented yet.";
}
MakeGradientStep(parts, features, eta, t, gold_outputs,
predicted_outputs);
} else if (options_->GetTrainingAlgorithm() == "svm_mira" ||
options_->GetTrainingAlgorithm() == "crf_mira" ||
options_->GetTrainingAlgorithm() == "svm_sgd" ||
options_->GetTrainingAlgorithm() == "crf_sgd") {
double loss;
timeval start_decoding, end_decoding;
gettimeofday(&start_decoding, NULL);
if (options_->GetTrainingAlgorithm() == "svm_mira" ||
options_->GetTrainingAlgorithm() == "svm_sgd") {
// Do cost-augmented inference.
double cost;
decoder_->DecodeCostAugmented(instance, parts, scores, gold_outputs,
&predicted_outputs, &cost, &loss);
total_cost += cost;
} else {
// Do marginal inference.
double entropy;
decoder_->DecodeMarginals(instance, parts, scores, gold_outputs,
&predicted_outputs, &entropy, &loss);
CHECK_GE(entropy, 0.0);
}
gettimeofday(&end_decoding, NULL);
time_decoding += diff_ms(end_decoding, start_decoding);
if (loss < 0.0) {
if (!NEARLY_EQ_TOL(loss, 0.0, 1e-9)) {
LOG(INFO) << "Warning: negative loss set to zero: " << loss;
}
loss = 0.0;
}
total_loss += loss;
// Compute difference between predicted and gold feature vectors.
FeatureVector difference;
MakeFeatureDifference(parts, features, gold_outputs, predicted_outputs,
&difference);
//.........这里部分代码省略.........
示例14: ncscores
void DynamicProgram<T>::min(Parts& parts, vector2DMat& scores, vector4DMat& Ix, vector4DMat& Iy, vector4DMat& Ik, vector2DMat& rootv, vector2DMat& rooti) {
// initialize the outputs, preallocate vectors to make them thread safe
// TODO: better initialisation of Ix, Iy, Ik
const unsigned int nscales = scores.size();
const unsigned int ncomponents = parts.ncomponents();
Ix.resize(nscales, vector3DMat(ncomponents));
Iy.resize(nscales, vector3DMat(ncomponents));
Ik.resize(nscales, vector3DMat(ncomponents));
rootv.resize(nscales, vectorMat(ncomponents));
rooti.resize(nscales, vectorMat(ncomponents));
// for each scale, and each component, update the scores through message passing
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (unsigned int nc = 0; nc < nscales*ncomponents; ++nc) {
// calculate the inner loop variables from the dual variables
const unsigned int n = floor(nc / ncomponents);
const unsigned int c = nc % ncomponents;
// allocate the inner loop variables
Ix[n][c].resize(parts.nparts(c));
Iy[n][c].resize(parts.nparts(c));
Ik[n][c].resize(parts.nparts(c));
vectorMat ncscores(scores[n].size());
for (int p = parts.nparts(c)-1; p > 0; --p) {
// get the component part (which may have multiple mixtures associated with it)
ComponentPart cpart = parts.component(c, p);
const unsigned int nmixtures = cpart.nmixtures();
const unsigned int pnmixtures = cpart.parent().nmixtures();
Ix[n][c][p].resize(pnmixtures);
Iy[n][c][p].resize(pnmixtures);
Ik[n][c][p].resize(pnmixtures);
// intermediate results for mixtures of this part
vectorMat scoresp;
vectorMat Ixp;
vectorMat Iyp;
for (unsigned int m = 0; m < nmixtures; ++m) {
// raw score outputs
Mat_<T> score_in, score_dt;
Mat_<int> Ix_dt, Iy_dt;
if (cpart.score(ncscores, m).empty()) {
score_in = cpart.score(scores[n], m);
} else {
score_in = cpart.score(ncscores, m);
}
// get the anchor position
Point anchor = cpart.anchor(m);
// compute the distance transform
vectorf w = cpart.defw(m);
Quadratic fx(-w[0], -w[1]);
Quadratic fy(-w[2], -w[3]);
dt_.compute(score_in, fx, fy, anchor, score_dt, Ix_dt, Iy_dt);
scoresp.push_back(score_dt);
Ixp.push_back(Ix_dt);
Iyp.push_back(Iy_dt);
}
for (unsigned int m = 0; m < pnmixtures; ++m) {
vectorMat weighted;
// weight each of the child scores
// TODO: More elegant way of handling bias
for (unsigned int mm = 0; mm < nmixtures; ++mm) {
weighted.push_back(scoresp[mm] + cpart.bias(mm)[m]);
}
// compute the max over the mixtures
Mat maxv, maxi;
Math::reduceMax<T>(weighted, maxv, maxi);
// choose the best indices
Mat Ixm, Iym;
Math::reducePickIndex<int>(Ixp, maxi, Ixm);
Math::reducePickIndex<int>(Iyp, maxi, Iym);
Ix[n][c][p][m] = Ixm;
Iy[n][c][p][m] = Iym;
Ik[n][c][p][m] = maxi;
// update the parent's score
ComponentPart parent = cpart.parent();
if (parent.score(ncscores,m).empty()) parent.score(scores[n],m).copyTo(parent.score(ncscores,m));
parent.score(ncscores,m) += maxv;
if (parent.self() == 0) {
ComponentPart root = parts.component(c);
}
}
}
// add bias to the root score and find the best mixture
ComponentPart root = parts.component(c);
Mat rncscore = root.score(ncscores,0);
T bias = root.bias(0)[0];
vectorMat weighted;
//.........这里部分代码省略.........
示例15: new_layout2d
// C ABI entry point (newer optimizer): lays out `num` rectangles on a
// single sheet of sheet_x by sheet_y with saw thickness cut_size.
// Returns 1 and stores a newly built Layout into *res on success;
// returns 0 otherwise.  On success the produced amounts are written
// back into layout_rects[i].amount.
extern "C" int DLLEXPORT new_layout2d(
LayoutRect * layout_rects,
unsigned int num,
scalar sheet_x,
scalar sheet_y,
scalar cut_size,
Layout ** res)
{
Rect sheet;
sheet.Size[0] = sheet_x;
sheet.Size[1] = sheet_y;
// Convert the flat C array into Parts, tagging each with its input index.
Parts parts;
for (unsigned int i = 0; i < num; i++)
{
auto rect = &layout_rects[i];
Part part(rect->size[0], rect->size[1],
rect->can_rotate != 0, rect->amount);
part.Tag = (int)i;
parts.push_back(part);
}
// merge parts with the same relevant characteristics
// (keyed by normalized rect + rotatability; values point into `parts`)
std::map<PartKey, std::list<Part*> > unique_parts_map;
for (auto i = parts.begin(); i != parts.end(); i++) {
PartKey part_key;
part_key.rect = i->rect;
part_key.can_rotate = i->Rotate;
part_key.normalize();
unique_parts_map[part_key].push_back(&*i);
}
// One representative Part per unique key, linked to its originals.
Parts unique_parts;
for (auto i = unique_parts_map.begin(); i != unique_parts_map.end(); i++) {
Part part;
part.rect = i->first.rect;
part.Rotate = i->first.can_rotate;
part.parts = i->second;
// calculate combined amount
part.Amount = 0;
for_each(part.parts.begin(), part.parts.end(),
[&part](Part * el) { part.Amount += el->Amount; });
unique_parts.push_back(part);
}
LayoutBuilder layout_builder;
// initialize amounts vector
Amounts remains(unique_parts.size());
// assign amount offsets to parts
// and amounts to remains
auto offset = 0;
std::for_each(unique_parts.begin(),
unique_parts.end(),
[&offset, &remains](Part & part) {
part.AmountOffset = offset++;
remains[part.AmountOffset] = part.Amount;
});
// initialize sizes lookups (one per dimension)
Sizes sizes[2];
for (auto s = 0; s <= 1; s++)
{
for (auto pPart = unique_parts.begin(); pPart != unique_parts.end(); pPart++)
sizes[s].AddPart(*pPart, s);
// order from big to small
// NOTE(review): std::greater_equal is not a strict weak ordering, so
// these std::sort calls are technically undefined behavior — should
// use a strict "greater" comparator (see ResultsGenerator::Begin).
std::sort(sizes[s].begin(), sizes[s].end(), std::greater_equal<Size>());
for (auto pSize = sizes[s].begin(); pSize != sizes[s].end(); pSize++)
{
std::sort(pSize->other_sizes.begin(), pSize->other_sizes.end(),
std::greater_equal<OtherSize>());
pSize->other_sizes.SetMin();
}
}
// NOTE(review): min_size is passed to Layout2d uninitialized —
// presumably the optimizer fills it in; confirm it is never read first.
scalar min_size[2];
Layout2d optimizer(sizes, min_size, &remains);
optimizer.put_SawThickness(cut_size);
int ret = optimizer.new_optimize(sheet, layout_builder) ? 1 : 0;
if (ret) {
// unique_ptr guards the Layout until ownership passes to the caller.
unique_ptr<Layout> layout(new Layout);
layout_builder.simplify();
layout_builder.check();
layout_builder.to_layout(*layout);
*res = layout.release();
// report back new amounts
// (parts[i] corresponds to layout_rects[i] by construction above)
for (size_t i = 0; i < parts.size(); i++) {
layout_rects[i].amount = parts[i].Amount;
}
}
return ret;
}