本文整理汇总了C++中MatrixPtr::add方法的典型用法代码示例。如果您正苦于以下问题：C++ MatrixPtr::add方法的具体用法？C++ MatrixPtr::add怎么用？C++ MatrixPtr::add使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MatrixPtr的用法示例。
在下文中一共展示了MatrixPtr::add方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: backward
void SlopeInterceptLayer::backward(const UpdateCallback& callback) {
MatrixPtr inG = getInputGrad(0);
MatrixPtr outG = getOutputGrad();
if (inG) {
REGISTER_TIMER_INFO("BwSlopeInterceptTimer", getName().c_str());
inG->add(*outG, config_.slope());
}
}
示例2: doOneConvtTest
// Forward-pass test for the transposed-convolution layer: each case builds
// the expected output matrix for a 5x5 image and hands it to doOneConvtTest,
// which runs the layer and compares against `result`.
TEST(Layer, convTransLayerFwd2) {
MatrixPtr result;
// Case 1: a single 5x5 filter covers the whole image in one placement,
// so every output element receives exactly one contribution (all 1s).
result = Matrix::create(1, 5 * 5, false, false);
result->zeroMem();
result->add(1.0);
doOneConvtTest(/* imgSize */ 5,
/* output_x */ 1,
/* stride */ 1,
/* padding */ 0,
/* filter_size */ 5,
result);
// Case 2: 4x4 filter, stride 1 (2x2 output positions) -- interior pixels
// are covered by up to four overlapping filter placements.
float resultData[] = {1, 2, 2, 2, 1,
2, 4, 4, 4, 2,
2, 4, 4, 4, 2,
2, 4, 4, 4, 2,
1, 2, 2, 2, 1};
// NOTE(review): setData makes `result` alias this stack array (no copy);
// this is safe only because doOneConvtTest runs before the array's scope ends.
result->setData(resultData);
doOneConvtTest(/* imgSize */ 5,
/* output_x */ 2,
/* stride */ 1,
/* padding */ 0,
/* filter_size */ 4,
result);
// Case 3: 5x5 filter, stride 2, padding 1 -- expected overlap counts
// happen to match case 2.
float resultData2[] = {1, 2, 2, 2, 1,
2, 4, 4, 4, 2,
2, 4, 4, 4, 2,
2, 4, 4, 4, 2,
1, 2, 2, 2, 1};
result->setData(resultData2);
doOneConvtTest(/* imgSize */ 5,
/* output_x */ 2,
/* stride */ 2,
/* padding */ 1,
/* filter_size */ 5,
result);
// Case 4: 3x3 filter, stride 2, no padding -- only the center row/column
// is covered by two placements.
float resultData3[] = {1, 1, 2, 1, 1,
1, 1, 2, 1, 1,
2, 2, 4, 2, 2,
1, 1, 2, 1, 1,
1, 1, 2, 1, 1};
result->setData(resultData3);
doOneConvtTest(/* imgSize */ 5,
/* output_x */ 2,
/* stride */ 2,
/* padding */ 0,
/* filter_size */ 3,
result);}
示例3: backward
void ResizeLayer::backward(const UpdateCallback& callback) {
const Argument& input = getInput(0);
size_t height = input.value->getHeight();
size_t width = input.value->getWidth();
if (!input.grad) {
return;
}
MatrixPtr tmp =
Matrix::create(input.grad->getData(), height * width / getSize(),
getSize(), false, useGpu_);
tmp->add(*output_.grad);
}
示例4: forward
void SlopeInterceptLayer::forward(PassType passType) {
  Layer::forward(passType);

  // Elementwise affine transform: output = slope * input + intercept.
  MatrixPtr input = getInputValue(0);
  const size_t numRows = input->getHeight();
  const size_t layerSize = getSize();
  CHECK_EQ(layerSize, input->getWidth());

  {
    // Allocate/resize the output buffer for this batch.
    REGISTER_TIMER_INFO("FwResetTimer", getName().c_str());
    reserveOutput(numRows, layerSize);
  }

  MatrixPtr output = getOutputValue();
  {
    REGISTER_TIMER_INFO("FwSlopeInterceptTimer", getName().c_str());
    output->mulScalar(*input, config_.slope());
    output->add(config_.intercept());
  }
}
示例5: backwardSequence
// Backward pass over all sequences in the batch, one GRU frame at a time,
// walking frames in the reverse of the forward-pass order.
//   batchSize    - total number of frames across all sequences
//   numSequences - number of sequences in the batch
//   starts       - per-sequence start offsets (assumes length numSequences + 1
//                  -- TODO confirm against caller)
//   inputGrad    - if non-null, gate gradients are accumulated into it
void GatedRecurrentLayer::backwardSequence(int batchSize,
size_t numSequences,
const int *starts,
MatrixPtr inputGrad) {
REGISTER_TIMER_INFO("GruBwSequenceTime", getName().c_str());
// Collect raw data pointers for the hl_gru kernel interface.
hl_gru_value gruValue;
gruValue.gateWeight = (gateWeight_->getW())->getData();
gruValue.stateWeight = (stateWeight_->getW())->getData();
gruValue.gateValue = gate_.value->getData();
gruValue.resetOutputValue = resetOutput_.value->getData();
gruValue.outputValue = output_.value->getData();
hl_gru_grad gruGrad;
// Weight gradients may be absent (e.g. frozen parameters); pass nullptr then.
gruGrad.gateWeightGrad =
(gateWeight_->getWGrad() ? gateWeight_->getWGrad()->getData() : nullptr);
gruGrad.stateWeightGrad =
(stateWeight_->getWGrad() ? stateWeight_->getWGrad()->getData() : nullptr);
gruGrad.gateGrad = gate_.grad->getData();
gruGrad.resetOutputGrad = resetOutput_.grad->getData();
gruGrad.outputGrad = output_.grad->getData();
if (!reversed_) {
// Forward-direction layer: backward starts at the last frame, so seek all
// pointers to frame (batchSize - 1). Gate buffers hold 3 values per unit.
gruValue.gateValue += (batchSize - 1) * getSize() * 3;
gruValue.resetOutputValue += (batchSize - 1) * getSize();
gruValue.outputValue += (batchSize - 1) * getSize();
gruGrad.gateGrad += (batchSize - 1) * getSize() * 3;
gruGrad.resetOutputGrad += (batchSize - 1) * getSize();
gruGrad.outputGrad += (batchSize - 1) * getSize();
// The previous frame (in time) lies one step earlier in memory.
gruValue.prevOutValue = gruValue.outputValue - getSize();
gruGrad.prevOutGrad = gruGrad.outputGrad - getSize();
} else {
// Reversed layer: backward starts at frame 0; the "previous" frame in the
// reversed time direction lies one step later in memory.
gruValue.prevOutValue = gruValue.outputValue + getSize();
gruGrad.prevOutGrad = gruGrad.outputGrad + getSize();
}
// Advance every frame pointer to the next frame to process; the memory
// direction depends on whether the layer runs reversed.
auto nextFrame = [&gruValue, &gruGrad](bool reversed, int frameSize) {
if (reversed) {
gruValue.gateValue += frameSize * 3;
gruValue.resetOutputValue += frameSize;
gruValue.outputValue += frameSize;
gruGrad.gateGrad += frameSize * 3;
gruGrad.resetOutputGrad += frameSize;
gruGrad.outputGrad += frameSize;
gruValue.prevOutValue = gruValue.outputValue + frameSize;
gruGrad.prevOutGrad = gruGrad.outputGrad + frameSize;
} else {
gruValue.gateValue -= frameSize * 3;
gruValue.resetOutputValue -= frameSize;
gruValue.outputValue -= frameSize;
gruGrad.gateGrad -= frameSize * 3;
gruGrad.resetOutputGrad -= frameSize;
gruGrad.outputGrad -= frameSize;
gruValue.prevOutValue = gruValue.outputValue - frameSize;
gruGrad.prevOutGrad = gruGrad.outputGrad - frameSize;
}
};
{
AsyncGpuBlock asyncGpuBlock;
for (size_t n = 0; n < numSequences; ++n) {
int length;
if (reversed_) {
length = starts[n + 1] - starts[n];
} else {
// Non-reversed layers visit the sequences themselves in reverse order,
// matching the pointer seek to the end of the batch above.
length = starts[numSequences - n] - starts[numSequences - n - 1];
}
for (int l = 0; l < length; ++l) {
// The temporally-first frame of a sequence has no previous output.
if (l == length - 1) {
gruValue.prevOutValue = nullptr;
gruGrad.prevOutGrad = nullptr;
}
if (useGpu_) {
GruCompute::backward<1>(gruValue, gruGrad, getSize());
} else {
GruCompute::backward<0>(gruValue, gruGrad, getSize());
}
nextFrame(reversed_, getSize());
}
}
}
// Accumulate the gate gradients into the layer's input gradient.
if (inputGrad) {
inputGrad->add(*gate_.grad);
}
// Bias gradient is the column-wise sum of the gate gradients.
if (bias_ && bias_->getWGrad()) {
bias_->getWGrad()->collectBias(*gate_.grad, 1);
}
}