本文整理汇总了C++中MatrixPtr::addBias方法的典型用法代码示例。如果您正苦于以下问题:C++ MatrixPtr::addBias方法的具体用法?C++ MatrixPtr::addBias怎么用?C++ MatrixPtr::addBias使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MatrixPtr
的用法示例。
在下文中一共展示了MatrixPtr::addBias方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: forward
void ExpandLayer::forward(PassType passType) {
  Layer::forward(passType);
  // An expand layer takes exactly two inputs: input 0 supplies the data
  // rows, input 1 supplies only the sequence layout to expand into.
  CHECK_EQ(2U, inputLayers_.size());
  const Argument& dataInput = getInput(0);
  const Argument& shapeInput = getInput(1);
  size_t expandedBatchSize = shapeInput.getBatchSize();
  auto seqStartPos = type_ ? shapeInput.subSequenceStartPositions
                           : shapeInput.sequenceStartPositions;
  size_t seqNum = seqStartPos->getSize() - 1;
  const int* seqStarts = seqStartPos->getData(false);
  // The last start position must close exactly at the shape batch size.
  CHECK_EQ(seqStarts[seqNum], shapeInput.getBatchSize());
  if (type_) {
    // when trans_type = seq, input[1] must contain sub-sequences
    CHECK_EQ(shapeInput.hasSubseq(), 1UL);
    CHECK_EQ(dataInput.getNumSequences(), shapeInput.getNumSequences());
  } else {
    CHECK_EQ(dataInput.getBatchSize(), shapeInput.getNumSequences());
  }
  // The output inherits the sequence layout of the shape input.
  output_.sequenceStartPositions = shapeInput.sequenceStartPositions;
  if (shapeInput.hasSubseq()) {
    output_.subSequenceStartPositions = shapeInput.subSequenceStartPositions;
  }
  // Allocate the expanded output: one row per position of the shape input.
  reserveOutput(expandedBatchSize, dataInput.value->getWidth());
  MatrixPtr inVal = getInputValue(0);
  MatrixPtr outVal = getOutputValue();
  ICpuGpuVector::resizeOrCreate(expandStartsPos_, expandedBatchSize, false);
  int* rowToSeq = expandStartsPos_->getMutableData(false);
  // Map every expanded output row back to the index of its source sequence.
  for (size_t seqId = 0; seqId < seqNum; ++seqId) {
    for (int pos = seqStarts[seqId]; pos < seqStarts[seqId + 1]; ++pos) {
      rowToSeq[pos] = seqId;
    }
  }
  outVal->copyByRowIndex(*inVal, *expandStartsPos_->getVector(useGpu_));
  if (biases_.get() != NULL) {
    outVal->addBias(*(biases_->getW()), 1);
  }
}
示例2: getOutputValue
void DeConv3DLayer::addBias() {
MatrixPtr outMat = getOutputValue();
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
if (this->sharedBiases_) {
outMat->addSharedBias(*(bias), 1.0f);
} else {
outMat->addBias(*(bias), 1.0f);
}
}
示例3: forward
void AverageLayer::forward(PassType passType) {
  SequencePoolLayer::forward(passType);
  // Pool each sequence down to its per-sequence average row.
  MatrixPtr inVal = getInputValue(0);
  MatrixPtr outVal = getOutputValue();
  outVal->sequenceAvgForward(
      *inVal, *startPositions_->getVector(useGpu_), mode_);
  // The bias vector is added AFTER the averaging, not before.
  if (biases_.get() != NULL) {
    outVal->addBias(*(biases_->getW()), 1);
  }
  /* activation */
  forwardActivation();
}
示例4: forward
void MixedLayer::forward(PassType passType) {
  Layer::forward(passType);
  int batchSize = getInput(0).getBatchSize();
  int size = getSize();
  {
    REGISTER_TIMER_INFO("FwResetTimer", getName().c_str());
    resetOutput(batchSize, size);
  }
  MatrixPtr outVal = getOutputValue();
  // Accumulate every configured projection into the output.
  for (size_t idx = 0; idx != inputLayers_.size(); ++idx) {
    if (projections_[idx]) {
      projections_[idx]->forward(&getInput(idx), &output_, passType);
    }
  }
  // Run each operator over its configured subset of the inputs.
  // The argument vector is reused (cleared, not reallocated) per operator.
  std::vector<const Argument*> opInputs;
  for (auto& op : operators_) {
    opInputs.clear();
    for (auto& inputIndex : op->getConfig().input_indices()) {
      opInputs.push_back(&getInput(inputIndex));
    }
    op->forward(opInputs, &output_, passType);
  }
  /* add the bias-vector */
  if (biases_.get() != NULL) {
    REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
    outVal->addBias(*(biases_->getW()), 1, sharedBias_);
  }
  /* activation */ {
    REGISTER_TIMER_INFO("FwAtvTimer", getName().c_str());
    forwardActivation();
  }
}
示例5: forward
void SequenceConcatLayer::forward(PassType passType) {
  Layer::forward(passType);
  size_t dim = getSize();
  // Both inputs must carry the same number of sequences, each of width
  // `dim`, with start positions that close exactly at their batch sizes.
  const Argument& leftArg = getInput(0);
  const Argument& rightArg = getInput(1);
  size_t leftSeqNum = leftArg.getNumSequences();
  size_t rightSeqNum = rightArg.getNumSequences();
  auto leftStartPos = leftArg.sequenceStartPositions->getVector(false);
  auto rightStartPos = rightArg.sequenceStartPositions->getVector(false);
  CHECK_EQ(dim, leftArg.value->getWidth());
  CHECK_EQ(leftStartPos->getData()[leftSeqNum], leftArg.getBatchSize());
  CHECK_EQ(leftSeqNum, leftStartPos->getSize() - 1);
  CHECK_EQ(dim, rightArg.value->getWidth());
  CHECK_EQ(rightStartPos->getData()[rightSeqNum], rightArg.getBatchSize());
  CHECK_EQ(rightSeqNum, rightStartPos->getSize() - 1);
  CHECK_EQ(leftSeqNum, rightSeqNum);
  MatrixPtr leftVal = getInputValue(0);
  MatrixPtr rightVal = getInputValue(1);
  // The output holds every row of both inputs, sequence by sequence.
  reserveOutput(leftVal->getHeight() + rightVal->getHeight(), dim);
  MatrixPtr outVal = getOutputValue();
  const int* leftStarts = leftStartPos->getData();
  const int* rightStarts = rightStartPos->getData();
  {
    AsyncGpuBlock asyncGpuBlock;
    REGISTER_TIMER_INFO("SequenceConcatLayerForward", getName().c_str());
    size_t dstRow = 0;
    for (size_t seqId = 0; seqId < leftSeqNum; ++seqId) {
      // Copy sequence seqId from the left input, then from the right,
      // back to back into the output.
      size_t leftLen = leftStarts[seqId + 1] - leftStarts[seqId];
      outVal->subMatrix(dstRow, leftLen)
          ->assign(*(leftVal->subMatrix(leftStarts[seqId], leftLen)));
      dstRow += leftLen;
      size_t rightLen = rightStarts[seqId + 1] - rightStarts[seqId];
      outVal->subMatrix(dstRow, rightLen)
          ->assign(*(rightVal->subMatrix(rightStarts[seqId], rightLen)));
      dstRow += rightLen;
    }
    // Rebuild the output's sequence start positions: each concatenated
    // sequence length is the sum of the two source lengths.
    ICpuGpuVector::resizeOrCreate(
        output_.sequenceStartPositions, leftSeqNum + 1, false);
    int* mergedStarts = output_.sequenceStartPositions->getMutableData(false);
    for (size_t seqId = 0; seqId <= leftSeqNum; ++seqId) {
      mergedStarts[seqId] = leftStarts[seqId] + rightStarts[seqId];
    }
  }
  if (biases_.get() != NULL) {
    outVal->addBias(*(biases_->getW()), 1);
  }
  /* activation */
  forwardActivation();
}