This article collects typical usage examples of the C++ Net::forward method. If you are wondering what Net::forward does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the Net class that this method belongs to.
The following shows 15 code examples of Net::forward, sorted by popularity by default.
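Before the examples, here is a minimal sketch of the typical cv::dnn::Net::forward call pattern in OpenCV: load a model, convert an image into a 4D blob, bind it as the network input, and run a forward pass. The model and image paths below are placeholders for illustration only, not files used by the examples that follow.

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Placeholder paths: substitute a real prototxt/caffemodel pair.
    Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

    // Build a 4D NCHW blob from an image (scale factor, target size, mean, swapRB).
    Mat img = imread("input.jpg");
    Mat blob = blobFromImage(img, 1.0, Size(224, 224), Scalar(), /*swapRB=*/false);

    // Bind the blob to the network input and run inference.
    // Called without arguments, forward() returns the blob of the default output layer.
    net.setInput(blob);
    Mat prob = net.forward();
    return 0;
}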
Example 1: readNetFromCaffe
TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
{
    Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
                               findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
    int targetId = GetParam();
    net.setPreferableTarget(targetId);

    Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(227,227), Scalar(), false);
    ASSERT_TRUE(!input.empty());

    Mat out;
    if (targetId == DNN_TARGET_OPENCL)
    {
        // Firstly set a wrong input blob and run the model to receive a wrong output.
        // Then set a correct input blob to check CPU->GPU synchronization is working well.
        net.setInput(input * 2.0f);
        out = net.forward();
    }
    net.setInput(input);
    out = net.forward();

    Mat ref = blobFromNPY(_tf("squeezenet_v1.1_prob.npy"));
    normAssert(ref, out);
}
Example 2: findDataFile
TEST(Reproducibility_MobileNet_SSD, Accuracy)
{
    const string proto = findDataFile("dnn/MobileNetSSD_deploy.prototxt", false);
    const string model = findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false);
    Net net = readNetFromCaffe(proto, model);

    Mat sample = imread(_tf("street.png"));
    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inp);
    Mat out = net.forward();

    Mat ref = blobFromNPY(_tf("mobilenet_ssd_caffe_out.npy"));
    normAssert(ref, out);

    // Check that detections aren't preserved.
    inp.setTo(0.0f);
    net.setInput(inp);
    out = net.forward();

    const int numDetections = out.size[2];
    ASSERT_NE(numDetections, 0);
    for (int i = 0; i < numDetections; ++i)
    {
        float confidence = out.ptr<float>(0, 0, i)[2];
        ASSERT_EQ(confidence, 0);
    }
}
Example 3: imread
TEST(Test_Darknet, read_yolo_voc_stream)
{
    Mat ref;
    Mat sample = imread(_tf("dog416.png"));
    Mat inp = blobFromImage(sample, 1.0/255, Size(416, 416), Scalar(), true, false);
    const std::string cfgFile = findDataFile("dnn/yolo-voc.cfg", false);
    const std::string weightsFile = findDataFile("dnn/yolo-voc.weights", false);

    // Import by paths.
    {
        Net net = readNetFromDarknet(cfgFile, weightsFile);
        net.setInput(inp);
        net.setPreferableBackend(DNN_BACKEND_OPENCV);
        ref = net.forward();
    }

    // Import from bytes array.
    {
        std::string cfg, weights;
        readFileInMemory(cfgFile, cfg);
        readFileInMemory(weightsFile, weights);

        Net net = readNetFromDarknet(&cfg[0], cfg.size(), &weights[0], weights.size());
        net.setInput(inp);
        net.setPreferableBackend(DNN_BACKEND_OPENCV);
        Mat out = net.forward();
        normAssert(ref, out);
    }
}
Example 4: testInPlaceActivation
void testInPlaceActivation(LayerParams& lp)
{
    EXPECT_FALSE(lp.name.empty());

    LayerParams pool;
    pool.set("pool", "ave");
    pool.set("kernel_w", 2);
    pool.set("kernel_h", 2);
    pool.set("stride_w", 2);
    pool.set("stride_h", 2);
    pool.type = "Pooling";

    Net net;
    int poolId = net.addLayer(pool.name, pool.type, pool);
    net.connect(0, 0, poolId, 0);
    net.addLayerToPrev(lp.name, lp.type, lp);

    Mat input({1, kNumChannels, 10, 10}, CV_32F);
    randu(input, -1.0f, 1.0f);
    net.setInput(input);
    Mat outputDefault = net.forward(lp.name).clone();

    net.setInput(input);
    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    Mat outputHalide = net.forward(lp.name).clone();
    normAssert(outputDefault, outputHalide);
}
Example 5: findDataFile
TEST(Torch_Importer, ENet_accuracy)
{
    Net net;
    {
        const string model = findDataFile("dnn/Enet-model-best.net", false);
        net = readNetFromTorch(model, true);
        ASSERT_FALSE(net.empty());
    }

    Mat sample = imread(_tf("street.png", false));
    Mat inputBlob = blobFromImage(sample, 1./255);

    net.setInput(inputBlob, "");
    Mat out = net.forward();
    Mat ref = blobFromNPY(_tf("torch_enet_prob.npy", false));
    // Due to numerical instability in Pooling-Unpooling layers (index jittering)
    // the thresholds for ENet must be relaxed. Accuracy of results was checked on the
    // Cityscapes dataset and the difference in mIOU with Torch is 10E-4%.
    normAssert(ref, out, "", 0.00044, 0.44);

    const int N = 3;
    for (int i = 0; i < N; i++)
    {
        net.setInput(inputBlob, "");
        Mat out = net.forward();
        normAssert(ref, out, "", 0.00044, 0.44);
    }
}
Example 6: weights
TEST_P(Concat, Accuracy)
{
    Vec3i inSize = get<0>(GetParam());
    Vec3i numChannels = get<1>(GetParam());

    Net net;

    std::vector<int> convLayerIds;
    convLayerIds.reserve(numChannels.channels);
    for (int i = 0, n = numChannels.channels; i < n; ++i)
    {
        if (!numChannels[i])
            break;

        Mat weights({numChannels[i], inSize[0], 1, 1}, CV_32F);
        randu(weights, -1.0f, 1.0f);

        LayerParams convParam;
        convParam.set("kernel_w", 1);
        convParam.set("kernel_h", 1);
        convParam.set("num_output", numChannels[i]);
        convParam.set("bias_term", false);
        convParam.type = "Convolution";
        std::ostringstream ss;
        ss << "convLayer" << i;
        convParam.name = ss.str();
        convParam.blobs.push_back(weights);

        int layerId = net.addLayer(convParam.name, convParam.type, convParam);
        convLayerIds.push_back(layerId);
        net.connect(0, 0, layerId, 0);
    }

    LayerParams concatParam;
    concatParam.type = "Concat";
    concatParam.name = "testLayer";
    int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
    net.connect(0, 0, concatId, 0);
    for (int i = 0; i < convLayerIds.size(); ++i)
    {
        net.connect(convLayerIds[i], 0, concatId, i + 1);
    }

    Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
    randu(input, -1.0f, 1.0f);
    net.setInput(input);
    Mat outputDefault = net.forward(concatParam.name).clone();

    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    Mat outputHalide = net.forward(concatParam.name).clone();
    normAssert(outputDefault, outputHalide);
}
Example 7: findDataFile
TEST_P(Reproducibility_MobileNet_SSD, Accuracy)
{
    const string proto = findDataFile("dnn/MobileNetSSD_deploy.prototxt", false);
    const string model = findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false);
    Net net = readNetFromCaffe(proto, model);
    int targetId = GetParam();
    const float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 1.5e-4 : 1e-5;
    const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-4;

    net.setPreferableTarget(targetId);

    Mat sample = imread(_tf("street.png"));
    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inp);
    Mat out = net.forward();

    const float scores_diff = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-5;
    const float boxes_iou_diff = (targetId == DNN_TARGET_OPENCL_FP16) ? 5e-3 : 1e-4;
    Mat ref = blobFromNPY(_tf("mobilenet_ssd_caffe_out.npy"));
    normAssertDetections(ref, out, "", 0.0, scores_diff, boxes_iou_diff);

    // Check that detections aren't preserved.
    inp.setTo(0.0f);
    net.setInput(inp);
    out = net.forward();
    out = out.reshape(1, out.total() / 7);

    const int numDetections = out.rows;
    ASSERT_NE(numDetections, 0);
    for (int i = 0; i < numDetections; ++i)
    {
        float confidence = out.ptr<float>(i)[2];
        ASSERT_EQ(confidence, 0);
    }

    // Check batching mode.
    ref = ref.reshape(1, numDetections);
    inp = blobFromImages(std::vector<Mat>(2, sample), 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inp);
    Mat outBatch = net.forward();

    // The output blob has shape 1x1x2Nx7, where N is the number of detections for a
    // single sample in the batch. The first element of each detection vector is the batch id.
    outBatch = outBatch.reshape(1, outBatch.total() / 7);
    EXPECT_EQ(outBatch.rows, 2 * numDetections);
    normAssert(outBatch.rowRange(0, numDetections), ref, "", l1, lInf);
    normAssert(outBatch.rowRange(numDetections, 2 * numDetections).colRange(1, 7), ref.colRange(1, 7),
               "", l1, lInf);
}
Example 8: test
static void test(LayerParams& params, Mat& input)
{
    randu(input, -1.0f, 1.0f);

    Net net;
    int lid = net.addLayer(params.name, params.type, params);
    net.connect(0, 0, lid, 0);

    net.setInput(input);
    Mat outputDefault = net.forward(params.name).clone();

    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    Mat outputHalide = net.forward(params.name).clone();
    normAssert(outputDefault, outputHalide);
}
Example 9: SkipTestException
TEST_P(Test_ONNX_nets, Googlenet)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");

    const String model = _tf("models/googlenet.onnx");

    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    std::vector<Mat> images;
    images.push_back( imread(_tf("../googlenet_0.png")) );
    images.push_back( imread(_tf("../googlenet_1.png")) );
    Mat inp = blobFromImages(images, 1.0f, Size(), Scalar(), false);
    Mat ref = blobFromNPY(_tf("../googlenet_prob.npy"));
    checkBackend(&inp, &ref);

    net.setInput(inp);
    ASSERT_FALSE(net.empty());
    Mat out = net.forward();

    normAssert(ref, out, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
}
Example 10: runTorchNet
void runTorchNet(const String& prefix, String outLayerName = "",
                 bool check2ndBlob = false, bool isBinary = false,
                 double l1 = 0.0, double lInf = 0.0)
{
    String suffix = (isBinary) ? ".dat" : ".txt";

    Mat inp, outRef;
    ASSERT_NO_THROW( inp = readTorchBlob(_tf(prefix + "_input" + suffix), isBinary) );
    ASSERT_NO_THROW( outRef = readTorchBlob(_tf(prefix + "_output" + suffix), isBinary) );

    checkBackend(backend, target, &inp, &outRef);

    Net net = readNetFromTorch(_tf(prefix + "_net" + suffix), isBinary);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    if (outLayerName.empty())
        outLayerName = net.getLayerNames().back();

    net.setInput(inp);
    std::vector<Mat> outBlobs;
    net.forward(outBlobs, outLayerName);

    l1 = l1 ? l1 : default_l1;
    lInf = lInf ? lInf : default_lInf;
    normAssert(outRef, outBlobs[0], "", l1, lInf);

    if (check2ndBlob && backend != DNN_BACKEND_INFERENCE_ENGINE)
    {
        Mat out2 = outBlobs[1];
        Mat ref2 = readTorchBlob(_tf(prefix + "_output_2" + suffix), isBinary);
        normAssert(out2, ref2, "", l1, lInf);
    }
}
Example 11: SkipTestException
TEST_P(Test_Torch_nets, OpenFace_accuracy)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
#endif
    checkBackend();
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
        throw SkipTestException("");

    const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
    Net net = readNetFromTorch(model);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat sample = imread(findDataFile("cv/shared/lena.png", false));
    Mat sampleF32(sample.size(), CV_32FC3);
    sample.convertTo(sampleF32, sampleF32.type());
    sampleF32 /= 255;
    resize(sampleF32, sampleF32, Size(96, 96), 0, 0, INTER_NEAREST);

    Mat inputBlob = blobFromImage(sampleF32, 1.0, Size(), Scalar(), /*swapRB*/true);

    net.setInput(inputBlob);
    Mat out = net.forward();

    Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
    normAssert(out, outRef, "", default_l1, default_lInf);
}
Example 12: checkBackend
TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
{
    checkBackend();

    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(blob);
    // The output has shape 1x1xNx7, where N is the number of detections.
    // Each detection is a vector of values [id, classId, confidence, left, top, right, bottom].
    Mat out = net.forward();

    // References are from the test for the Caffe model.
    Mat ref = (Mat_<float>(6, 7) << 0, 1, 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
                                    0, 1, 0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
                                    0, 1, 0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
                                    0, 1, 0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
                                    0, 1, 0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
                                    0, 1, 0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 3.4e-3;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.024 : 1e-2;
    normAssertDetections(ref, out, "", 0.9, scoreDiff, iouDiff);
}
Example 13: findDataFile
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
    std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
    std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
    std::string imgPath = findDataFile("dnn/street.png", false);

    Mat inp;
    resize(imread(imgPath), inp, Size(300, 300));
    inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);

    std::vector<String> outNames(3);
    outNames[0] = "concat";
    outNames[1] = "concat_1";
    outNames[2] = "detection_out";

    std::vector<Mat> target(outNames.size());
    for (int i = 0; i < outNames.size(); ++i)
    {
        std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
        target[i] = blobFromNPY(path);
    }

    Net net = readNetFromTensorflow(netPath, netConfig);
    net.setPreferableTarget(GetParam());

    net.setInput(inp);

    std::vector<Mat> output;
    net.forward(output, outNames);

    normAssert(target[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
    normAssert(target[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
    normAssert(target[2].reshape(1, 1), output[2].reshape(1, 1), "", 4e-5, 1e-2);
}
Example 14: test
template <typename DataLoader>
void test(
    Net& model,
    torch::Device device,
    DataLoader& data_loader,
    size_t dataset_size) {
  torch::NoGradGuard no_grad;
  model.eval();
  double test_loss = 0;
  int32_t correct = 0;
  for (const auto& batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    auto output = model.forward(data);
    test_loss += torch::nll_loss(
                     output,
                     targets,
                     /*weight=*/{},
                     Reduction::Sum)
                     .template item<float>();
    auto pred = output.argmax(1);
    correct += pred.eq(targets).sum().template item<int64_t>();
  }

  test_loss /= dataset_size;
  std::printf(
      "\nTest set: Average loss: %.4f | Accuracy: %.3f\n",
      test_loss,
      static_cast<double>(correct) / dataset_size);
}
Example 15: train
template <typename DataLoader>
void train(
    int32_t epoch,
    Net& model,
    torch::Device device,
    DataLoader& data_loader,
    torch::optim::Optimizer& optimizer,
    size_t dataset_size) {
  model.train();
  size_t batch_idx = 0;
  for (auto& batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    optimizer.zero_grad();
    auto output = model.forward(data);
    auto loss = torch::nll_loss(output, targets);
    AT_ASSERT(!std::isnan(loss.template item<float>()));
    loss.backward();
    optimizer.step();

    if (batch_idx++ % kLogInterval == 0) {
      std::printf(
          "\rTrain Epoch: %d [%5zu/%5zu] Loss: %.4f",
          epoch,
          batch_idx * batch.data.size(0),
          dataset_size,
          loss.template item<float>());
    }
  }
}
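Note that Net in Examples 14 and 15 is not cv::dnn::Net but a user-defined LibTorch module, so model.forward(data) dispatches to that module's own forward method. The following is a minimal illustrative sketch of such a module; the layer sizes are made up for the illustration and are not the architecture used by the examples above.

#include <torch/torch.h>

// Minimal illustrative module: two fully connected layers over flattened 28x28 images.
struct Net : torch::nn::Module {
  Net() : fc1(784, 128), fc2(128, 10) {
    register_module("fc1", fc1);
    register_module("fc2", fc2);
  }

  // Returns log-probabilities, matching the torch::nll_loss calls above.
  torch::Tensor forward(torch::Tensor x) {
    x = torch::relu(fc1->forward(x.reshape({x.size(0), 784})));
    return torch::log_softmax(fc2->forward(x), /*dim=*/1);
  }

  torch::nn::Linear fc1, fc2;
};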