

C++ Blob Class Code Examples

This article collects typical usage examples of the C++ Blob class. If you are unsure what the Blob class is for, how to use it, or what real code that uses it looks like, the curated class examples below may help.


Fifteen code examples of the Blob class are shown below, sorted by popularity by default.

Example 1: TYPED_TEST

TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
  typedef typename TypeParam::Dtype Dtype;
  const int kernel_h = 11;
  const int kernel_w = 13;
  vector<int> bottom_shape(4);
  bottom_shape[0] = 15;
  bottom_shape[1] = 18;
  bottom_shape[2] = kernel_h * 2;
  bottom_shape[3] = kernel_w * 2;
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) {
    this->blob_bottom_vec_[i]->Reshape(bottom_shape);
    filler.Fill(this->blob_bottom_vec_[i]);
  }
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_num_output(12);
  convolution_param->set_bias_term(false);
  convolution_param->set_group(6);
  convolution_param->set_kernel_h(kernel_h);
  convolution_param->set_kernel_w(kernel_w);
  convolution_param->mutable_weight_filler()->set_type("gaussian");
  Blob<Dtype> weights;
  Blob<Dtype> top_diff;
  // Shape and fill weights and top_diff.
  bool copy_diff;
  bool reshape;
  {
    ConvolutionLayer<Dtype> layer(layer_param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    top_diff.ReshapeLike(*this->blob_top_);
    filler.Fill(&top_diff);
    ASSERT_EQ(1, layer.blobs().size());
    copy_diff = false; reshape = true;
    weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape);
  }
  vector<bool> propagate_down(1, true);
  Blob<Dtype> result_2d;
  Blob<Dtype> backward_result_2d;
  Blob<Dtype> backward_weight_result_2d;
  // Test with 2D im2col
  {
    caffe_set(this->blob_top_->count(), Dtype(0),
              this->blob_top_->mutable_cpu_data());
    caffe_set(this->blob_bottom_->count(), Dtype(0),
              this->blob_bottom_->mutable_cpu_diff());
    caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff());
    // Do SetUp and Forward; save Forward result in result_2d.
    convolution_param->set_force_nd_im2col(false);
    ConvolutionLayer<Dtype> layer_2d(layer_param);
    layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    ASSERT_EQ(1, layer_2d.blobs().size());
    copy_diff = false; reshape = false;
    layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape);
    layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    copy_diff = false; reshape = true;
    result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape);
    // Copy pre-generated top diff into actual top diff;
    // do Backward and save result in backward_result_2d.
    ASSERT_EQ(this->blob_top_->shape(), top_diff.shape());
    caffe_copy(top_diff.count(), top_diff.cpu_data(),
               this->blob_top_->mutable_cpu_diff());
    layer_2d.Backward(this->blob_top_vec_, propagate_down,
                      this->blob_bottom_vec_);
    copy_diff = true; reshape = true;
    backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape);
    backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape);
  }
  Blob<Dtype> result_nd;
  Blob<Dtype> backward_result_nd;
  Blob<Dtype> backward_weight_result_nd;
  // Test with ND im2col
  {
    caffe_set(this->blob_top_->count(), Dtype(0),
              this->blob_top_->mutable_cpu_data());
    caffe_set(this->blob_bottom_->count(), Dtype(0),
              this->blob_bottom_->mutable_cpu_diff());
    caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff());
    // Do SetUp and Forward; save Forward result in result_nd.
    convolution_param->set_force_nd_im2col(true);
    ConvolutionLayer<Dtype> layer_nd(layer_param);
    layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    ASSERT_EQ(1, layer_nd.blobs().size());
    copy_diff = false; reshape = false;
    layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape);
    layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    copy_diff = false; reshape = true;
    result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape);
    // Copy pre-generated top diff into actual top diff;
    // do Backward and save result in backward_result_nd.
    ASSERT_EQ(this->blob_top_->shape(), top_diff.shape());
    caffe_copy(top_diff.count(), top_diff.cpu_data(),
               this->blob_top_->mutable_cpu_diff());
    layer_nd.Backward(this->blob_top_vec_, propagate_down,
                      this->blob_bottom_vec_);
    copy_diff = true; reshape = true;
    backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape);
    backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape);
//......... some code omitted here .........
Developer: ALISCIFP, Project: caffe-stn, Lines: 101, Source: test_convolution_layer.cpp
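
The omitted tail of this test presumably compares the two im2col paths. The following is a hedged sketch of that kind of check, written as an illustration rather than the project's actual code; it only uses the blobs already declared above and standard gtest macros.

// Hypothetical continuation (not the project's code): check that both im2col
// paths produced identical forward outputs and backward gradients.
ASSERT_EQ(result_nd.count(), result_2d.count());
for (int i = 0; i < result_2d.count(); ++i) {
  EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]);
}
ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count());
for (int i = 0; i < backward_result_2d.count(); ++i) {
  EXPECT_EQ(backward_result_2d.cpu_diff()[i], backward_result_nd.cpu_diff()[i]);
}
ASSERT_EQ(backward_weight_result_nd.count(), backward_weight_result_2d.count());
for (int i = 0; i < backward_weight_result_2d.count(); ++i) {
  EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i],
            backward_weight_result_nd.cpu_diff()[i]);
}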

Example 2: TEST_F

TEST_F(TestProducer, ContentKeyRequest)
{
  Name prefix("/prefix");
  Name suffix("/a/b/c");
  Name expectedInterest(prefix);
  expectedInterest.append(Encryptor::getNAME_COMPONENT_READ());
  expectedInterest.append(suffix);
  expectedInterest.append(Encryptor::getNAME_COMPONENT_E_KEY());

  Name cKeyName(prefix);
  cKeyName.append(Encryptor::getNAME_COMPONENT_SAMPLE());
  cKeyName.append(suffix);
  cKeyName.append(Encryptor::getNAME_COMPONENT_C_KEY());

  Name timeMarker("20150101T100000/20150101T120000");
  MillisecondsSince1970 testTime1 = fromIsoString("20150101T100001");
  MillisecondsSince1970 testTime2 = fromIsoString("20150101T110001");
  Name::Component testTimeRounded1("20150101T100000");
  Name::Component testTimeRounded2("20150101T110000");
  Name::Component testTimeComponent2("20150101T110001");

  // Create content keys required for this test case:
  for (size_t i = 0; i < suffix.size(); ++i) {
    createEncryptionKey(expectedInterest, timeMarker);
    expectedInterest = expectedInterest.getPrefix(-2).append
      (Encryptor::getNAME_COMPONENT_E_KEY());
  }

  int expressInterestCallCount = 0;

  // Prepare a TestFace to instantly answer calls to expressInterest.
  class TestFace : public Face {
  public:
    TestFace(TestProducer* parent, const Name& timeMarker,
             int* expressInterestCallCount)
    : Face("localhost"),
      parent_(parent),
      timeMarker_(timeMarker),
      expressInterestCallCount_(expressInterestCallCount)
    {}

    virtual uint64_t
    expressInterest
      (const Interest& interest, const OnData& onData,
       const OnTimeout& onTimeout, const OnNetworkNack& onNetworkNack,
       WireFormat& wireFormat = *WireFormat::getDefaultWireFormat())
    {
      ++(*expressInterestCallCount_);

      Name interestName(interest.getName());
      interestName.append(timeMarker_);
      if (parent_->encryptionKeys.find(interestName) == parent_->encryptionKeys.end())
        throw runtime_error
          ("TestFace::expressInterest: Can't find " + interestName.toUri());
      onData(ptr_lib::make_shared<Interest>(interest),
             parent_->encryptionKeys[interestName]);

      return 0;
    }

  private:
    TestProducer* parent_;
    Name timeMarker_;
    int *expressInterestCallCount_;
  };

  TestFace face(this, timeMarker, &expressInterestCallCount);

  // Verify that the content key is correctly encrypted for each domain, and
  // the produce method encrypts the provided data with the same content key.
  ptr_lib::shared_ptr<ProducerDb> testDb(new Sqlite3ProducerDb(databaseFilePath));
  Producer producer(prefix, suffix, &face, keyChain.get(), testDb);
  Blob contentKey;

  // An initial test to confirm that keys are created for this time slot.
  Name contentKeyName1 = producer.createContentKey
    (testTime1,
     bind(&TestProducer::checkEncryptionKeys, this, _1, testTime1,
          testTimeRounded1, 3, &expressInterestCallCount, &contentKey, cKeyName,
          testDb));

  // Verify that we do not repeat the search for e-keys. The total
  //   expressInterestCallCount should be the same.
  Name contentKeyName2 = producer.createContentKey
    (testTime2,
     bind(&TestProducer::checkEncryptionKeys, this, _1, testTime2,
          testTimeRounded2, 3, &expressInterestCallCount, &contentKey, cKeyName,
          testDb));

  // Confirm content key names are correct
  ASSERT_EQ(cKeyName, contentKeyName1.getPrefix(-1));
  ASSERT_EQ(testTimeRounded1, contentKeyName1.get(6));
  ASSERT_EQ(cKeyName, contentKeyName2.getPrefix(-1));
  ASSERT_EQ(testTimeRounded2, contentKeyName2.get(6));

  // Confirm that produce encrypts with the correct key and has the right name.
  Data testData;
  producer.produce(testData, testTime2, Blob(DATA_CONTENT, sizeof(DATA_CONTENT)));

  const Name& producedName = testData.getName();
//......... some code omitted here .........
Developer: , Project: , Lines: 101, Source: 

Example 3: SkDebugf

void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
    // if we have RGB, then we won't have any SkShaders so no need to use a localmatrix.
    // TODO actually only invert if we don't have RGBA
    SkMatrix localMatrix;
    if (this->usesLocalCoords() && !this->viewMatrix().invert(&localMatrix)) {
        SkDebugf("Cannot invert viewmatrix\n");
        return;
    }

    GrTexture* texture = fFontCache->getTexture(this->maskFormat());
    if (!texture) {
        SkDebugf("Could not allocate backing texture for atlas\n");
        return;
    }

    GrMaskFormat maskFormat = this->maskFormat();

    FlushInfo flushInfo;
    if (this->usesDistanceFields()) {
        flushInfo.fGeometryProcessor.reset(
            this->setupDfProcessor(this->viewMatrix(), fFilteredColor, this->color(), texture));
    } else {
        GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
        flushInfo.fGeometryProcessor.reset(
            GrBitmapTextGeoProc::Create(this->color(),
                                        texture,
                                        params,
                                        maskFormat,
                                        localMatrix,
                                        this->usesLocalCoords()));
    }

    flushInfo.fGlyphsToFlush = 0;
    size_t vertexStride = flushInfo.fGeometryProcessor->getVertexStride();
    SkASSERT(vertexStride == GrAtlasTextBlob::GetVertexStride(maskFormat));

    int glyphCount = this->numGlyphs();
    const GrBuffer* vertexBuffer;

    void* vertices = target->makeVertexSpace(vertexStride,
                                             glyphCount * kVerticesPerGlyph,
                                             &vertexBuffer,
                                             &flushInfo.fVertexOffset);
    flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
    flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
    if (!vertices || !flushInfo.fVertexBuffer) {
        SkDebugf("Could not allocate vertices\n");
        return;
    }

    unsigned char* currVertex = reinterpret_cast<unsigned char*>(vertices);

    // We cache some values to avoid going to the glyphcache for the same fontScaler twice
    // in a row
    const SkDescriptor* desc = nullptr;
    SkGlyphCache* cache = nullptr;
    GrFontScaler* scaler = nullptr;
    SkTypeface* typeface = nullptr;

    GrBlobRegenHelper helper(this, target, &flushInfo);

    for (int i = 0; i < fGeoCount; i++) {
        const Geometry& args = fGeoData[i];
        Blob* blob = args.fBlob;
        size_t byteCount;
        void* blobVertices;
        int subRunGlyphCount;
        blob->regenInBatch(target, fFontCache, &helper, args.fRun, args.fSubRun, &cache,
                           &typeface, &scaler, &desc, vertexStride, args.fViewMatrix, args.fX,
                           args.fY, args.fColor, &blobVertices, &byteCount, &subRunGlyphCount);

        // now copy all vertices
        memcpy(currVertex, blobVertices, byteCount);

#ifdef SK_DEBUG
        // bounds sanity check
        SkRect rect;
        rect.setLargestInverted();
        SkPoint* vertex = (SkPoint*) ((char*)blobVertices);
        rect.growToInclude(vertex, vertexStride, kVerticesPerGlyph * subRunGlyphCount);

        if (this->usesDistanceFields()) {
            args.fViewMatrix.mapRect(&rect);
        }
        // Allow for small numerical error in the bounds.
        SkRect bounds = fBounds;
        bounds.outset(0.001f, 0.001f);
        SkASSERT(bounds.contains(rect));
#endif

        currVertex += byteCount;
    }

    // Make sure to attach the last cache if applicable
    if (cache) {
        SkGlyphCache::AttachCache(cache);
    }
    this->flush(target, &flushInfo);
}
Developer: BertiKarsunke, Project: skia, Lines: 99, Source: GrAtlasTextBatch.cpp

示例4: out

    void ZimCreator::createClusters(ArticleSource& src, const std::string& tmpfname)
    {
      std::ofstream out(tmpfname.c_str());

      Cluster cluster;
      cluster.setCompression(compression);

      DirentsType::size_type count = 0, progress = 0;
      for (DirentsType::iterator di = dirents.begin(); out && di != dirents.end(); ++di, ++count)
      {
        while (progress < count * 100 / dirents.size() + 1)
        {
          INFO(progress << "% ready");
          progress += 10;
        }

        if (di->isRedirect())
          continue;

        Blob blob = src.getData(di->getAid());
        if (blob.size() > 0)
          isEmpty = false;

        if (di->isCompress())
        {
          di->setCluster(clusterOffsets.size(), cluster.count());
          cluster.addBlob(blob);
          if (cluster.size() >= minChunkSize * 1024)
          {
            log_info("compress cluster with " << cluster.count() << " articles, " << cluster.size() << " bytes; current title \"" << di->getTitle() << '\"');

            clusterOffsets.push_back(out.tellp());
            out << cluster;
            log_debug("cluster compressed");
            cluster.clear();
            cluster.setCompression(compression);
          }
        }
        else
        {
          if (cluster.count() > 0)
          {
            clusterOffsets.push_back(out.tellp());
            cluster.setCompression(compression);
            out << cluster;
            cluster.clear();
            cluster.setCompression(compression);
          }

          di->setCluster(clusterOffsets.size(), cluster.count());
          clusterOffsets.push_back(out.tellp());
          Cluster c;
          c.addBlob(blob);
          c.setCompression(zimcompNone);
          out << c;
        }
      }

      if (cluster.count() > 0)
      {
        clusterOffsets.push_back(out.tellp());
        cluster.setCompression(compression);
        out << cluster;
      }

      if (!out)
        throw std::runtime_error("failed to write temporary cluster file");

      clustersSize = out.tellp();
    }
Developer: gema-arta, Project: zim-vendor, Lines: 70, Source: zimcreator.cpp

Example 5: Reshape

void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.num(), other.channels(), other.height(), other.width());
}
Developer: ZhangSirM, Project: caffe-SPPNet, Lines: 3, Source: blob.cpp
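
A minimal usage sketch for ReshapeLike, assuming Caffe's blob.hpp is available; the shape values are arbitrary illustration values, not taken from the project above.

#include "caffe/blob.hpp"

using caffe::Blob;

void reshape_like_demo() {
  Blob<float> reference(2, 3, 4, 5);  // num, channels, height, width
  Blob<float> follower;
  follower.ReshapeLike(reference);    // follower now has the same 2x3x4x5 shape
  // Both blobs report the same element count after the call.
  CHECK_EQ(reference.count(), follower.count());
}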

Example 6:

void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}
Developer: siddharthachandra, Project: gcrf, Lines: 4, Source: blob.cpp

Example 7: getECIESSecret

Blob CKey::encryptECIES (CKey& otherKey, Blob const& plaintext)
{

    ECIES_ENC_IV_TYPE iv;
    RandomNumbers::getInstance ().fillBytes (iv.begin (), ECIES_ENC_BLK_SIZE);

    ECIES_ENC_KEY_TYPE secret;
    ECIES_HMAC_KEY_TYPE hmacKey;

    getECIESSecret (otherKey, secret, hmacKey);
    ECIES_HMAC_TYPE hmac = makeHMAC (hmacKey, plaintext);
    hmacKey.zero ();

    EVP_CIPHER_CTX ctx;
    EVP_CIPHER_CTX_init (&ctx);

    if (EVP_EncryptInit_ex (&ctx, ECIES_ENC_ALGO, NULL, secret.begin (), iv.begin ()) != 1)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        secret.zero ();
        throw std::runtime_error ("init cipher ctx");
    }

    secret.zero ();

    Blob out (plaintext.size () + ECIES_HMAC_SIZE + ECIES_ENC_KEY_SIZE + ECIES_ENC_BLK_SIZE, 0);
    int len = 0, bytesWritten;

    // output IV
    memcpy (& (out.front ()), iv.begin (), ECIES_ENC_BLK_SIZE);
    len = ECIES_ENC_BLK_SIZE;

    // Encrypt/output HMAC
    bytesWritten = out.capacity () - len;
    assert (bytesWritten > 0);

    if (EVP_EncryptUpdate (&ctx, & (out.front ()) + len, &bytesWritten, hmac.begin (), ECIES_HMAC_SIZE) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("");
    }

    len += bytesWritten;

    // encrypt/output plaintext
    bytesWritten = out.capacity () - len;
    assert (bytesWritten > 0);

    if (EVP_EncryptUpdate (&ctx, & (out.front ()) + len, &bytesWritten, & (plaintext.front ()), plaintext.size ()) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("");
    }

    len += bytesWritten;

    // finalize
    bytesWritten = out.capacity () - len;

    if (EVP_EncryptFinal_ex (&ctx, & (out.front ()) + len, &bytesWritten) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("encryption error");
    }

    len += bytesWritten;

    // Output contains: IV, encrypted HMAC, encrypted data, encrypted padding
    assert (len <= (plaintext.size () + ECIES_HMAC_SIZE + (2 * ECIES_ENC_BLK_SIZE)));
    assert (len >= (plaintext.size () + ECIES_HMAC_SIZE + ECIES_ENC_BLK_SIZE)); // IV, HMAC, data
    out.resize (len);
    EVP_CIPHER_CTX_cleanup (&ctx);
    return out;
}
Developer: Aiolossong, Project: rippled, Lines: 74, Source: CKeyECIES.cpp

Example 8: memcpy

Blob CKey::decryptECIES (CKey& otherKey, Blob const& ciphertext)
{
    // minimum ciphertext = IV + HMAC + 1 block
    if (ciphertext.size () < ((2 * ECIES_ENC_BLK_SIZE) + ECIES_HMAC_SIZE) )
        throw std::runtime_error ("ciphertext too short");

    // extract IV
    ECIES_ENC_IV_TYPE iv;
    memcpy (iv.begin (), & (ciphertext.front ()), ECIES_ENC_BLK_SIZE);

    // begin decrypting
    EVP_CIPHER_CTX ctx;
    EVP_CIPHER_CTX_init (&ctx);

    ECIES_ENC_KEY_TYPE secret;
    ECIES_HMAC_KEY_TYPE hmacKey;
    getECIESSecret (otherKey, secret, hmacKey);

    if (EVP_DecryptInit_ex (&ctx, ECIES_ENC_ALGO, NULL, secret.begin (), iv.begin ()) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to init cipher");
    }

    // decrypt mac
    ECIES_HMAC_TYPE hmac;
    int outlen = ECIES_HMAC_SIZE;

    if ( (EVP_DecryptUpdate (&ctx, hmac.begin (), &outlen,
                             & (ciphertext.front ()) + ECIES_ENC_BLK_SIZE, ECIES_HMAC_SIZE + 1) != 1) || (outlen != ECIES_HMAC_SIZE) )
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to extract hmac");
    }

    // decrypt plaintext (after IV and encrypted mac)
    Blob plaintext (ciphertext.size () - ECIES_HMAC_SIZE - ECIES_ENC_BLK_SIZE);
    outlen = plaintext.size ();

    if (EVP_DecryptUpdate (&ctx, & (plaintext.front ()), &outlen,
                           & (ciphertext.front ()) + ECIES_ENC_BLK_SIZE + ECIES_HMAC_SIZE + 1,
                           ciphertext.size () - ECIES_ENC_BLK_SIZE - ECIES_HMAC_SIZE - 1) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to extract plaintext");
    }

    // decrypt padding
    int flen = 0;

    if (EVP_DecryptFinal (&ctx, & (plaintext.front ()) + outlen, &flen) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("plaintext had bad padding");
    }

    plaintext.resize (flen + outlen);

    // verify integrity
    if (hmac != makeHMAC (hmacKey, plaintext))
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("plaintext had bad hmac");
    }

    secret.zero ();
    hmacKey.zero ();

    EVP_CIPHER_CTX_cleanup (&ctx);
    return plaintext;
}
Developer: Aiolossong, Project: rippled, Lines: 81, Source: CKeyECIES.cpp
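
A round-trip sketch built only from the two member functions shown in Examples 7 and 8. It assumes rippled's Blob is std::vector<unsigned char> and that two valid, pre-existing CKey objects (here named aliceKey and bobKey) are passed in; both assumptions go beyond what the excerpts above show.

#include <cassert>
#include <string>

// Hypothetical round trip: Alice encrypts for Bob, Bob decrypts with Alice's key
// to derive the same shared ECIES secret.
void ecies_round_trip_demo(CKey& aliceKey, CKey& bobKey) {
    std::string message = "attack at dawn";
    Blob plaintext(message.begin(), message.end());

    Blob ciphertext = aliceKey.encryptECIES(bobKey, plaintext);
    Blob recovered  = bobKey.decryptECIES(aliceKey, ciphertext);

    // decryptECIES already throws on a bad HMAC or bad padding;
    // this only checks that the payload survived the round trip.
    assert(recovered == plaintext);
}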

Example 9:

double
MaxY::operator()(Blob &blob) const
{ 
  const Rectangle& rect = blob.bounding_rect();
  return static_cast<double>(rect.origin().y() + rect.height());
}
Developer: srgblnch, Project: ISL, Lines: 6, Source: Criteria.cpp

Example 10: LOG

ThreadableWebSocketChannel::SendResult WebSocketChannel::send(const Blob& binaryData)
{
    LOG(Network, "WebSocketChannel %p send() Sending Blob '%s'", this, binaryData.url().elidedString().utf8().data());
    enqueueBlobFrame(WebSocketFrame::OpCodeBinary, binaryData);
    return ThreadableWebSocketChannel::SendSuccess;
}
Developer: fmalita, Project: webkit, Lines: 6, Source: WebSocketChannel.cpp

Example 11:

void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}
Developer: ZhangSirM, Project: caffe-SPPNet, Lines: 4, Source: blob.cpp
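
ShareData (Example 6) and ShareDiff (this example) make one blob alias another blob's buffers instead of copying them. A minimal sketch, assuming Caffe's Blob<float>; the shapes and values are illustration only.

Blob<float> source(1, 2, 3, 4);
Blob<float> alias;
alias.ReshapeLike(source);   // counts must match; ShareData/ShareDiff CHECK this
alias.ShareData(source);     // alias.cpu_data() now reads source's data buffer
alias.ShareDiff(source);     // alias.cpu_diff() now reads source's diff buffer
// Writes through source are visible through alias because no copy was made.
source.mutable_cpu_data()[0] = 42.0f;
CHECK_EQ(alias.cpu_data()[0], 42.0f);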

Example 12: encode

    WriteMethod ExifParser::encode(
              Blob&     blob,
        const byte*     pData,
              uint32_t  size,
              ByteOrder byteOrder,
        const ExifData& exifData
    )
    {
        ExifData ed = exifData;

        // Delete IFD0 tags that are "not recorded" in compressed images
        // Reference: Exif 2.2 specs, 4.6.8 Tag Support Levels, section A
        static const char* filteredIfd0Tags[] = {
            "Exif.Image.PhotometricInterpretation",
            "Exif.Image.StripOffsets",
            "Exif.Image.RowsPerStrip",
            "Exif.Image.StripByteCounts",
            "Exif.Image.JPEGInterchangeFormat",
            "Exif.Image.JPEGInterchangeFormatLength",
            "Exif.Image.SubIFDs"
        };
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfd0Tags); ++i) {
            ExifData::iterator pos = ed.findKey(ExifKey(filteredIfd0Tags[i]));
            if (pos != ed.end()) {
#ifdef DEBUG
                std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
                ed.erase(pos);
            }
        }

        // Delete IFDs which do not occur in JPEGs
        static const IfdId filteredIfds[] = {
            subImage1Id,
            subImage2Id,
            subImage3Id,
            subImage4Id,
            panaRawIfdId,
            ifd2Id
        };
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfds); ++i) {
#ifdef DEBUG
            std::cerr << "Warning: Exif IFD " << filteredIfds[i] << " not encoded\n";
#endif
            eraseIfd(ed, filteredIfds[i]);
        }

        // IPTC and XMP are stored elsewhere, not in the Exif APP1 segment.
        const IptcData emptyIptc;
        const XmpData  emptyXmp;

        // Encode and check if the result fits into a JPEG Exif APP1 segment
        std::auto_ptr<TiffHeaderBase> header(new TiffHeader(byteOrder));
        WriteMethod wm = TiffParserWorker::encode(blob,
                                                  pData,
                                                  size,
                                                  ed,
                                                  emptyIptc,
                                                  emptyXmp,
                                                  Tag::root,
                                                  TiffMapping::findEncoder,
                                                  header.get());
        if (blob.size() <= 65527) return wm;

        // If it doesn't fit, remove additional tags
        blob.clear();

        // Delete preview tags if the preview is larger than 32kB.
        // Todo: Enhance preview classes to be able to write and delete previews and use that instead.
        // Table must be sorted by preview, the first tag in each group is the size
        static const PreviewTags filteredPvTags[] = {
            { pttLen, "Exif.Minolta.ThumbnailLength"                  },
            { pttTag, "Exif.Minolta.ThumbnailOffset"                  },
            { pttLen, "Exif.Minolta.Thumbnail"                        },
            { pttLen, "Exif.NikonPreview.JPEGInterchangeFormatLength" },
            { pttIfd, "NikonPreview"                                  },
            { pttLen, "Exif.Olympus.ThumbnailLength"                  },
            { pttTag, "Exif.Olympus.ThumbnailOffset"                  },
            { pttLen, "Exif.Olympus.ThumbnailImage"                   },
            { pttLen, "Exif.Olympus.Thumbnail"                        },
            { pttLen, "Exif.Olympus2.ThumbnailLength"                 },
            { pttTag, "Exif.Olympus2.ThumbnailOffset"                 },
            { pttLen, "Exif.Olympus2.ThumbnailImage"                  },
            { pttLen, "Exif.Olympus2.Thumbnail"                       },
            { pttLen, "Exif.OlympusCs.PreviewImageLength"             },
            { pttTag, "Exif.OlympusCs.PreviewImageStart"              },
            { pttTag, "Exif.OlympusCs.PreviewImageValid"              },
            { pttLen, "Exif.Pentax.PreviewLength"                     },
            { pttTag, "Exif.Pentax.PreviewOffset"                     },
            { pttTag, "Exif.Pentax.PreviewResolution"                 },
            { pttLen, "Exif.Thumbnail.StripByteCounts"                },
            { pttIfd, "Thumbnail"                                     },
            { pttLen, "Exif.Thumbnail.JPEGInterchangeFormatLength"    },
            { pttIfd, "Thumbnail"                                     }
        };
        bool delTags = false;
        ExifData::iterator pos;
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredPvTags); ++i) {
            switch (filteredPvTags[i].ptt_) {
            case pttLen:
//......... some code omitted here .........
Developer: , Project: , Lines: 101, Source: 

Example 13: main

int main() {
  Blob blob;
  Net ann;

  if( BUILD_BEST_NGRAMS ) {
    ifstream fin(I_F_NAME);
    if( !fin.is_open() ) {
      cout << "Couldn't open input file. Exiting." << endl;
      exit(1);
    } 
    
    // read input file
    blob.readFile(fin);
    fin.close();
    
    // Compute info gain for all ngrams, workhorse function
    blob.IG();
    
    // write out best ngrams
    ofstream fout(O_F_NAME);
    if( !fout.is_open() ) {
      cout << "Couldn't open output file. Exiting." << endl;
      exit(1);
    }
    blob.writeBest(fout);
    fout.close();
  }
  
  ifstream fin2(O_F_NAME);
  if( !fin2.is_open() ) {
    cout << "Couldn't open best grams file. Exiting." << endl;
    exit(1);
  }
  

  // Read best grams into nn object
  ann.readBestGrams(fin2);
  fin2.close();

  // read training data
  ifstream fin3(TRAIN_DATA);
  if( !fin3.is_open() ) {
    cout << "Couldn't open training file. Exiting." << endl;
    exit(1);
  }
  
  ann.readTrainingData(fin3);
  fin3.close();

  ann.train();

  ifstream fin4(TEST_DATA);
  if( !fin4.is_open() ) {
    cout << "Couldn't open testing file. Exiting." << endl;
    exit(1);
  }  

  cout << "Testing..." << flush;

  ann.readTestingData(fin4);
  fin4.close();

  ann.test();

  cout << "Done." << endl;
}
Developer: KeisterBun, Project: CS504, Lines: 66, Source: main.cpp

Example 14: ex_feature

int ex_feature(int argc, char** argv){
	namespace bf=boost::filesystem;
	if (argc < 7){
		LOG(ERROR)<< "Usage: "<<argv[0]<<" pretrained_net_param feature_extraction_proto_file extract_feature_blob_name filelist meanfile mode";
		return 1;
	}
	int mode = atoi(argv[6]);
	if(mode == 1){
		LOG(ERROR) << "Using CPU";
		Caffe::set_mode(Caffe::CPU);
	}else{
		//using gpu
		LOG(ERROR)<< "Using GPU";
		uint device_id = 0;
		LOG(ERROR) << "Using Device_id=" << device_id;
		Caffe::SetDevice(device_id);
		Caffe::set_mode(Caffe::GPU);
	}
	
	Caffe::set_phase(Caffe::TEST);
	string extract_feature_blob_name=argv[3];
	//string svm_model = argv[3];
	string tst_filelist=argv[4];
	string mean_file = argv[5];
	//string save_path = argv[6];
	LOG(ERROR) << "load cnn model";
	shared_ptr<Net<Dtype> > feature_extraction_net(new Net<Dtype>(argv[2]));
	feature_extraction_net->CopyTrainedLayersFrom(argv[1]);
	//shared_ptr<Blob<Dtype> > feature_blob=feature_extraction_net->blob_by_name(extract_feature_blob_name);
	int layerIdx = feature_extraction_net->layerIdx_by_name(extract_feature_blob_name);
	if(layerIdx == -1){
		LOG(ERROR) << "Can't find layer:" << extract_feature_blob_name;
		return 1;
	}else{
		LOG(ERROR) << "LayerIdx:" << layerIdx << " continue...";
	}
	
	vector<vector<Blob<Dtype>*> >& top_vecs = feature_extraction_net->top_vecs();
	shared_ptr<Blob<Dtype> >  feature_blob(top_vecs[layerIdx][0]);
	shared_ptr<Blob<Dtype> > data_blob = feature_extraction_net->blob_by_name("data");
	LOG(ERROR) << "batch size:" << data_blob->num();
	int batch_size = data_blob->num();
	int channels = data_blob->channels();
	int height = data_blob->height();
	int width = data_blob->width();
	CHECK_EQ(height, width);
	int crop_size = height;
	//LOG(ERROR) << 
	//return 1;
	vector<string> images;
	if(!readFromFile(tst_filelist, images)){
		std::cout<< "parse Data Done." << std::endl;
	}else{
		std::cout<<"parse Data failed."<<std::endl;
		return 1;
	}
	Blob<Dtype> data_mean;
	//std::string mean_file = argv[5];
	BlobProto blob_proto;
	std::cout << "reading data_mean from " << mean_file << std::endl;
	ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
	data_mean.FromProto(blob_proto);
	cv::Mat mat_mean = Blob2Mat<Dtype>(data_mean);
	CHECK_EQ(data_mean.num(), 1);
	CHECK_EQ(data_mean.width(), data_mean.height());
	CHECK_EQ(data_mean.channels(), 3);
	std::cout << "prepare parameters" << std::endl;

	
	float scale = 1.0;	
	//bf::path output_path(save_path);
	Blob<Dtype>* bottom = new Blob<Dtype>(batch_size, 3, crop_size, crop_size);
	vector<Blob<Dtype>*> bottomV;
	bottomV.push_back(bottom);
	int numCaches = ceil(float(images.size()) / batch_size);
	Dtype* feature_blob_data;
	Dtype* im_blob_ori;
	int num=0;
	int startIdx = 0;
	//bf::path ftrfile = output_path;
	//ftrfile.replace_extension(".ftr");
	//std::ofstream fo(ftrfile.string().c_str());
	bool multivew = false;
	LOG(ERROR) << "cachesize:" << batch_size << " numCaches:" << numCaches;
	clock_t start_processing, end_processing;
	start_processing = clock();
	for(int cacheIdx = 0;cacheIdx < numCaches;cacheIdx++){
		LOG(ERROR) << "processing:" << cacheIdx << "/" << numCaches;
		vector< vector<Dtype> > cache;
		//vector< vector<Dtype> > resultcache;
		clock_t start_cache, end_cache;
		start_cache = clock();
		vector<vector<int> > img_size;
		readImagesToCache(cache, images, crop_size, mat_mean, batch_size, &startIdx, &num, scale, multivew, img_size);
		end_cache = clock();
		LOG(ERROR) << "readImageToCache:" << (end_cache-start_cache) << "ms";
		start_cache = clock();
		int nBatches = ceil(float(cache.size()) / batch_size);
		//LOG(ERROR) << "nBatches:"<< nBatches << " cache:" << cache.size();
		assert(img_size.size() == nBatches);
//......... some code omitted here .........
Developer: dsisds, Project: caffe-visualization, Lines: 101, Source: deconv_for_position.cpp

Example 15: caffe_copy

void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      caffe_copy(blob->count(), top_diff,
                 bottom_diff + (*bottom)[0]->offset(offset_num));
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff + blob->offset(n),
                   bottom_diff + (*bottom)[0]->offset(n, offset_channel));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
开发者ID:ZhitingHu,项目名称:NN,代码行数:27,代码来源:slice_layer.cpp


Note: The Blob class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.