This article collects typical C++ usage examples of the Datum::channels method. If you are wondering exactly how Datum::channels is used in C++, or what real-world calls to it look like, the curated code examples below should help; you can also explore the Datum class that the method belongs to.
The following shows 15 code examples of Datum::channels, sorted by popularity by default.
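Before the examples, a minimal, self-contained sketch may help orient readers new to Caffe. It is illustrative only and assumes a standard Caffe build in which caffe.pb.h is generated from caffe.proto (providing the Datum message) and the Blob header is available; it is not taken from any of the examples below.

#include <string>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

int main() {
  // Datum is a protobuf message describing one sample: shape, raw bytes, label.
  caffe::Datum datum;
  datum.set_channels(3);
  datum.set_height(360);
  datum.set_width(480);
  // Pixel data is stored as a flat byte string in channel-major (C x H x W) order.
  const int count = datum.channels() * datum.height() * datum.width();
  datum.mutable_data()->assign(count, '\0');
  // Datum::channels() is most often used to size the destination Blob,
  // exactly as the examples below do when reshaping top blobs.
  caffe::Blob<float> blob(1, datum.channels(), datum.height(), datum.width());
  return 0;
}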
Example 1: NumSequenceMatches
int NumSequenceMatches(const TransformationParameter transform_param,
                       const Datum& datum, Phase phase) {
  // Get crop sequence with Caffe seed 1701.
  DataTransformer<Dtype> transformer(transform_param, phase);
  const int crop_size = transform_param.crop_size();
  Caffe::set_random_seed(seed_);
  transformer.InitRand();
  Blob<Dtype> blob(1, datum.channels(), datum.height(), datum.width());
  if (transform_param.crop_size() > 0) {
    blob.Reshape(1, datum.channels(), crop_size, crop_size);
  }

  vector<vector<Dtype> > crop_sequence;
  for (int iter = 0; iter < this->num_iter_; ++iter) {
    vector<Dtype> iter_crop_sequence;
    transformer.Transform(datum, &blob);
    for (int j = 0; j < blob.count(); ++j) {
      iter_crop_sequence.push_back(blob.cpu_data()[j]);
    }
    crop_sequence.push_back(iter_crop_sequence);
  }
  // Check if the sequence differs from the previous
  int num_sequence_matches = 0;
  for (int iter = 0; iter < this->num_iter_; ++iter) {
    vector<Dtype> iter_crop_sequence = crop_sequence[iter];
    transformer.Transform(datum, &blob);
    for (int j = 0; j < blob.count(); ++j) {
      num_sequence_matches += (crop_sequence[iter][j] == blob.cpu_data()[j]);
    }
  }
  return num_sequence_matches;
}
Example 2: DataLayerSetUp
void VideoDataKDLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                             const vector<Blob<Dtype>*>& top) {
  const int new_height = this->layer_param_.video_data_kd_param().new_height();
  const int new_width = this->layer_param_.video_data_kd_param().new_width();
  const int new_length = this->layer_param_.video_data_kd_param().new_length();
  const int num_segments = this->layer_param_.video_data_kd_param().num_segments();
  const string& source = this->layer_param_.video_data_kd_param().source();

  LOG(INFO) << "Opening file: " << source;
  std::ifstream infile(source.c_str());
  string filename;
  string dir_mvs;
  string dir_tvl1;
  int label;
  int length;
  while (infile >> dir_mvs >> dir_tvl1 >> filename >> length >> label) {
    lines_dir_.push_back(std::make_pair(dir_mvs, dir_tvl1));
    lines_.push_back(std::make_pair(filename, label));
    lines_duration_.push_back(length);
  }

  if (this->layer_param_.video_data_kd_param().shuffle()) {
    const unsigned int prefectch_rng_seed = caffe_rng_rand();
    prefetch_rng_1_.reset(new Caffe::RNG(prefectch_rng_seed));
    prefetch_rng_2_.reset(new Caffe::RNG(prefectch_rng_seed));
    ShuffleVideos();
  }

  LOG(INFO) << "A total of " << lines_.size() << " videos.";
  lines_id_ = 0;

  Datum datum;
  const unsigned int frame_prefectch_rng_seed = caffe_rng_rand();
  frame_prefetch_rng_.reset(new Caffe::RNG(frame_prefectch_rng_seed));
  int average_duration = (int) lines_duration_[lines_id_] / num_segments;
  vector<int> offsets;
  for (int i = 0; i < num_segments; ++i) {
    caffe::rng_t* frame_rng = static_cast<caffe::rng_t*>(frame_prefetch_rng_->generator());
    int offset = (*frame_rng)() % (average_duration - new_length + 1);
    offsets.push_back(offset + i * average_duration);
  }
  if (this->layer_param_.video_data_kd_param().modality() == VideoDataKDParameter_Modality_FLOW)
    CHECK(ReadSegmentFlowToDatum_KD(lines_[lines_id_].first, lines_dir_[lines_id_].first,
        lines_dir_[lines_id_].second, lines_[lines_id_].second, offsets,
        new_height, new_width, new_length, &datum));
  else
    CHECK(ReadSegmentRGBToDatum(lines_[lines_id_].first, lines_[lines_id_].second,
        offsets, new_height, new_width, new_length, &datum, true));

  const int crop_size = this->layer_param_.transform_param().crop_size();
  const int batch_size = this->layer_param_.video_data_kd_param().batch_size();
  if (crop_size > 0) {
    top[0]->Reshape(batch_size, datum.channels(), crop_size, crop_size);
    this->prefetch_data_.Reshape(batch_size, datum.channels(), crop_size, crop_size);
  } else {
    top[0]->Reshape(batch_size, datum.channels(), datum.height(), datum.width());
    this->prefetch_data_.Reshape(batch_size, datum.channels(), datum.height(), datum.width());
  }
  LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels()
            << "," << top[0]->height() << "," << top[0]->width();

  top[1]->Reshape(batch_size, 1, 1, 1);
  this->prefetch_label_.Reshape(batch_size, 1, 1, 1);

  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
}
Example 3: main
int main(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: demo_compute_image_mean input_leveldb output_file";
    return 0;
  }

  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = false;
  LOG(INFO) << "Opening leveldb " << argv[1];
  leveldb::Status status = leveldb::DB::Open(options, argv[1], &db);
  CHECK(status.ok()) << "Failed to open leveldb " << argv[1];

  leveldb::ReadOptions read_options;
  read_options.fill_cache = false;
  leveldb::Iterator* it = db->NewIterator(read_options);
  it->SeekToFirst();

  Datum datum;
  BlobProto sum_blob;
  int count = 0;
  datum.ParseFromString(it->value().ToString());
  sum_blob.set_num(1);
  sum_blob.set_channels(datum.channels());
  sum_blob.set_height(datum.height());
  sum_blob.set_width(datum.width());
  const int data_size = datum.channels() * datum.height() * datum.width();
  for (int i = 0; i < datum.data().size(); ++i) {
    sum_blob.add_data(0.);
  }

  LOG(INFO) << "Starting Iteration";
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // just a dummy operation
    datum.ParseFromString(it->value().ToString());
    const string& data = datum.data();
    CHECK_EQ(data.size(), data_size) << "Incorrect data field size " << data.size();
    for (int i = 0; i < data.size(); ++i) {
      sum_blob.set_data(i, sum_blob.data(i) + (uint8_t) data[i]);
    }
    ++count;
    if (count % 10000 == 0) {
      LOG(ERROR) << "Processed " << count << " files.";
      if (count == 100000) break;
    }
  }
  for (int i = 0; i < sum_blob.data_size(); ++i) {
    sum_blob.set_data(i, sum_blob.data(i) / count);
  }

  // Write to disk
  LOG(INFO) << "Write to " << argv[2];
  WriteProtoToBinaryFile(sum_blob, argv[2]);

  delete db;
  return 0;
}
Example 4: Transform
void DataTransformer<Dtype>::Transform(const Datum& datum,
                                       Blob<Dtype>* transformed_blob) {
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();

  const int channels = transformed_blob->channels();
  const int height = transformed_blob->height();
  const int width = transformed_blob->width();
  const int num = transformed_blob->num();

  CHECK_EQ(channels, datum_channels);
  CHECK_LE(height, datum_height);
  CHECK_LE(width, datum_width);
  CHECK_GE(num, 1);

  const int crop_size = param_.crop_size();
  if (crop_size) {
    CHECK_EQ(crop_size, height);
    CHECK_EQ(crop_size, width);
  } else {
    CHECK_EQ(datum_height, height);
    CHECK_EQ(datum_width, width);
  }

  Dtype* transformed_data = transformed_blob->mutable_cpu_data();
  Transform(datum, transformed_data);
}
Example 5: FillDatum
TYPED_TEST(DataTransformTest, TestCropSize) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels the same equal to label
  const int label = 0;
  const int channels = 3;
  const int height = 4;
  const int width = 5;
  const int crop_size = 2;

  transform_param.set_crop_size(crop_size);
  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  DataTransformer<TypeParam> transformer(transform_param, TEST);
  transformer.InitRand();
  Blob<TypeParam> blob(1, channels, crop_size, crop_size);
  for (int iter = 0; iter < this->num_iter_; ++iter) {
    transformer.Transform(datum, &blob);
    EXPECT_EQ(blob.num(), 1);
    EXPECT_EQ(blob.channels(), datum.channels());
    EXPECT_EQ(blob.height(), crop_size);
    EXPECT_EQ(blob.width(), crop_size);
    for (int j = 0; j < blob.count(); ++j) {
      EXPECT_EQ(blob.cpu_data()[j], label);
    }
  }
}
Example 6: InferBlobShape
vector<int> DataTransformer<Dtype>::InferBlobShape(const Datum& datum) {
  if (datum.encoded()) {
#ifdef USE_OPENCV
    CHECK(!(param_.force_color() && param_.force_gray()))
        << "cannot set both force_color and force_gray";
    cv::Mat cv_img;
    if (param_.force_color() || param_.force_gray()) {
      // If force_color then decode in color otherwise decode in gray.
      cv_img = DecodeDatumToCVMat(datum, param_.force_color());
    } else {
      cv_img = DecodeDatumToCVMatNative(datum);
    }
    // InferBlobShape using the cv::image.
    return InferBlobShape(cv_img);
#else
    LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  }

  const int crop_size = param_.crop_size();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();
  // Check dimensions.
  CHECK_GT(datum_channels, 0);
  CHECK_GE(datum_height, crop_size);
  CHECK_GE(datum_width, crop_size);
  // Build BlobShape.
  vector<int> shape(4);
  shape[0] = 1;
  shape[1] = datum_channels;
  shape[2] = (crop_size) ? crop_size : datum_height;
  shape[3] = (crop_size) ? crop_size : datum_width;
  return shape;
}
Example 7: DatumToCVMat
cv::Mat DatumToCVMat(const Datum& datum) {
  if (datum.encoded()) {
    cv::Mat cv_img;
    cv_img = DecodeDatumToCVMatNative(datum);
    return cv_img;
  }

  const string& data = datum.data();
  int datum_channels = datum.channels();
  int datum_height = datum.height();
  int datum_width = datum.width();
  CHECK(datum_channels == 3);
  cv::Mat cv_img(datum_height, datum_width, CV_8UC3);
  for (int h = 0; h < datum_height; ++h) {
    for (int w = 0; w < datum_width; ++w) {
      for (int c = 0; c < datum_channels; ++c) {
        int datum_index = (c * datum_height + h) * datum_width + w;
        cv_img.at<cv::Vec3b>(h, w)[c] = static_cast<uchar>(data[datum_index]);
      }
    }
  }
  return cv_img;
}
Example 8: FillDatum
TYPED_TEST(DataTransformTest, TestCropSize) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels the same equal to label
  const int_tp label = 0;
  const int_tp channels = 3;
  const int_tp height = 4;
  const int_tp width = 5;
  const int_tp crop_size = 2;

  transform_param.set_crop_size(crop_size);
  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST,
                                     Caffe::GetDefaultDevice());
  transformer->InitRand();
  Blob<TypeParam>* blob =
      new Blob<TypeParam>(1, channels, crop_size, crop_size);
  for (int_tp iter = 0; iter < this->num_iter_; ++iter) {
    transformer->Transform(datum, blob);
    EXPECT_EQ(blob->num(), 1);
    EXPECT_EQ(blob->channels(), datum.channels());
    EXPECT_EQ(blob->height(), crop_size);
    EXPECT_EQ(blob->width(), crop_size);
    for (int_tp j = 0; j < blob->count(); ++j) {
      EXPECT_EQ(blob->cpu_data()[j], label);
    }
  }
}
Example 9: ReadImageToDatum
TEST_F(IOTest, TestReadImageToDatumResizedSquare) {
  string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg";
  Datum datum;
  ReadImageToDatum(filename, 0, 256, 256, &datum);
  EXPECT_EQ(datum.channels(), 3);
  EXPECT_EQ(datum.height(), 256);
  EXPECT_EQ(datum.width(), 256);
}
Example 10: LOG
void DataDrivingLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                             const vector<Blob<Dtype>*>& top) {
  // Initialize DB
  leveldb::DB* db_temp;
  leveldb::Options options;
  options.max_open_files = 100;
  options.create_if_missing = false;
  LOG(INFO) << "Opening leveldb " << this->layer_param_.data_driving_param().source();
  leveldb::Status status = leveldb::DB::Open(
      options, this->layer_param_.data_driving_param().source(), &db_temp);
  CHECK(status.ok()) << "Failed to open leveldb "
      << this->layer_param_.data_driving_param().source() << std::endl
      << status.ToString();
  db_.reset(db_temp);

  // Read a data point, to initialize the prefetch and top blobs.
  string value;
  const int kMaxKeyLength = 256;
  char key_cstr[kMaxKeyLength];
  srand((int) time(0));
  snprintf(key_cstr, kMaxKeyLength, "%08d", 1);
  db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
  Datum datum;
  datum.ParseFromString(value);

  int batch_size = this->layer_param_.data_driving_param().batch_size();

  // image
  top[0]->Reshape(batch_size, datum.channels(), datum.height(), datum.width());
  this->prefetch_data_.Reshape(batch_size, datum.channels(), datum.height(), datum.width());
  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();

  // label
  top[1]->Reshape(batch_size, 1, 1, para_dim);
  this->prefetch_label_.Reshape(batch_size, 1, 1, para_dim);

  const string& mean_file = this->layer_param_.data_driving_param().mean_file();
  LOG(INFO) << "Loading mean file from: " << mean_file;
  BlobProto blob_proto;
  ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
  data_mean_.FromProto(blob_proto);
}
Example 11: ReadImageToCVMat
TEST_F(IOTest, TestCVMatToDatum) {
  string filename = EXAMPLES_SOURCE_DIR "images/cat.jpg";
  cv::Mat cv_img = ReadImageToCVMat(filename);
  Datum datum;
  CVMatToDatum(cv_img, &datum);
  EXPECT_EQ(datum.channels(), 3);
  EXPECT_EQ(datum.height(), 360);
  EXPECT_EQ(datum.width(), 480);
}
Example 12: GetChannelMean
vector<double> GetChannelMean(scoped_ptr<db::Cursor>& cursor) {
  vector<double> meanv(3, 0);
  int count = 0;
  LOG(INFO) << "Starting Iteration";
  while (cursor->valid()) {
    Datum datum;
    datum.ParseFromString(cursor->value());
    DecodeDatumNative(&datum);
    const std::string& data = datum.data();

    int w = datum.width(), h = datum.height();
    int ch = datum.channels();
    int dim = w * h;

    double chmean[3] = { 0, 0, 0 };
    for (int i = 0; i < ch; i++) {
      int chstart = i * dim;
      for (int j = 0; j < dim; j++)
        chmean[i] += (uint8_t) data[chstart + j];
      chmean[i] /= dim;
    }

    if (ch == 1) {
      meanv[0] += chmean[0];
      meanv[1] += chmean[0];
      meanv[2] += chmean[0];
    } else {
      meanv[0] += chmean[0];
      meanv[1] += chmean[1];
      meanv[2] += chmean[2];
    }

    ++count;
    if (count % 10000 == 0) {
      LOG(INFO) << "Processed " << count << " files.";
    }
    cursor->Next();
  }
  if (count % 10000 != 0) {
    LOG(INFO) << "Processed " << count << " files.";
  }
  for (int c = 0; c < 3; ++c) {
    LOG(INFO) << "mean_value channel [" << c << "]: " << meanv[c] / count;
  }
  return meanv;
}
Example 13: Transform
void DataTransformer<Dtype>::Transform(const Datum& datum,
                                       Blob<Dtype>* transformed_blob) {
#ifndef CAFFE_HEADLESS
  // If the datum is encoded, decode it and transform the resulting cv::Mat.
  if (datum.encoded()) {
    CHECK(!(param_.force_color() && param_.force_gray()))
        << "cannot set both force_color and force_gray";
    cv::Mat cv_img;
    if (param_.force_color() || param_.force_gray()) {
      // If force_color then decode in color otherwise decode in gray.
      cv_img = DecodeDatumToCVMat(datum, param_.force_color());
    } else {
      cv_img = DecodeDatumToCVMatNative(datum);
    }
    // Transform the cv::Mat into the blob.
    return Transform(cv_img, transformed_blob);
  } else {
    if (param_.force_color() || param_.force_gray()) {
      LOG(ERROR) << "force_color and force_gray only for encoded datum";
    }
  }
#endif

  const int crop_size = param_.crop_size();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();

  // Check dimensions.
  const int channels = transformed_blob->channels();
  const int height = transformed_blob->height();
  const int width = transformed_blob->width();
  const int num = transformed_blob->num();

  CHECK_EQ(channels, datum_channels);
  CHECK_LE(height, datum_height);
  CHECK_LE(width, datum_width);
  CHECK_GE(num, 1);

  if (crop_size) {
    CHECK_EQ(crop_size, height);
    CHECK_EQ(crop_size, width);
  } else {
    CHECK_EQ(datum_height, height);
    CHECK_EQ(datum_width, width);
  }

  Dtype* transformed_data = transformed_blob->mutable_cpu_data();
  Transform(datum, transformed_data);
}
Example 14: LOG
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                      const vector<Blob<Dtype>*>& top) {
  // Initialize DB
  db_.reset(db::GetDB(this->layer_param_.data_param().backend()));
  db_->Open(this->layer_param_.data_param().source(), db::READ);
  cursor_.reset(db_->NewCursor());

  if (this->layer_param_.data_param().rand_skip() ||
      this->layer_param_.data_param().skip()) {
    unsigned int skip;
    // Check if we should randomly skip a few data points
    if (this->layer_param_.data_param().rand_skip()) {
      skip = caffe_rng_rand() %
          this->layer_param_.data_param().rand_skip();
    } else {
      skip = this->layer_param_.data_param().skip();
    }
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      cursor_->Next();
    }
  }
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(cursor_->value());

  bool force_color = this->layer_param_.data_param().force_encoded_color();
  if ((force_color && DecodeDatum(&datum, true)) ||
      DecodeDatumNative(&datum)) {
    LOG(INFO) << "Decoding Datum";
  }
  // image
  int crop_size = this->layer_param_.transform_param().crop_size();
  if (crop_size > 0) {
    top[0]->Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
    this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size);
  } else {
    top[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), datum.height(), datum.width());
    this->transformed_data_.Reshape(1, datum.channels(),
        datum.height(), datum.width());
  }
  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // label
  if (this->output_labels_) {
    vector<int> label_shape(1, this->layer_param_.data_param().batch_size());
    top[1]->Reshape(label_shape);
    this->prefetch_label_.Reshape(label_shape);
  }
}
Example 15: db
TYPED_TEST(DBTest, TestKeyValue) {
  unique_ptr<db::DB> db(db::GetDB(TypeParam::backend));
  db->Open(this->source_, db::READ);
  unique_ptr<db::Cursor> cursor(db->NewCursor());
  EXPECT_TRUE(cursor->valid());
  string key = cursor->key();
  Datum datum;
  datum.ParseFromString(cursor->value());
  EXPECT_EQ(key, "cat.jpg");
  EXPECT_EQ(datum.channels(), 3);
  EXPECT_EQ(datum.height(), 360);
  EXPECT_EQ(datum.width(), 480);

  cursor->Next();
  EXPECT_TRUE(cursor->valid());
  key = cursor->key();
  datum.ParseFromString(cursor->value());
  EXPECT_EQ(key, "fish-bike.jpg");
  EXPECT_EQ(datum.channels(), 3);
  EXPECT_EQ(datum.height(), 323);
  EXPECT_EQ(datum.width(), 481);
  cursor->Next();
  EXPECT_FALSE(cursor->valid());
}