

C++ ASSERT_ND Function Code Examples

This article collects typical usage examples of the C++ ASSERT_ND function from open-source code. If you are unsure what ASSERT_ND does or how to call it, the selected examples below should help.


A total of 15 ASSERT_ND code examples are shown below, ordered by popularity.
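
ASSERT_ND is the debug-only assertion macro used throughout foedus_code: like assert, it checks an invariant in debug builds and compiles away entirely when NDEBUG is defined, which is why the examples below use it liberally as executable documentation. The exact definition lives in the project's own header; the sketch below only illustrates the typical shape of such a macro, and the name ASSERT_ND_SKETCH and the message format are illustrative, not the project's actual implementation.

// Minimal sketch of a debug-only assertion macro in the style of ASSERT_ND.
// NOT the foedus implementation; the real macro adds richer diagnostics.
#include <cstdio>
#include <cstdlib>

#ifndef NDEBUG
#define ASSERT_ND_SKETCH(x) \
  do { \
    if (!(x)) { \
      std::fprintf(stderr, "Assertion failed: %s at %s:%d\n", #x, __FILE__, __LINE__); \
      std::abort(); \
    } \
  } while (0)
#else
#define ASSERT_ND_SKETCH(x)  /* expands to nothing in release builds */
#endif  // NDEBUG

int main() {
  int buffer_size = 4096;
  ASSERT_ND_SKETCH(buffer_size % 512 == 0);  // checked only in debug builds
  return 0;
}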

Example 1: SortedBuffer

DumpFileSortedBuffer::DumpFileSortedBuffer(
  fs::DirectIoFile* file, memory::AlignedMemorySlice io_buffer)
  : SortedBuffer(
    reinterpret_cast<char*>(io_buffer.get_block()),
    io_buffer.get_size(),
    fs::file_size(file->get_path())),
    file_(file),
    io_buffer_(io_buffer) {
  ASSERT_ND(buffer_size_ % kAlignment == 0);
  ASSERT_ND(total_size_ % kAlignment == 0);
}
Developer: HeadhunterXamd, Project: foedus_code, Lines: 11, Source: log_buffer.cpp

Example 2: ASSERT_ND

inline uint32_t ConstDiv::rem64(uint64_t n, uint32_t d, uint64_t q) const {
#ifndef NDEBUG
  ASSERT_ND(d == d_);
#endif  // NDEBUG
  ASSERT_ND(n / d == q);
  if (flags_ & kFlagPowerOfTwo) {
    return n & ((1ULL << d_highest_bits_) - 1ULL);
  } else {
    return n - d * q;
  }
}
Developer: HeadhunterXamd, Project: foedus_code, Lines: 11, Source: const_div.hpp
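
The assertion n / d == q in Example 2 documents the contract of rem64: the caller passes in a quotient it has already computed, and the remainder is recovered either by masking (when the divisor is a power of two) or by the identity n - d * q. A standalone sketch of that arithmetic, independent of the ConstDiv class (function names here are illustrative):

#include <cassert>
#include <cstdint>

// Remainder from an already-computed quotient: n % d == n - d * q where q = n / d.
inline uint32_t rem_from_quotient(uint64_t n, uint32_t d, uint64_t q) {
  assert(n / d == q);  // the same precondition the ASSERT_ND above checks
  return static_cast<uint32_t>(n - d * q);
}

// Power-of-two fast path: n % (1 << k) == n & ((1 << k) - 1).
inline uint32_t rem_power_of_two(uint64_t n, uint32_t k) {
  return static_cast<uint32_t>(n & ((1ULL << k) - 1ULL));
}

int main() {
  uint64_t n = 1000003;
  uint32_t d = 97;
  uint64_t q = n / d;                        // quotient computed elsewhere
  assert(rem_from_quotient(n, d, q) == n % d);
  assert(rem_power_of_two(n, 4) == n % 16);  // d = 16 = 1 << 4
  return 0;
}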

Example 3: assert_mcs_aligned

xct::McsBlockIndex ThreadPimpl::mcs_initial_lock(xct::McsLock* mcs_lock) {
  assert_mcs_aligned(mcs_lock);
  ASSERT_ND(!mcs_lock->is_locked());
  // so far we allow only 2^16 MCS blocks per transaction. we might increase later.
  ASSERT_ND(current_xct_.get_mcs_block_current() < 0xFFFFU);
  xct::McsBlockIndex block_index = current_xct_.increment_mcs_block_current();
  ASSERT_ND(block_index > 0);
  mcs_init_block(mcs_lock, block_index, false);
  mcs_lock->reset(id_, block_index);
  assorted::memory_fence_acq_rel();
  return block_index;
}
Developer: kumagi, Project: foedus_code, Lines: 12, Source: thread_pimpl.cpp

Example 4: ASSERT_ND

ErrorCode ThreadPimpl::follow_page_pointers_for_write_batch(
  uint16_t batch_size,
  storage::VolatilePageInit page_initializer,
  storage::DualPagePointer** pointers,
  storage::Page** parents,
  const uint16_t* index_in_parents,
  storage::Page** out) {
  // REMINDER: Remember that it might be parents == out. It's not an issue in this function, tho.
  // this method is not quite batched as it doesn't need to be.
  // still, less branches because we can assume all of them need a writable volatile page.
  for (uint16_t b = 0; b < batch_size; ++b) {
    storage::DualPagePointer* pointer = pointers[b];
    if (pointer == nullptr) {
      out[b] = nullptr;
      continue;
    }
    ASSERT_ND(!parents[b]->get_header().snapshot_);
    storage::Page** page = out + b;
    storage::VolatilePagePointer volatile_pointer = pointer->volatile_pointer_;
    if (!volatile_pointer.is_null()) {
      *page = global_volatile_page_resolver_.resolve_offset(volatile_pointer);
    } else if (pointer->snapshot_pointer_ != 0) {
      // a snapshot version exists; construct a writable volatile copy from it
      CHECK_ERROR_CODE(install_a_volatile_page(pointer, page));
    } else {
      // neither a volatile nor a snapshot version exists yet, so initialize a brand-new volatile page
      ASSERT_ND(page_initializer);
      memory::PagePoolOffset offset = core_memory_->grab_free_volatile_page();
      if (UNLIKELY(offset == 0)) {
        return kErrorCodeMemoryNoFreePages;
      }
      storage::Page* new_page = local_volatile_page_resolver_.resolve_offset_newpage(offset);
      storage::VolatilePagePointer new_page_id;
      new_page_id.components.numa_node = numa_node_;
      new_page_id.components.offset = offset;
      storage::VolatilePageInitArguments args = {
        holder_,
        new_page_id,
        new_page,
        parents[b],
        index_in_parents[b]
      };
      page_initializer(args);
      storage::assert_valid_volatile_page(new_page, offset);
      ASSERT_ND(new_page->get_header().snapshot_ == false);

      *page = place_a_new_volatile_page(offset, pointer);
    }
    ASSERT_ND(out[b] != nullptr);
  }
  return kErrorCodeOk;
}
Developer: kumagi, Project: foedus_code, Lines: 52, Source: thread_pimpl.cpp

Example 5: ASSERT_ND

uint32_t PagePoolOffsetAndEpochChunk::get_safe_offset_count(const Epoch& threshold) const {
  ASSERT_ND(is_sorted());
  OffsetAndEpoch dummy;
  dummy.safe_epoch_ = threshold.value();
  struct CompareEpoch {
    bool operator() (const OffsetAndEpoch& left, const OffsetAndEpoch& right) {
      return Epoch(left.safe_epoch_) < Epoch(right.safe_epoch_);
    }
  };
  const OffsetAndEpoch* result = std::lower_bound(chunk_, chunk_ + size_, dummy, CompareEpoch());
  ASSERT_ND(result);
  ASSERT_ND(result - chunk_ <= size_);
  return result - chunk_;
}
Developer: kumagi, Project: foedus_code, Lines: 14, Source: page_pool.cpp
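
Example 5 leans on std::lower_bound: because chunk_ is kept sorted by safe_epoch_ (hence the is_sorted() assertion), lower_bound returns the first entry whose epoch is not less than the threshold, so result - chunk_ counts exactly the leading entries strictly below it, which is what the function reports as the safe count. A self-contained sketch of the same idiom on a plain sorted integer array (illustrative names, not the foedus Epoch type):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Count of leading entries strictly below a threshold in a sorted array,
// computed the same way as get_safe_offset_count in Example 5.
std::size_t count_below(const std::vector<uint64_t>& sorted_epochs, uint64_t threshold) {
  auto it = std::lower_bound(sorted_epochs.begin(), sorted_epochs.end(), threshold);
  return static_cast<std::size_t>(it - sorted_epochs.begin());
}

int main() {
  std::vector<uint64_t> epochs = {3, 5, 5, 8, 12};
  assert(count_below(epochs, 8) == 3);    // 3, 5, 5 are strictly below 8
  assert(count_below(epochs, 100) == 5);  // everything is below the threshold
  return 0;
}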

Example 6: LOG

bool LogMapper::add_new_bucket(storage::StorageId storage_id) {
  if (buckets_allocated_count_ >= buckets_memory_.get_size() / sizeof(Bucket)) {
    // we allocated all buckets_memory_! we have to flush the buckets now.
    // this shouldn't happen often.
    LOG(WARNING) << to_string() << " ran out of buckets_memory_, so it has to flush buckets before"
      " processing one IO buffer. This shouldn't happen often. check your log_mapper_bucket_kb_"
      " setting. this=" << *this;
    return false;
  }

  Bucket* base_address = reinterpret_cast<Bucket*>(buckets_memory_.get_block());
  Bucket* new_bucket = base_address + buckets_allocated_count_;
  ++buckets_allocated_count_;
  new_bucket->storage_id_ = storage_id;
  new_bucket->counts_ = 0;
  new_bucket->next_bucket_ = nullptr;

  BucketHashList* hashlist = find_storage_hashlist(storage_id);
  if (hashlist) {
    // just add this as a new tail
    ASSERT_ND(hashlist->storage_id_ == storage_id);
    ASSERT_ND(hashlist->tail_->is_full());
    hashlist->tail_->next_bucket_ = new_bucket;
    hashlist->tail_ = new_bucket;
    ++hashlist->bucket_counts_;
  } else {
    // we don't even have a linked list for this.
    // If this happens often, maybe we should have 65536 hash buckets...
    if (hashlist_allocated_count_ >= kBucketHashListMaxCount) {
      LOG(WARNING) << to_string() << " ran out of hashlist memory, so it has to flush buckets now"
        " This shouldn't happen often. We must consider increasing kBucketHashListMaxCount."
        " this=" << *this;
      return false;
    }

    // allocate from the pool
    BucketHashList* new_hashlist =
      reinterpret_cast<BucketHashList*>(tmp_hashlist_buffer_slice_.get_block())
      + hashlist_allocated_count_;
    ++hashlist_allocated_count_;

    new_hashlist->storage_id_ = storage_id;
    new_hashlist->head_ = new_bucket;
    new_hashlist->tail_ = new_bucket;
    new_hashlist->bucket_counts_ = 1;

    add_storage_hashlist(new_hashlist);
  }
  return true;
}
Developer: HeadhunterXamd, Project: foedus_code, Lines: 50, Source: log_mapper_impl.cpp

Example 7: ASSERT_ND

void LogMapper::send_bucket_partition_general(
  const Bucket* bucket,
  storage::StorageType storage_type,
  storage::PartitionId partition,
  const BufferPosition* positions) {
  uint64_t written = 0;
  uint32_t log_count = 0;
  uint32_t shortest_key_length = 0xFFFF;
  uint32_t longest_key_length = 0;
  // stitch the log entries in send buffer
  char* send_buffer = reinterpret_cast<char*>(tmp_send_buffer_slice_.get_block());
  const char* io_base = reinterpret_cast<const char*>(io_buffer_.get_block());
  ASSERT_ND(tmp_send_buffer_slice_.get_size() == kSendBufferSize);

  for (uint32_t i = 0; i < bucket->counts_; ++i) {
    uint64_t pos = from_buffer_position(positions[i]);
    const log::LogHeader* header = reinterpret_cast<const log::LogHeader*>(io_base + pos);
    ASSERT_ND(header->storage_id_ == bucket->storage_id_);
    uint16_t log_length = header->log_length_;
    ASSERT_ND(log_length > 0);
    ASSERT_ND(log_length % 8 == 0);
    if (written + log_length > kSendBufferSize) {
      // buffer full. send out.
      send_bucket_partition_buffer(
        bucket,
        partition,
        send_buffer,
        log_count,
        written,
        shortest_key_length,
        longest_key_length);
      log_count = 0;
      written = 0;
      shortest_key_length = 0xFFFF;
      longest_key_length = 0;
    }
    std::memcpy(send_buffer + written, header, header->log_length_);
    written += header->log_length_;
    ++log_count;
    update_key_lengthes(header, storage_type, &shortest_key_length, &longest_key_length);
  }
  send_bucket_partition_buffer(
    bucket,
    partition,
    send_buffer,
    log_count,
    written,
    shortest_key_length,
    longest_key_length);
}
Developer: HeadhunterXamd, Project: foedus_code, Lines: 50, Source: log_mapper_impl.cpp

Example 8: inserts_varlen_task

ErrorStack inserts_varlen_task(const proc::ProcArguments& args) {
  EXPECT_EQ(sizeof(uint32_t), args.input_len_);
  uint32_t id = *reinterpret_cast<const uint32_t*>(args.input_buffer_);
  EXPECT_NE(id, 2U);

  thread::Thread* context = args.context_;
  storage::hash::HashStorage hash(args.engine_, kName);
  ASSERT_ND(hash.exists());
  xct::XctManager* xct_manager = args.engine_->get_xct_manager();
  Epoch commit_epoch;

  WRAP_ERROR_CODE(xct_manager->begin_xct(context, xct::kSerializable));

  char buffer[16];
  std::memset(buffer, 0, sizeof(buffer));
  for (uint32_t i = 0; i < kRecords / 2U; ++i) {
    uint64_t rec = id * kRecords / 2U + i;
    // first 8 bytes, mod 17 to have next layers.
    assorted::write_bigendian<uint64_t>(static_cast<uint64_t>(rec % 17U), buffer);
    // and 1-4 bytes of decimal representation in text
    std::string str = std::to_string(rec);
    std::memcpy(buffer + sizeof(uint64_t), str.data(), str.size());
    uint16_t len = sizeof(uint64_t) + str.size();
    uint64_t data = rec + kDataAddendum;
    ErrorCode ret = hash.insert_record(context, buffer, len, &data, sizeof(data));
    EXPECT_EQ(kErrorCodeOk, ret) << i;
  }

  // CHECK_ERROR(hash.debugout_single_thread(args.engine_));
  WRAP_ERROR_CODE(xct_manager->precommit_xct(context, &commit_epoch));
  // CHECK_ERROR(hash.debugout_single_thread(args.engine_));
  WRAP_ERROR_CODE(xct_manager->wait_for_commit(commit_epoch));
  return kRetOk;
}
Developer: kumagi, Project: foedus_code, Lines: 34, Source: test_snapshot_hash.cpp

Example 9: verify_varlen_task

ErrorStack verify_varlen_task(const proc::ProcArguments& args) {
  thread::Thread* context = args.context_;
  storage::hash::HashStorage hash(args.engine_, kName);
  ASSERT_ND(hash.exists());
  CHECK_ERROR(hash.verify_single_thread(context));
  // CHECK_ERROR(hash.debugout_single_thread(args.engine_));
  xct::XctManager* xct_manager = args.engine_->get_xct_manager();
  WRAP_ERROR_CODE(xct_manager->begin_xct(context, xct::kSerializable));

  char buffer[16];
  std::memset(buffer, 0, sizeof(buffer));
  for (uint32_t i = 0; i < kRecords; ++i) {
    uint64_t rec = i;
    assorted::write_bigendian<uint64_t>(static_cast<uint64_t>(rec % 17U), buffer);
    std::string str = std::to_string(rec);
    std::memcpy(buffer + sizeof(uint64_t), str.data(), str.size());
    uint16_t len = sizeof(uint64_t) + str.size();

    uint64_t data;
    uint16_t capacity = sizeof(data);
    ErrorCode ret = hash.get_record(context, buffer, len, &data, &capacity);
    EXPECT_EQ(kErrorCodeOk, ret) << i;
    EXPECT_EQ(i + kDataAddendum, data) << i;
    EXPECT_EQ(sizeof(data), capacity) << i;
  }

  Epoch commit_epoch;
  ErrorCode committed = xct_manager->precommit_xct(context, &commit_epoch);
  EXPECT_EQ(kErrorCodeOk, committed);
  return kRetOk;
}
Developer: kumagi, Project: foedus_code, Lines: 31, Source: test_snapshot_hash.cpp

Example 10: ASSERT_ND

void HashDataPage::release_pages_recursive(
  const memory::GlobalVolatilePageResolver& page_resolver,
  memory::PageReleaseBatch* batch) {
  if (next_page_.volatile_pointer_.components.offset != 0) {
    HashDataPage* next = reinterpret_cast<HashDataPage*>(
      page_resolver.resolve_offset(next_page_.volatile_pointer_));
    ASSERT_ND(next->header().get_in_layer_level() == 0);
    ASSERT_ND(next->get_bin() == get_bin());
    next->release_pages_recursive(page_resolver, batch);
    next_page_.volatile_pointer_.components.offset = 0;
  }

  VolatilePagePointer volatile_id;
  volatile_id.word = header().page_id_;
  batch->release(volatile_id);
}
Developer: kumagi, Project: foedus_code, Lines: 16, Source: hash_page_impl.cpp

Example 11: verify_task

ErrorStack verify_task(const proc::ProcArguments& args) {
  thread::Thread* context = args.context_;
  storage::masstree::MasstreeStorage masstree(args.engine_, kName);
  ASSERT_ND(masstree.exists());
  CHECK_ERROR(masstree.verify_single_thread(context));
  xct::XctManager* xct_manager = args.engine_->get_xct_manager();
  WRAP_ERROR_CODE(xct_manager->begin_xct(context, xct::kSerializable));

  for (uint32_t i = 0; i < kRecords; ++i) {
    uint64_t rec = i;
    storage::masstree::KeySlice slice = storage::masstree::normalize_primitive<uint64_t>(rec);
    uint64_t data;
    uint16_t capacity = sizeof(data);
    ErrorCode ret = masstree.get_record_normalized(context, slice, &data, &capacity, true);
/*
    if (ret != kErrorCodeOk || rec != data) {
      CHECK_ERROR(masstree.verify_single_thread(context));
      CHECK_ERROR(masstree.debugout_single_thread(args.engine_));
      std::cout << "asdasd" << std::endl;
    }
*/
    EXPECT_EQ(kErrorCodeOk, ret) << i;
    EXPECT_EQ(rec, data) << i;
    EXPECT_EQ(sizeof(data), capacity) << i;
  }

  Epoch commit_epoch;
  ErrorCode committed = xct_manager->precommit_xct(context, &commit_epoch);
  EXPECT_EQ(kErrorCodeOk, committed);
  return kRetOk;
}
Developer: HeadhunterXamd, Project: foedus_code, Lines: 31, Source: test_snapshot_masstree.cpp

Example 12: LOG

ErrorStack TpccLoadTask::load_customers_in_district(Wid wid, Did did) {
  LOG(INFO) << "Loading Customer for DID=" << static_cast<int>(did) << ", WID=" << wid
    << ": " << engine_->get_memory_manager()->dump_free_memory_stat();

  // insert to customers_secondary at the end after sorting
  memory::AlignedMemory secondary_keys_buffer;
  struct Secondary {
    char  last_[17];      // +17 -> 17
    char  first_[17];     // +17 -> 34
    char  padding_[2];    // +2 -> 36
    Cid   cid_;           // +4 -> 40
    static bool compare(const Secondary &left, const Secondary& right) ALWAYS_INLINE {
      int cmp = std::memcmp(left.last_, right.last_, sizeof(left.last_));
      if (cmp < 0) {
        return true;
      } else if (cmp > 0) {
        return false;
      }
      cmp = std::memcmp(left.first_, right.first_, sizeof(left.first_));
      if (cmp < 0) {
        return true;
      } else if (cmp > 0) {
        return false;
      }
      ASSERT_ND(left.cid_ != right.cid_);
      if (left.cid_ < right.cid_) {
        return true;
      } else {
        return false;
      }
    }
  };
Developer: vanwaals, Project: foedus_code, Lines: 32, Source: test_masstree_tpcc.cpp
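
The nested Secondary struct above shows the usual pattern for sorting composite keys: compare field by field with std::memcmp and fall through to the next field on a tie, with the numeric id as the final tie-breaker. A self-contained sketch of the same idiom with std::sort (illustrative names, not the TPC-C loader types):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Composite-key comparator in the style of Secondary::compare:
// last name, then first name, then numeric id as the final tie-breaker.
struct CustomerKey {
  char last_[17];
  char first_[17];
  uint32_t cid_;
};

bool compare_keys(const CustomerKey& left, const CustomerKey& right) {
  int cmp = std::memcmp(left.last_, right.last_, sizeof(left.last_));
  if (cmp != 0) {
    return cmp < 0;
  }
  cmp = std::memcmp(left.first_, right.first_, sizeof(left.first_));
  if (cmp != 0) {
    return cmp < 0;
  }
  return left.cid_ < right.cid_;
}

int main() {
  std::vector<CustomerKey> keys(3);  // value-initialized, so key bytes start zeroed
  std::strcpy(keys[0].last_, "BARBAR");  keys[0].cid_ = 2;
  std::strcpy(keys[1].last_, "BARBAR");  keys[1].cid_ = 1;
  std::strcpy(keys[2].last_, "ABLEPR");  keys[2].cid_ = 3;
  std::sort(keys.begin(), keys.end(), compare_keys);
  assert(keys[0].cid_ == 3);  // ABLEPR sorts before BARBAR
  assert(keys[1].cid_ == 1);  // equal names fall back to cid_ order
  return 0;
}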

Example 13: ASSERT_ND

TpccLoadTask::TpccLoadTask(char* ctime_buffer) {
  // Initialize timestamp (for date columns)
  time_t t_clock;
  ::time(&t_clock);
  timestamp_ = ::ctime_r(&t_clock, ctime_buffer);
  ASSERT_ND(timestamp_);
}
Developer: vanwaals, Project: foedus_code, Lines: 7, Source: test_masstree_tpcc.cpp

Example 14: while

void ThreadPimpl::flush_retired_volatile_page(
  uint16_t node,
  Epoch current_epoch,
  memory::PagePoolOffsetAndEpochChunk* chunk) {
  if (chunk->size() == 0) {
    return;
  }
  uint32_t safe_count = chunk->get_safe_offset_count(current_epoch);
  while (safe_count < chunk->size() / 10U) {
    LOG(WARNING) << "Thread-" << id_ << " can return only "
      << safe_count << " out of " << chunk->size()
      << " retired pages to node-" << node  << " in epoch=" << current_epoch
      << ". This means the thread received so many retired pages in a short time period."
      << " Will adavance an epoch to safely return the retired pages."
      << " This should be a rare event.";
    engine_->get_xct_manager()->advance_current_global_epoch();
    current_epoch = engine_->get_xct_manager()->get_current_global_epoch();
    LOG(INFO) << "okay, advanced epoch. now we should be able to return more pages";
    safe_count = chunk->get_safe_offset_count(current_epoch);
  }

  VLOG(0) << "Thread-" << id_ << " batch-returning retired volatile pages to node-" << node
    << " safe_count/count=" << safe_count << "/" << chunk->size() << ". epoch=" << current_epoch;
  memory::PagePool* volatile_pool
    = engine_->get_memory_manager()->get_node_memory(node)->get_volatile_pool();
  volatile_pool->release(safe_count, chunk);
  ASSERT_ND(!chunk->full());
}
Developer: kumagi, Project: foedus_code, Lines: 28, Source: thread_pimpl.cpp

Example 15: engine_

DivvyupPageGrabBatch::DivvyupPageGrabBatch(Engine* engine)
: engine_(engine), node_count_(engine->get_options().thread_.group_count_) {
  chunks_ = new PagePoolOffsetChunk[node_count_];
  for (uint16_t node = 0; node < node_count_; ++node) {
    ASSERT_ND(chunks_[node].empty());
  }
}
Developer: kumagi, Project: foedus_code, Lines: 7, Source: page_pool.cpp


Note: The ASSERT_ND function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; refer to each project's License before distributing or using the code. Do not reproduce without permission.