

C++ MemoryManager Class Code Examples

This article collects typical usage examples of the C++ MemoryManager class. If you have been wondering what MemoryManager is for and how it is used in practice, the curated examples below may help.


Fifteen MemoryManager code examples are shown below, ordered by popularity by default.

Example 1: read

uint64_t CycleCounter::read()
{
	if (emulationLevel >= EMEX_EXTRA_SMALL_5XX)
	{ 
		MemoryManager* mm = deviceHandle->getMemoryManager();
		EemMemoryAccess* ema = (EemMemoryAccess*)mm->getMemoryArea("EEM");

		union CycleCount { struct { uint32_t low, high; }; uint64_t value; };
		CycleCount cycleCount;

		ema->readEemRegister(CCNT0L, &cycleCount.low) && ema->sync();
		ema->readEemRegister(CCNT0H, &cycleCount.high) && ema->sync();
		
		counterValue = 0;

		uint32_t factor = 1;
		uint32_t lsfr2hex[16] = {0x0, 0x1, 0x2, 0x7, 0x5, 0x3, 0x8, 0xb, 0xe, 0x6, 0x4, 0xa, 0xd, 0x9, 0xc, 0};
		for (int i = 0; i < 10; ++i)
		{
			counterValue += factor * lsfr2hex[(cycleCount.value & 0xf)];
			cycleCount.value >>= 4;
			factor *= 15;
		}	
	}
Author: AlessonDavid, Project: libMSP430, Lines: 24, Source: CycleCounter.cpp
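
In the snippet above, each 4-bit digit of the raw EEM counter is an LFSR state rather than a binary digit, which is why the loop multiplies the place value by 15 instead of 16. A minimal, self-contained sketch of just that decoding step (same lookup table, no device access; a 64-bit factor is used here so that 15^9 does not overflow) could look like this:

#include <cstdint>
#include <iostream>

// Decode a counter whose ten 4-bit digits are LFSR states (15 valid states each)
// into an ordinary integer, mirroring the loop in CycleCounter::read() above.
uint64_t decode_lfsr_counter(uint64_t raw)
{
    static const uint32_t lsfr2hex[16] =
        {0x0, 0x1, 0x2, 0x7, 0x5, 0x3, 0x8, 0xb, 0xe, 0x6, 0x4, 0xa, 0xd, 0x9, 0xc, 0};
    uint64_t value = 0;
    uint64_t factor = 1;                        // 64-bit so 15^9 does not overflow
    for (int i = 0; i < 10; ++i)
    {
        value += factor * lsfr2hex[raw & 0xf];  // map the LFSR digit back to a count
        raw >>= 4;
        factor *= 15;                           // each digit covers 15 states, not 16
    }
    return value;
}

int main()
{
    std::cout << decode_lfsr_counter(0x21) << std::endl; // digits {1, 2} -> 1 + 2*15 = 31
}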

Example 2: InitializeGlobalOptions

namespace globalF4MPI{
	MemoryManager MonomialAllocator;
//	PolynomMap globalPolynomMap;

	GlobalOptions globalOptions;
	// Initialize the global variables once they have been defined by the parser
	void InitializeGlobalOptions(){
		CMonomial::setOrder((CMonomial::Order)globalOptions.monomOrder, globalOptions.monomOrderParam);
		CModular::setMOD(globalOptions.mod);
		CMonomial::setNumberOfVariables(globalOptions.numberOfVariables);
		globalF4MPI::MonomialAllocator.setSize(CMonomial::degreessize);
		PODvecSize<CInternalMonomial>::setvalsize(CMonomial::degreessize);
		MonomialAllocator.reset();
	}
	void Finalize(){
		MonomialAllocator.reset();
	}
}
Author: galkinvv, Project: gb_algs, Lines: 18, Source: globalf4.cpp

Example 3: eval_map

    Value eval_map(
        MemoryManager                  mm,
        const Functional::value_vec_t& secondary_args,
        boost::any&                    map_state,
        Value                          subvalue
    ) const
    {
        /* If the subvalue we've been given is NULL, likewise return a NULL
         * subvalue. We can't change anything. */
        if (! subvalue) {
            return Value();
        }

        if (subvalue.type() != Value::STRING) {
            return Value();
        }

        ConstByteString text = subvalue.as_string();
        boost::shared_ptr<vector<char> > result(new vector<char>());
        // Ensure that result.data() is non-null, even if we never insert
        // anything.
        result->reserve(1);

        // value_to_data() ensures that a copy is associated with the memory
        // pool and will be deleted when the memory pool goes away.
        value_to_data(result, mm.ib());

        boost::regex_replace(
            back_inserter(*result),
            text.const_data(), text.const_data() + text.length(),
            m_expression,
            m_replacement
        );

        return Value::create_string(
            mm,
            subvalue.name(), subvalue.name_length(),
            ByteString::create_alias(
                mm,
                result->data(), result->size()
            )
        );
    }
Author: robguima, Project: ironbee, Lines: 43, Source: standard_string.cpp
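
The notable pattern in eval_map() is streaming the rewritten bytes through a back_inserter into a growable char buffer instead of building an intermediate string. The same idea works with the standard library; the standalone sketch below uses std::regex purely for illustration (the IronBee code above uses boost::regex together with IronBee's Value and ByteString types):

#include <iostream>
#include <iterator>
#include <regex>
#include <string>
#include <vector>

int main()
{
    const std::string text = "foo bar foo";
    const std::regex expression("foo");
    const std::string replacement("baz");

    std::vector<char> result;
    result.reserve(1); // keep result.data() usable even if nothing is ever written

    // Write the replaced output directly into the vector through an output iterator,
    // the same shape as the boost::regex_replace call in eval_map() above.
    std::regex_replace(std::back_inserter(result),
                       text.begin(), text.end(),
                       expression, replacement);

    std::cout << std::string(result.data(), result.size()) << std::endl; // "baz bar baz"
}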

Example 4: create

ActionInstance ActionInstance::create(
    MemoryManager memory_manager,
    Context       context,
    ConstAction   action,
    const char*   parameters
)
{
    ib_action_inst_t* actioninst;

    throw_if_error(
        ib_action_inst_create(
            &actioninst,
            memory_manager.ib(),
            context.ib(),
            action.ib(),
            parameters
        )
    );

    return ActionInstance(actioninst);
}
Author: B0SB05, Project: ironbee, Lines: 21, Source: action.cpp

Example 5: TEST

TEST(TestMemoryManager, Allocations)
{
    ScopedMemoryPoolLite smpl;
    MemoryManager mm = MemoryPoolLite(smpl);
    void* p;
    char* c;

    ASSERT_TRUE(mm);

    p = NULL;
    p = mm.alloc(10);
    EXPECT_TRUE(p);

    p = NULL;
    p = mm.allocate<int>();
    EXPECT_TRUE(p);

    c = NULL;
    c = reinterpret_cast<char*>(mm.calloc(10));
    EXPECT_EQ(10L, count(c, c+10, '\0'));

    c = NULL;
    c = reinterpret_cast<char*>(mm.calloc(5, 7));
    EXPECT_EQ(35L, count(c, c+35, '\0'));

    static const string c_example = "Hello World";

    c = NULL;
    c = mm.strdup("Hello World");
    EXPECT_EQ(c_example, c);

    c = NULL;
    c = reinterpret_cast<char*>(
        mm.memdup(c_example.data(), c_example.size())
    );
    EXPECT_EQ(c_example, string(c, c_example.size()));

    c = NULL;
    c = mm.memdup_to_str(c_example.data(), c_example.size());
    EXPECT_EQ(c_example, c);
}
Author: PutiZL, Project: ironbee, Lines: 41, Source: test_memory_manager.cpp

Example 6: create

OperatorInstance OperatorInstance::create(
    MemoryManager memory_manager,
    Context context,
    ConstOperator op,
    ib_flags_t required_capabilities,
    const char* parameters
)
{
    ib_operator_inst_t* opinst;

    throw_if_error(
        ib_operator_inst_create(
            &opinst,
            memory_manager.ib(),
            context.ib(),
            op.ib(),
            required_capabilities,
            parameters
        )
    );

    return OperatorInstance(opinst);
}
Author: B0SB05, Project: ironbee, Lines: 23, Source: operator.cpp
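
Examples 4 and 6 follow the same thin-wrapper pattern: call the underlying C creation function, pass its status to throw_if_error(), and wrap the resulting raw pointer in a C++ handle. The sketch below shows the status-to-exception step generically; it is not IronBee's actual throw_if_error(), and the status values and message are invented for illustration:

#include <stdexcept>
#include <string>

// Hypothetical status codes standing in for a C library's return values.
enum status_t { STATUS_OK = 0, STATUS_EINVAL = 1, STATUS_EALLOC = 2 };

// Convert a C-style status code into a C++ exception, the idiom used by
// ActionInstance::create() and OperatorInstance::create() above.
void throw_if_error(status_t rc, const std::string& what)
{
    if (rc != STATUS_OK) {
        throw std::runtime_error(what + " failed with status " + std::to_string(rc));
    }
}

int main()
{
    try {
        throw_if_error(STATUS_EALLOC, "ib_action_inst_create");
    } catch (const std::exception& e) {
        // e.what() == "ib_action_inst_create failed with status 2"
    }
}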

Example 7: CodeGen

  void CodeGen(MemoryManager& mm, handlers::CodeHandler& ch)
  {
    size_t ml = mm.RequestLocation(id);

    std::stringstream* ss = new std::stringstream();

    *ss << "buff[" << ml << "] = ";

    auto in_id = in_ids.begin();
    PrintAccess(*in_id, mm, *ss);
    in_id++;

    size_t or_count = 0;

    for(in_id = in_id;in_id != in_ids.end();in_id++)
    {
      if(or_count > or_limit)
      {
        *ss << ";";
        ch.AddEntry(ss->str());
        delete ss;
        ss = new std::stringstream();
        *ss << "buff[" << ml << "] = buff[" << ml << "]";
        or_count = or_count - 1;
      }

      *ss << " || ";
      PrintAccess(*in_id, mm , *ss);
      or_count++;
    }

    *ss << ";";

    ch.AddEntry(ss->str());
    delete ss;
  }
Author: AMPedGames, Project: libgdl, Lines: 36, Source: or_entry.hpp
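
The or_limit check above splits one long disjunction into several generated statements that keep accumulating into the same buffer slot. The emitted entries end up looking like the following (the buffer indices and the value of or_limit are invented for illustration):

// Hypothetical CodeHandler entries for ml = 4 with inputs buff[7], buff[9],
// buff[11], buff[13] and a small or_limit; the follow-up entry re-reads buff[4]
// so the OR chain continues across statements.
bool buff[16] = {};
void emitted_code()
{
    buff[4] = buff[7] || buff[9] || buff[11];
    buff[4] = buff[4] || buff[13];
}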

Example 8: allocate_grouped_buffer

SerializedBuffer SerializedBuffer::allocate_grouped_buffer(
	MemoryManager &memory_manager,
	size_type maximum_record_count,
	size_type maximum_group_count,
	size_type total_key_size,
	size_type total_value_size,
	identifier_type target_node)
{
	size_type buffer_size = 0;
	// Common header
	const ptrdiff_t common_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedBufferHeader);
	// Keys
	const ptrdiff_t keys_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedKeysHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t keys_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_key_size, alignof(size_type)); // data
	const ptrdiff_t keys_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);  // offsets
	// Values
	const ptrdiff_t values_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedValuesHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t values_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_value_size, alignof(size_type)); // data
	const ptrdiff_t values_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_record_count + 1) * sizeof(size_type);   // offsets
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);   // group_offsets

	LockedMemoryReference locked_reference;
	if(target_node == TARGET_NODE_UNSPECIFIED){
		locked_reference = memory_manager.allocate(buffer_size).lock();
	}else{
		locked_reference =
			memory_manager.allocate(buffer_size, target_node).lock();
	}
	const auto ptr =
		reinterpret_cast<uintptr_t>(locked_reference.pointer());

	const auto common_header =
		reinterpret_cast<SerializedBufferHeader *>(
			ptr + common_header_ptrdiff);
	common_header->key_buffer_size =
		static_cast<size_type>(
			values_header_ptrdiff - keys_header_ptrdiff);
	common_header->value_buffer_size =
		static_cast<size_type>(buffer_size - values_header_ptrdiff);

	const auto keys_header =
		reinterpret_cast<SerializedKeysHeader *>(
			ptr + keys_header_ptrdiff);
	keys_header->data_buffer_size =
		static_cast<size_type>(keys_offsets_ptrdiff - keys_data_ptrdiff);
	keys_header->record_count = maximum_group_count;

	const auto values_header =
		reinterpret_cast<SerializedValuesHeader *>(
			ptr + values_header_ptrdiff);
	values_header->data_buffer_size =
		static_cast<size_type>(
			values_offsets_ptrdiff - values_data_ptrdiff);
	values_header->maximum_record_count = maximum_record_count;
	values_header->actual_record_count = 0;

	return SerializedBuffer(locked_reference);
}
Author: ashigeru, Project: m3bp, Lines: 67, Source: serialized_buffer.cpp
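
align_ceil() is not part of this excerpt; a plausible definition, assumed here from how it is used above, rounds a size up to the next multiple of the requested alignment:

#include <cstddef>

// Presumed shape of the align_ceil() helper used in allocate_grouped_buffer():
// round `size` up to the next multiple of `alignment` (alignment must be non-zero).
inline std::size_t align_ceil(std::size_t size, std::size_t alignment)
{
    return (size + alignment - 1) / alignment * alignment;
}

// e.g. align_ceil(13, 8) == 16 and align_ceil(16, 8) == 16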

Example 9: cleanUp

inline void ContentLeafNameTypeVector::cleanUp()
{
	fMemoryManager->deallocate(fLeafNames); //delete [] fLeafNames;
	fMemoryManager->deallocate(fLeafTypes); //delete [] fLeafTypes;
}
Author: prakash028, Project: newscaster, Lines: 5, Source: ContentLeafNameTypeVector.hpp

Example 10: MemoryManager

MemoryManager::MemoryManager(const MemoryManager& orig):
    MemoryManager(orig.getSize()){}
Author: nogenem, Project: Sistemas-Operacionais-I-2015-2, Lines: 2, Source: MemoryManager.cpp

Example 11: FastMatmulRecursive

void FastMatmulRecursive(LockAndCounter& locker, MemoryManager<Scalar>& mem_mngr, Matrix<Scalar>& A, Matrix<Scalar>& B, Matrix<Scalar>& C, int total_steps, int steps_left, int start_index, double x, int num_threads, Scalar beta) {
    // Update multipliers
    C.UpdateMultiplier(A.multiplier());
    C.UpdateMultiplier(B.multiplier());
    A.set_multiplier(Scalar(1.0));
    B.set_multiplier(Scalar(1.0));
    // Base case for recursion
    if (steps_left == 0) {
        MatMul(A, B, C);
        return;
    }

    Matrix<Scalar> A11 = A.Subblock(2, 2, 1, 1);
    Matrix<Scalar> A12 = A.Subblock(2, 2, 1, 2);
    Matrix<Scalar> A21 = A.Subblock(2, 2, 2, 1);
    Matrix<Scalar> A22 = A.Subblock(2, 2, 2, 2);
    Matrix<Scalar> B11 = B.Subblock(2, 2, 1, 1);
    Matrix<Scalar> B12 = B.Subblock(2, 2, 1, 2);
    Matrix<Scalar> B21 = B.Subblock(2, 2, 2, 1);
    Matrix<Scalar> B22 = B.Subblock(2, 2, 2, 2);
    Matrix<Scalar> C11 = C.Subblock(2, 2, 1, 1);
    Matrix<Scalar> C12 = C.Subblock(2, 2, 1, 2);
    Matrix<Scalar> C21 = C.Subblock(2, 2, 2, 1);
    Matrix<Scalar> C22 = C.Subblock(2, 2, 2, 2);


    // Matrices to store the results of multiplications.
#ifdef _PARALLEL_
    Matrix<Scalar> M1(mem_mngr.GetMem(start_index, 1, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M2(mem_mngr.GetMem(start_index, 2, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M3(mem_mngr.GetMem(start_index, 3, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M4(mem_mngr.GetMem(start_index, 4, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M5(mem_mngr.GetMem(start_index, 5, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M6(mem_mngr.GetMem(start_index, 6, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M7(mem_mngr.GetMem(start_index, 7, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M8(mem_mngr.GetMem(start_index, 8, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
#else
    Matrix<Scalar> M1(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M2(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M3(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M4(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M5(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M6(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M7(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M8(C11.m(), C11.n(), C.multiplier());
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    bool sequential1 = should_launch_task(8, total_steps, steps_left, start_index, 1, num_threads);
    bool sequential2 = should_launch_task(8, total_steps, steps_left, start_index, 2, num_threads);
    bool sequential3 = should_launch_task(8, total_steps, steps_left, start_index, 3, num_threads);
    bool sequential4 = should_launch_task(8, total_steps, steps_left, start_index, 4, num_threads);
    bool sequential5 = should_launch_task(8, total_steps, steps_left, start_index, 5, num_threads);
    bool sequential6 = should_launch_task(8, total_steps, steps_left, start_index, 6, num_threads);
    bool sequential7 = should_launch_task(8, total_steps, steps_left, start_index, 7, num_threads);
    bool sequential8 = should_launch_task(8, total_steps, steps_left, start_index, 8, num_threads);
#else
    bool sequential1 = false;
    bool sequential2 = false;
    bool sequential3 = false;
    bool sequential4 = false;
    bool sequential5 = false;
    bool sequential6 = false;
    bool sequential7 = false;
    bool sequential8 = false;
#endif



    // M1 = (1 * A11) * (1 * B11)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential1) shared(mem_mngr, locker) untied
    {
#endif
        M1.UpdateMultiplier(Scalar(1));
        M1.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A11, B11, M1, total_steps, steps_left - 1, (start_index + 1 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 1, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M2 = (1 * A12) * (1 * B21)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential2) shared(mem_mngr, locker) untied
    {
#endif
        M2.UpdateMultiplier(Scalar(1));
        M2.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A12, B21, M2, total_steps, steps_left - 1, (start_index + 2 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
//......... remainder of this function omitted .........
Author: liyancas, Project: fast-matmul, Lines: 101, Source: classical222.hpp
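
The file name classical222.hpp indicates this is the classical 2x2x2 blocked algorithm: eight recursive sub-multiplications (M1..M8) rather than Strassen's seven. M1 and M2 match the comments shown above; the pairing of the remaining products follows the conventional ordering and is assumed here, with the omitted part of the file presumably assembling C from them. A scalar sketch of the same eight-product scheme:

#include <array>

using Mat2 = std::array<std::array<double, 2>, 2>;

// Classical 2x2 block multiplication with the eight products M1..M8
// grouped the way the recursive calls above launch them.
Mat2 classical222(const Mat2& A, const Mat2& B)
{
    Mat2 C{};
    C[0][0] = A[0][0] * B[0][0] + A[0][1] * B[1][0]; // C11 = M1 + M2
    C[0][1] = A[0][0] * B[0][1] + A[0][1] * B[1][1]; // C12 = M3 + M4
    C[1][0] = A[1][0] * B[0][0] + A[1][1] * B[1][0]; // C21 = M5 + M6
    C[1][1] = A[1][0] * B[0][1] + A[1][1] * B[1][1]; // C22 = M7 + M8
    return C;
}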

Example 12: create

 /**
  * Create new list.
  *
  * Creates a new empty list using @a memory_manager for memory.
  *
  * @param[in] memory_manager Memory manager to use.
  * @return Empty List.
  **/
 static List create(MemoryManager memory_manager)
 {
     ib_list_t* ib_list;
     throw_if_error(ib_list_create(&ib_list, memory_manager.ib()));
     return List(ib_list);
 }
Author: B0SB05, Project: ironbee, Lines: 14, Source: list.hpp

Example 13: process

		void process() {
			while(nextOperation) {
				auto operation = nextOperation;
				switch(operation->type) {
				case OperationType::Add: onAdd(operation); break;
				case OperationType::Remove: onRemove(operation); break;
				case OperationType::RemoveAll: onRemoveAll(operation); break;
				default:
					throw std::runtime_error("Unexpected EntityOperation type");
				}

				nextOperation = operation->nextOperation;
				operation->~T();
				memoryManager->free(sizeof(T), alignof(T), operation);
			}
			nextOperation = nullptr;
			lastOperation = nullptr;
		}
Author: Lusito, Project: ECS-tasy, Lines: 18, Source: EntityOperations.hpp
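
process() ends each operation's lifetime by hand: it runs the destructor explicitly and then returns the raw storage with a size- and alignment-aware free. A generic sketch of that pattern follows; pool_free is a hypothetical stand-in for memoryManager->free() and simply forwards to the global sized, aligned operator delete (which assumes the storage came from the matching aligned operator new):

#include <cstddef>
#include <new>

struct Operation { int type; Operation* nextOperation; };

// Hypothetical stand-in for memoryManager->free(size, align, ptr).
static void pool_free(std::size_t size, std::size_t align, void* p)
{
    ::operator delete(p, size, std::align_val_t(align));
}

static void release(Operation* op)
{
    op->~Operation();                                      // run the destructor explicitly
    pool_free(sizeof(Operation), alignof(Operation), op);  // then hand back the raw storage
}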

Example 14: nativeFinalize

/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    destruct
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_nativeFinalize
  (JNIEnv *env, jobject obj)
{
	mm.Erase(env, obj);
}
Author: AbdelghaniDr, Project: mirror, Lines: 10, Source: Vector.cpp

Example 15: Vector

/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    construct
 * Signature: (I)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_construct
  (JNIEnv *env, jobject obj, jint size)
{
	mm.Insert(env, obj, new Vector(size));
}
Author: AbdelghaniDr, Project: mirror, Lines: 10, Source: Vector.cpp


Note: The MemoryManager class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not reproduce without permission.