

Python MemConfig.create_mem_ctrl Method Code Examples

This article collects typical usage examples of the MemConfig.create_mem_ctrl method in Python. If you are wondering how to use MemConfig.create_mem_ctrl, or what calling it looks like in practice, the hand-picked examples below should help. You can also explore further usage examples from the MemConfig module that the method belongs to.


Four code examples of the MemConfig.create_mem_ctrl method are shown below, sorted by popularity by default.
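All four examples call create_mem_ctrl with the same six positional arguments: the controller class, the address range it serves, the controller index, the total number of controllers over that range, the number of interleave bits, and the interleave granularity. As a rough orientation before the examples, here is a minimal sketch of such a loop in a gem5-style configuration script; the helper name create_channel_ctrls and the option attributes (mem_type, mem_channels) are assumptions modelled on the examples below, not a complete configuration.

import math

import MemConfig

def create_channel_ctrls(options, system, mem_range):
    """Sketch: one interleaved memory controller per channel for one range."""
    mem_cls = MemConfig.get(options.mem_type)      # look up controller class by name
    nbr_ctrls = options.mem_channels               # assumed number of channels
    intlv_bits = int(math.log(nbr_ctrls, 2))       # channel-select bits
    # Interleave on 128 bytes or the cache line size, whichever is larger
    intlv_size = max(128, system.cache_line_size.value)

    ctrls = []
    for i in range(nbr_ctrls):
        # create_mem_ctrl(cls, range, index, nbr_mem_ctrls, intlv_bits, intlv_size)
        ctrls.append(MemConfig.create_mem_ctrl(mem_cls, mem_range, i, nbr_ctrls,
                                               intlv_bits, intlv_size))
    return ctrls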

Example 1: config_hybrid_mem

# Required import: import MemConfig [as alias]
# Or: from MemConfig import create_mem_ctrl [as alias]
def config_hybrid_mem(options, system):
    """
    Assign proper address ranges for DRAM and NVM controllers.
    Create memory controllers and add their shared bus to the system.
    """
    system.thnvm_bus = VirtualXBar()
    mem_ctrls = []

    # The default behaviour is to interleave memory channels on 128
    # byte granularity, or cache line granularity if larger than 128
    # byte. This value is based on the locality seen across a large
    # range of workloads.
    intlv_size = max(128, system.cache_line_size.value)

    total_size = Addr(options.mem_size)
    dram_size = pow(2, options.page_bits) * options.ptt_length

    if dram_size < total_size.value:
        nvm_cls = MemConfig.get(options.nvm_type)
        nvm_range = AddrRange(0, total_size - dram_size - 1)
        nvm_ctrl = MemConfig.create_mem_ctrl(nvm_cls, nvm_range,
                                             0, 1, 0, intlv_size)
        # Set the number of ranks based on the command-line
        # options if it was explicitly set
        if issubclass(nvm_cls, DRAMCtrl) and options.mem_ranks:
            nvm_ctrl.ranks_per_channel = options.mem_ranks

        mem_ctrls.append(nvm_ctrl)

    if dram_size > 0:
        dram_cls = MemConfig.get(options.dram_type)
        dram_range = AddrRange(total_size - dram_size, total_size - 1)
        dram_ctrl = MemConfig.create_mem_ctrl(dram_cls, dram_range,
                                              0, 1, 0, intlv_size)
        # Set the number of ranks based on the command-line
        # options if it was explicitly set
        if issubclass(dram_cls, DRAMCtrl) and options.mem_ranks:
            dram_ctrl.ranks_per_channel = options.mem_ranks

        mem_ctrls.append(dram_ctrl)

    system.mem_ctrls = mem_ctrls

    # Connect the controllers to the THNVM bus
    for i in xrange(len(system.mem_ctrls)):
        system.mem_ctrls[i].port = system.thnvm_bus.master

    system.thnvm_bus.slave = system.membus.master
Developer: CMU-SAFARI, Project: ThyNVM, Lines: 50, Source: HybridMemConfig.py
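In this example the DRAM share of the hybrid memory is derived from options.page_bits and options.ptt_length, and the address space is split so that the NVM controller covers the low addresses and the DRAM controller the top of the range. A small worked sketch of that arithmetic, using hypothetical option values:

# Hypothetical values, for illustration only.
page_bits = 12                        # assumed 4 KiB pages
ptt_length = 262144                   # assumed page translation table length
total_size = 8 * 1024**3              # assumed 8 GiB total memory

dram_size = pow(2, page_bits) * ptt_length              # 1 GiB backed by DRAM
nvm_range = (0, total_size - dram_size - 1)             # low 7 GiB -> NVM controller
dram_range = (total_size - dram_size, total_size - 1)   # top 1 GiB -> DRAM controller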

Example 2: setup_memory_controllers

# Required import: import MemConfig [as alias]
# Or: from MemConfig import create_mem_ctrl [as alias]
def setup_memory_controllers(system, ruby, dir_cntrls, options):
    ruby.block_size_bytes = options.cacheline_size
    ruby.memory_size_bits = 48
    block_size_bits = int(math.log(options.cacheline_size, 2))

    if options.numa_high_bit:
        numa_bit = options.numa_high_bit
    else:
        # if the numa_bit is not specified, set the directory bits as the
        # lowest bits above the block offset bits, and the numa_bit as the
        # highest of those directory bits
        dir_bits = int(math.log(options.num_dirs, 2))
        numa_bit = block_size_bits + dir_bits - 1

    index = 0
    mem_ctrls = []
    crossbars = []

    # Sets bits to be used for interleaving.  Creates memory controllers
    # attached to a directory controller.  A separate controller is created
    # for each address range as the abstract memory can handle only one
    # contiguous address range as of now.
    for dir_cntrl in dir_cntrls:
        dir_cntrl.directory.numa_high_bit = numa_bit

        crossbar = None
        if len(system.mem_ranges) > 1:
            crossbar = NoncoherentXBar()
            crossbars.append(crossbar)
            dir_cntrl.memory = crossbar.slave

        for r in system.mem_ranges:
            mem_ctrl = MemConfig.create_mem_ctrl(
                MemConfig.get(options.mem_type), r, index, options.num_dirs,
                int(math.log(options.num_dirs, 2)), options.cacheline_size)

            mem_ctrls.append(mem_ctrl)

            if crossbar != None:
                mem_ctrl.port = crossbar.master
            else:
                mem_ctrl.port = dir_cntrl.memory

        index += 1

    system.mem_ctrls = mem_ctrls

    if len(crossbars) > 0:
        ruby.crossbars = crossbars
Developer: MingjunZhou, Project: gem5, Lines: 51, Source: Ruby.py
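When numa_high_bit is not given, this function places the directory-select bits directly above the block-offset bits and uses the highest of them as numa_high_bit. A quick worked sketch with assumed values:

# Hypothetical values, for illustration only.
import math

cacheline_size = 64                                   # bytes
num_dirs = 4                                          # directory controllers

block_size_bits = int(math.log(cacheline_size, 2))    # 6 block-offset bits
dir_bits = int(math.log(num_dirs, 2))                 # 2 directory-select bits
numa_bit = block_size_bits + dir_bits - 1             # 7: bits 6..7 pick the directory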

Example 3: create_system

# Required import: import MemConfig [as alias]
# Or: from MemConfig import create_mem_ctrl [as alias]

#......... part of the code is omitted here .........
            dir_version = i + num_cpu_dirs

            dir_size = MemorySize('0B')
            dir_size.value = mem_module_size

            pf = ProbeFilter(size = pf_size, assoc = 4,
                             start_index_bit = pf_start_bit)

            dev_dir_cntrl = Directory_Controller(version = dir_version,
                                 directory = \
                                 RubyDirectoryMemory( \
                                            version = dir_version,
                                            size = dir_size,
                                            numa_high_bit = \
                                            options.numa_high_bit,
                                            device_directory = True),
                                 probeFilter = pf,
                                 probe_filter_enabled = options.pf_on,
                                 full_bit_dir_enabled = options.dir_on,
                                 ruby_system = ruby_system)

            if options.recycle_latency:
                dev_dir_cntrl.recycle_latency = options.recycle_latency

            exec("ruby_system.dev_dir_cntrl%d = dev_dir_cntrl" % i)
            dev_dir_cntrls.append(dev_dir_cntrl)

            # Connect the directory controller to the network
            dev_dir_cntrl.forwardFromDir = ruby_system.network.slave
            dev_dir_cntrl.responseFromDir = ruby_system.network.slave
            dev_dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

            dev_dir_cntrl.unblockToDir = ruby_system.network.master
            dev_dir_cntrl.responseToDir = ruby_system.network.master
            dev_dir_cntrl.requestToDir = ruby_system.network.master
            dev_dir_cntrl.dmaRequestToDir = ruby_system.network.master

            dev_mem_ctrl = MemConfig.create_mem_ctrl(
                MemConfig.get(options.mem_type), system.gpu.gpu_memory_range,
                i, options.num_dev_dirs, int(math.log(options.num_dev_dirs, 2)),
                options.cacheline_size)
            dev_mem_ctrl.port = dev_dir_cntrl.memory
            dev_mem_ctrls.append(dev_mem_ctrl)

        system.dev_mem_ctrls = dev_mem_ctrls
    else:
        # Since there are no device directories, use CPU directories
        # Fix up the memory sizes of the CPU directories
        num_dirs = len(dir_cntrls)
        add_gpu_mem = gpu_phys_mem_size / num_dirs
        for cntrl in dir_cntrls:
            new_size = cntrl.directory.size.value + add_gpu_mem
            cntrl.directory.size.value = new_size

    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)

    gpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc + 1,
                               icache = cache,
                               dcache = cache,
                               max_outstanding_requests = 64,
                               support_inst_reqs = False,
                               ruby_system = ruby_system,
                               connect_to_io = False)

    gpu_ce_cntrl = GPUCopyDMA_Controller(version = 1,
                                  sequencer = gpu_ce_seq,
                                  number_of_TBEs = 256,
                                  ruby_system = ruby_system)

    ruby_system.l1_cntrl_ce = gpu_ce_cntrl

    all_sequencers.append(cpu_ce_seq)
    all_sequencers.append(gpu_ce_seq)

    gpu_ce_cntrl.responseFromDir = ruby_system.network.master
    gpu_ce_cntrl.reqToDirectory = ruby_system.network.slave

    complete_cluster = Cluster(intBW = 32, extBW = 32)
    complete_cluster.add(cpu_ce_cntrl)
    complete_cluster.add(gpu_ce_cntrl)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)

    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dev_dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dma_cntrls:
        complete_cluster.add(cntrl)

    for cluster in l2_clusters:
        complete_cluster.add(cluster)

    return (all_sequencers, dir_cntrls, complete_cluster)
Developer: Urmish, Project: CPU-GPU-Coherence, Lines: 104, Source: VI_hammer_split.py
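In this example every device directory receives its own memory controller over the shared GPU memory range, with the controller index, the number of device directories, and the cache line size determining the interleaving. Ignoring any address hashing, a simplified sketch of which controller serves a given address under plain modulo interleaving (the address and option values are hypothetical):

# Hypothetical values; simplified modulo view of the interleaving.
num_dev_dirs = 4              # GPU-side directory controllers
cacheline_size = 128          # interleave granularity in bytes

addr = 0x1A80
chunk = addr // cacheline_size        # 128-byte chunk index (53 here)
ctrl_index = chunk % num_dev_dirs     # chunk 53 -> controller 1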

Example 4: create_system

# Required import: import MemConfig [as alias]
# Or: from MemConfig import create_mem_ctrl [as alias]
def create_system(options, full_system, system, dma_ports, ruby_system):

    if not buildEnv['GPGPU_SIM']:
        m5.util.panic("This script requires GPGPU-Sim integration to be built.")

    options.access_backing_store = True

    # Run the original protocol script
    buildEnv['PROTOCOL'] = buildEnv['PROTOCOL'].replace('split', 'fusion')
    protocol = buildEnv['PROTOCOL']
    exec "import %s" % protocol
    try:
        (cpu_sequencers, dir_cntrl_nodes, topology) = \
            eval("%s.create_system(options, full_system, system, dma_ports, ruby_system)" % protocol)
    except:
        print "Error: could not create system for ruby protocol inside fusion system %s" % protocol
        raise

    # Faking things to build the rest of the system
    print "Warning!"
    print "Warning: Faking split MOESI_hammer protocol; collecting checkpoints?"
    print "Warning!"

    if options.num_dev_dirs > 0:
        block_size_bits = int(math.log(options.cacheline_size, 2))
        gpu_phys_mem_size = system.gpu.gpu_memory_range.size()
        mem_module_size = gpu_phys_mem_size / options.num_dev_dirs

        #
        # determine size and index bits for probe filter
        # By default, the probe filter size is configured to be twice the
        # size of the L2 cache.
        #
        pf_size = MemorySize(options.sc_l2_size)
        pf_size.value = pf_size.value * 2
        dir_bits = int(math.log(options.num_dev_dirs, 2))
        pf_bits = int(math.log(pf_size.value, 2))
        if options.numa_high_bit:
            if options.pf_on or options.dir_on:
                # if numa high bit explicitly set, make sure it does not overlap
                # with the probe filter index
                assert(options.numa_high_bit - dir_bits > pf_bits)

            # set the probe filter start bit to just above the block offset
            pf_start_bit = block_size_bits
        else:
            if dir_bits > 0:
                pf_start_bit = dir_bits + block_size_bits - 1
            else:
                pf_start_bit = block_size_bits

        dev_dir_cntrls = []
        dev_mem_ctrls = []
        num_cpu_dirs = len(dir_cntrl_nodes)
        for i in xrange(options.num_dev_dirs):
            #
            # Create the Ruby objects associated with the directory controller
            #

            dir_version = i + num_cpu_dirs

            dir_size = MemorySize('0B')
            dir_size.value = mem_module_size

            pf = ProbeFilter(size = pf_size, assoc = 4,
                             start_index_bit = pf_start_bit)

            dev_dir_cntrl = Directory_Controller(version = dir_version,
                                 directory = \
                                 RubyDirectoryMemory( \
                                            version = dir_version,
                                            size = dir_size,
                                            numa_high_bit = \
                                            options.numa_high_bit,
                                            device_directory = True),
                                 probeFilter = pf,
                                 probe_filter_enabled = options.pf_on,
                                 full_bit_dir_enabled = options.dir_on,
                                 ruby_system = ruby_system)

            if options.recycle_latency:
                dev_dir_cntrl.recycle_latency = options.recycle_latency

            exec("ruby_system.dev_dir_cntrl%d = dev_dir_cntrl" % i)
            dev_dir_cntrls.append(dev_dir_cntrl)

            # Connect the directory controller to the network
            dev_dir_cntrl.forwardFromDir = ruby_system.network.slave
            dev_dir_cntrl.responseFromDir = ruby_system.network.slave
            dev_dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

            dev_dir_cntrl.unblockToDir = ruby_system.network.master
            dev_dir_cntrl.responseToDir = ruby_system.network.master
            dev_dir_cntrl.requestToDir = ruby_system.network.master
            dev_dir_cntrl.dmaRequestToDir = ruby_system.network.master

            dev_mem_ctrl = MemConfig.create_mem_ctrl(
                MemConfig.get(options.mem_type), system.gpu.gpu_memory_range,
                i, options.num_dev_dirs, int(math.log(options.num_dev_dirs, 2)),
                options.cacheline_size)
#......... part of the code is omitted here .........
Developer: Urmish, Project: CPU-GPU-Coherence, Lines: 103, Source: MOESI_hammer_split.py
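This script sizes the probe filter at twice the GPU L2 and, when numa_high_bit is not set and there is at least one directory-select bit, sets the probe filter start index to dir_bits + block_size_bits - 1. A small worked sketch with assumed option values:

# Hypothetical option values, for illustration only.
import math

sc_l2_size = 512 * 1024                               # assumed 512 KiB GPU L2
cacheline_size = 128
num_dev_dirs = 4

pf_size = sc_l2_size * 2                              # probe filter sized at 2x the L2
pf_bits = int(math.log(pf_size, 2))                   # 20 index bits
block_size_bits = int(math.log(cacheline_size, 2))    # 7
dir_bits = int(math.log(num_dev_dirs, 2))             # 2

# numa_high_bit not given and dir_bits > 0:
pf_start_bit = dir_bits + block_size_bits - 1         # 8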


Note: The MemConfig.create_mem_ctrl examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; the source code copyright belongs to the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.