

Python scan_utils.clone Function Code Examples

This article collects typical usage examples of the Python function theano.scan_module.scan_utils.clone. If you are wondering what exactly clone does, how to call it, or how it is used in practice, the curated examples below should help.


The following presents 10 code examples of the clone function, sorted by popularity by default.
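At its core, scan_utils.clone rebuilds a Theano expression graph while substituting some of its variables, which is exactly how the examples below move inner scan graphs between CPU and GPU. Here is a minimal sketch of the call, with hypothetical variables, assuming an old Theano release where theano.scan_module still exists:

import theano.tensor as tt
from theano.scan_module import scan_utils

x = tt.vector('x')
y = tt.vector('y')
out = (x + y) ** 2

# Rebuild the graph with x swapped for a fresh variable of the same
# type; ``out`` itself is left untouched.
x_new = tt.vector('x_new')
out_cloned = scan_utils.clone(out, replace={x: x_new})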

Example 1: local_scan_to_gpua

def local_scan_to_gpua(node):
    info = copy.deepcopy(node.op.info)
    if info.get('gpua', False):
        return
    info['gpua'] = True
    nw_ins = [node.inputs[0]]
    e = (1 +
         node.op.n_seqs +
         node.op.n_mit_mot +
         node.op.n_mit_sot +
         node.op.n_sit_sot +
         node.op.n_shared_outs)
    nw_ins += [safe_to_gpu(x) for x in node.inputs[1:e]]
    b = e
    e = e + node.op.n_nit_sot
    nw_ins += node.inputs[b:e]
    nw_ins += [safe_to_gpu(x) for x in node.inputs[e:]]
    scan_ins = [tensor_to_gpu(x) for x in node.op.inputs]
    scan_outs = [safe_to_gpu(x) for x in node.op.outputs]
    scan_outs = scan_utils.clone(
        scan_outs,
        replace=list(zip(node.op.inputs,
                         [safe_to_cpu(x) for x in scan_ins])))

    # We need to construct the hash here, because scan
    # __init__ does not know about the gpu and cannot
    # handle graphs with inputs being on the gpu
    tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
    local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=False)
    _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
    info['gpu_hash'] = hash(_cmodule_key)

    nw_op = scan_op.Scan(scan_ins, scan_outs, info,
                         typeConstructor=GpuArrayType).make_node(*nw_ins)
    return nw_op.outputs
Developer ID: alimuldal, Project: Theano, Lines of code: 35, Source file: opt.py

Example 2: local_scan_to_gpua

def local_scan_to_gpua(node):
    info = copy.deepcopy(node.op.info)
    if info.get("gpua", False):
        return
    info["gpua"] = True
    nw_ins = [node.inputs[0]]
    e = 1 + node.op.n_seqs + node.op.n_mit_mot + node.op.n_mit_sot + node.op.n_sit_sot + node.op.n_shared_outs
    nw_ins += [safe_to_gpu(x) for x in node.inputs[1:e]]
    b = e
    e = e + node.op.n_nit_sot
    nw_ins += node.inputs[b:e]
    nw_ins += [safe_to_gpu(x) for x in node.inputs[e:]]
    scan_ins = [tensor_to_gpu(x) for x in node.op.inputs]

    # The inner output corresponding to the looping condition should not be
    # moved to the gpu
    if node.op.info["as_while"]:
        scan_outs = [safe_to_gpu(x) for x in node.op.outputs[:-1]]
        scan_outs += [node.op.outputs[-1]]
    else:
        scan_outs = [safe_to_gpu(x) for x in node.op.outputs]
    scan_outs = scan_utils.clone(scan_outs, replace=list(zip(node.op.inputs, (safe_to_cpu(x) for x in scan_ins))))

    # We need to construct the hash here, because scan
    # __init__ does not know about the gpu and cannot
    # handle graphs with inputs being on the gpu
    tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
    local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
    _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
    info["gpu_hash"] = hash(_cmodule_key)

    nw_op = scan_op.Scan(scan_ins, scan_outs, info, typeConstructor=GpuArrayType).make_node(*nw_ins)
    return nw_op.outputs
Developer ID: naisanza, Project: Theano, Lines of code: 33, Source file: opt.py

Example 3: cond_merge_random_op

def cond_merge_random_op(main_node):
    if isinstance(main_node.op, IfElse):
        return False

    all_inp_nodes = set()
    for inp in main_node.inputs:
        all_inp_nodes.add(inp.owner)
    cond_nodes = [x for x in list(all_inp_nodes)
                        if x and isinstance(x.op, IfElse)]

    if len(cond_nodes) < 2:
        return False

    merging_node = cond_nodes[0]
    for proposal in cond_nodes[1:]:
        if (proposal.inputs[0] == merging_node.inputs[0] and
            not find_up(proposal, merging_node) and
            not find_up(merging_node, proposal)):
            # Create a list of replacements for proposal
            mn_ts = merging_node.inputs[1:][:merging_node.op.n_outs]
            mn_fs = merging_node.inputs[1:][merging_node.op.n_outs:]
            pl_ts = proposal.inputs[1:][:proposal.op.n_outs]
            pl_fs = proposal.inputs[1:][proposal.op.n_outs:]
            new_ins = ([merging_node.inputs[0]] +
                       mn_ts + pl_ts + mn_fs + pl_fs)
            mn_name = '?'
            if merging_node.op.name:
                mn_name = merging_node.op.name
            pl_name = '?'
            mn_n_ts = len(mn_ts)
            mn_n_fs = len(mn_fs)
            if proposal.op.name:
                pl_name = proposal.op.name
            new_ifelse = IfElse(
                n_outs=len(mn_ts + pl_ts),
                as_view=False,
                gpu=False,
                name=mn_name + '&' + pl_name)
            new_outs = new_ifelse(*new_ins, **dict(return_list=True))
            old_outs = []
            if type(merging_node.outputs) not in (list, tuple):
                old_outs += [merging_node.outputs]
            else:
                old_outs += merging_node.outputs
            if type(proposal.outputs) not in (list, tuple):
                old_outs += [proposal.outputs]
            else:
                old_outs += proposal.outputs
            pairs = list(zip(old_outs, new_outs))
            main_outs = clone(main_node.outputs, replace=pairs)
            return main_outs
Developer ID: DeepLearningIndia, Project: Theano, Lines of code: 51, Source file: ifelse.py
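The key idiom in this example is the final clone call: the outputs of the two original IfElse nodes are mapped onto the outputs of the merged node, and clone rebuilds everything downstream of them. A minimal sketch of that replace-pairs pattern, with hypothetical variables and assuming an old Theano with theano.scan_module:

import theano.tensor as tt
from theano.scan_module.scan_utils import clone

a = tt.scalar('a')
old_out = a * 2
new_out = a * 3
top = old_out + 1

# Rebuild ``top`` so that it is computed from ``new_out`` instead.
pairs = [(old_out, new_out)]
top_new, = clone([top], replace=pairs)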

Example 4: gpu_reconstruct_graph

def gpu_reconstruct_graph(inputs, outputs, tag=None):
    """
    Different interface to clone, that allows you to pass inputs.
    Compared to clone, this method always replaces the inputs with
    new variables of the same type, and returns those ( in the same
    order as the original inputs).
    """
    if tag is None:
        tag = ''
    nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
    givens = {}
    for nw_x, x in zip(nw_inputs, inputs):
        givens[x] = nw_x
    nw_outputs = scan_utils.clone(outputs, replace=givens)
    return (nw_inputs, nw_outputs)
Developer ID: benmoran, Project: Theano, Lines of code: 15, Source file: opt.py
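The pattern wrapped by gpu_reconstruct_graph is generic: build one fresh variable per input, map old to new in a dict, and clone the outputs against that mapping. A CPU-only sketch with hypothetical variables (gpu_safe_new is GPU-specific, so plain x.type() stands in for it here):

import theano.tensor as tt
from theano.scan_module import scan_utils

a = tt.vector('a')
b = tt.vector('b')
outputs = [a * b, a - b]

# One fresh variable of the same type per original input.
nw_inputs = [x.type(x.name + '_new') for x in (a, b)]
givens = dict(zip((a, b), nw_inputs))
nw_outputs = scan_utils.clone(outputs, replace=givens)
# nw_outputs now depends on nw_inputs, in the same order as (a, b).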

Example 5: apply

def apply(self, fgraph):
    nodelist = list(fgraph.toposort())
    cond_nodes = [s for s in nodelist if isinstance(s.op, IfElse)]
    if len(cond_nodes) < 2:
        return False
    merging_node = cond_nodes[0]
    for proposal in cond_nodes[1:]:
        if (proposal.inputs[0] == merging_node.inputs[0] and
                not find_up(proposal, merging_node)):
            # Create a list of replacements for proposal
            mn_ts = merging_node.inputs[1:][:merging_node.op.n_outs]
            mn_fs = merging_node.inputs[1:][merging_node.op.n_outs:]
            pl_ts = proposal.inputs[1:][:proposal.op.n_outs]
            pl_fs = proposal.inputs[1:][proposal.op.n_outs:]
            new_ins = ([merging_node.inputs[0]] +
                       mn_ts + pl_ts + mn_fs + pl_fs)
            mn_name = '?'
            if merging_node.op.name:
                mn_name = merging_node.op.name
            pl_name = '?'
            mn_n_ts = len(mn_ts)
            mn_n_fs = len(mn_fs)
            if proposal.op.name:
                pl_name = proposal.op.name
            new_ifelse = IfElse(
                n_outs=len(mn_ts + pl_ts),
                as_view=False,
                gpu=False,
                name=mn_name + '&' + pl_name)
            new_outs = new_ifelse(*new_ins, return_list=True)
            new_outs = [clone(x) for x in new_outs]
            old_outs = []
            if type(merging_node.outputs) not in (list, tuple):
                old_outs += [merging_node.outputs]
            else:
                old_outs += merging_node.outputs
            if type(proposal.outputs) not in (list, tuple):
                old_outs += [proposal.outputs]
            else:
                old_outs += proposal.outputs
            pairs = list(zip(old_outs, new_outs))
            fgraph.replace_all_validate(pairs, reason='cond_merge')
Developer ID: DeepLearningIndia, Project: Theano, Lines of code: 43, Source file: ifelse.py

Example 6: scan


#......... part of the code omitted here .........
                    str(condition), ' is ignored'))

        for pos, inner_out in enumerate(outputs):
            # we need to see if we need to pad our sequences with an
            # unbroadcastable dimension; case example : we return an
            # output for which we want all intermediate. If n_steps is 1
            # then, if we return the output as given by the inner function
            # this will represent only a slice and it will have one
            # dimension less.
            if (isinstance(inner_out.type, tensor.TensorType) and
                return_steps.get(pos, 0) != 1):
                outputs[pos] = tensor.unbroadcast(
                    tensor.shape_padleft(inner_out), 0)
        if len(outputs) == 1:
            outputs = outputs[0]

        return (outputs, updates)

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [note this region is not important
    # for performance, so we can be as suboptimal as we wish]

    # extract still missing inputs (there might still be some) and add
    # them as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs,
                                    replace=OrderedDict(zip(non_seqs,
                                                     fake_nonseqs)))
    all_inputs = filter(
        lambda x: (isinstance(x, gof.Variable) and
                   not isinstance(x, SharedVariable) and
                   not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)
    dummy_f = function(dummy_args,
                       dummy_outs,
                       updates=updates,
                       mode=compile.mode.Mode(linker='py',
                                              optimizer=None),
                       on_unused_input='ignore',
                       profile=False)

    ##
    # Step 5. Re-arrange inputs of scan into a more strict order
    ##

    # Step 5.0 Check the outputs of the dummy function to see if they
    # match with user provided data

    # if the number of outputs to the function does not match the number of
Developer ID: Micseb, Project: Theano, Lines of code: 67, Source file: scan.py
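The "dummy function" trick in Step 4 is worth isolating: compiling with the pure-Python linker and no optimizer is cheap, and the resulting function is only inspected for shared variables and update rules, never run for real. A stripped-down sketch with a hypothetical graph, assuming an old Theano:

import theano
import theano.tensor as tt
from theano import compile

x = tt.scalar('x')
s = theano.shared(0.0, name='s')

# Fast, throwaway compilation: python linker, no graph optimization.
dummy_f = theano.function(
    [x], x + s,
    updates=[(s, s + x)],
    mode=compile.mode.Mode(linker='py', optimizer=None),
    on_unused_input='ignore')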

Example 7: rules

    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [note this region is not important
    # for performance, so we can be as suboptimal as we wish]

    # extract still missing inputs (there might still be some) and add
    # them as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs,
                                    replace=OrderedDict(zip(non_seqs,
                                                     fake_nonseqs)))
    all_inputs = filter(
        lambda x: (isinstance(x, gof.Variable) and
                   not isinstance(x, SharedVariable) and
                   not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)
Developer ID: Jackwangyang, Project: Theano, Lines of code: 31, Source file: scan.py

Example 8: rules

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [note this region is not important
    # for performance, so we can be as suboptimal as we wish]

    # extract still missing inputs (there might still be some) and add
    # them as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs + list(updates.values()),
                                    replace=dict(zip(non_seqs,
                                                     fake_nonseqs)))
    all_inputs = filter(
        lambda x: (isinstance(x, gof.Variable) and
                   not isinstance(x, SharedVariable) and
                   not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    # Materialize to a list: ``extra_inputs`` is consumed twice below.
    extra_inputs = [x for x in all_inputs
                    if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
Developer ID: aelaguiz, Project: Theano, Lines of code: 31, Source file: scan.py

Example 9: scan


#......... part of the code omitted here .........
                    " is ignored",
                )
            )

        for pos, inner_out in enumerate(outputs):
            # we need to see if we need to pad our sequences with an
            # unbroadcastable dimension; case example : we return an
            # output for which we want all intermediate. If n_steps is 1
            # then, if we return the output as given by the inner function
            # this will represent only a slice and it will have one
            # dimension less.
            if isinstance(inner_out.type, tensor.TensorType) and return_steps.get(pos, 0) != 1:
                outputs[pos] = tensor.unbroadcast(tensor.shape_padleft(inner_out), 0)
        if len(outputs) == 1:
            outputs = outputs[0]

        return (outputs, updates)

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [note this region is not important
    # for performance, so we can be as suboptimal as we wish]

    # extract still missing inputs (there might still be some) and add
    # them as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs + list(updates.values()), replace=dict(zip(non_seqs, fake_nonseqs)))
    all_inputs = filter(
        lambda x: (
            isinstance(x, gof.Variable) and not isinstance(x, SharedVariable) and not isinstance(x, gof.Constant)
        ),
        gof.graph.inputs(fake_outputs),
    )
    # Materialize to a list: ``extra_inputs`` is consumed twice below.
    extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)

    # If we use a regular dict here, the results are non-deterministic
    if not isinstance(updates, (list, tuple)):
        if isinstance(updates, dict) and not isinstance(updates, OrderedDict):
            warnings.warn("Using non-deterministic dictionary.")

    dummy_f = function(
        dummy_args,
        dummy_outs,
        updates=updates,
        mode=compile.mode.Mode(linker="py", optimizer=None),
        on_unused_input="ignore",
    )

    ##
    # Step 5. Re-arrange inputs of scan into a more strict order
    ##
Developer ID: amanrajdce, Project: Theano, Lines of code: 67, Source file: scan.py

Example 10: gpuScanOptimization

def gpuScanOptimization(node):
    """
    scan(host_from_gpu) -> host_from_gpu(GPUscan)
    gpu_from_host(scan) -> GPUscan(gpu_from_host)
    """

    # gpu_from_host(scan) -> GPUscan(gpu_from_host)
    if node.op == gpu_from_host:
        host_input = node.inputs[0]
        if (
            host_input.owner
            and isinstance(host_input.owner.op, scan_op.Scan)
            and not host_input.owner.op.info["gpu"]
            and len(host_input.owner.outputs) == 1
        ):
            # Note that we are not doing the right thing here !!
            # This is because the local optimizer expects only one
            # output that corresponds to the input of ``node``
            # If we do this for each output separately we will have
            # multiple scan ops in the graph (as many as outputs)
            # and I'm not sure they will get merged into one again
            # So for now I will just cover a limited case when there
            # is only one output and the local optimizer can be used
            # TODO (fix) : either make sure the different scans get
            # merged or implement this optimization as a global
            # optimization
            thescan = host_input.owner.op
            info = thescan.info.copy()
            info["gpu"] = True
            inputs = host_input.owner.inputs
            nw_ins = [inputs[0]]
            e = 1 + thescan.n_seqs + thescan.n_mit_mot + thescan.n_mit_sot + thescan.n_sit_sot + thescan.n_shared_outs
            nw_ins += [safe_to_gpu(x) for x in inputs[1:e]]
            b = e
            e = e + thescan.n_nit_sot
            nw_ins += inputs[b:e]
            nw_ins += [safe_to_gpu(x) for x in inputs[e:]]
            scan_ins = [tensor_to_cuda(x) for x in thescan.inputs]
            scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
            scan_outs = scan_utils.clone(scan_outs, replace=list(zip(thescan.inputs, [safe_to_cpu(x) for x in scan_ins])))
            # We need to construct the hash here, because scan
            # __init__ does not know about cuda ndarray and cannot
            # handle graphs with inputs being Cuda Ndarrays
            tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
            local_env = gof.Env(tmp_in, tmp_out)
            _cmodule_key = gof.CLinker().cmodule_key_(local_env, [])
            info["gpu_hash"] = hash(_cmodule_key)

            typeConstructor = lambda broadcastable, dtype: CudaNdarrayType(broadcastable=broadcastable)
            nw_op = scan_op.Scan(scan_ins, scan_outs, info, typeConstructor=typeConstructor).make_node(*nw_ins)
            _outputs = nw_op.outputs
            return _outputs

    # scan(host_from_gpu) -> host_from_gpu(GPUscan)
    if type(node.op) == scan_op.Scan and not node.op.info["gpu"]:
        if numpy.any([(i.owner and i.owner.op == host_from_gpu) for i in node.inputs]):

            thescan = node.op
            info = thescan.info.copy()
            info["gpu"] = True
            inputs = node.inputs
            nw_ins = [inputs[0]]
            e = 1 + thescan.n_seqs + thescan.n_mit_mot + thescan.n_mit_sot + thescan.n_sit_sot + thescan.n_shared_outs
            nw_ins += [safe_to_gpu(x) for x in inputs[1:e]]
            b = e
            e = e + thescan.n_nit_sot
            nw_ins += inputs[b:e]
            nw_ins += [safe_to_gpu(x) for x in inputs[e:]]

            scan_ins = [tensor_to_cuda(x) for x in thescan.inputs]
            scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
            scan_outs = scan_utils.clone(scan_outs, replace=list(zip(thescan.inputs, [safe_to_cpu(x) for x in scan_ins])))

            # We need to construct the hash here, because scan
            # __init__ does not know about cuda ndarray and cannot
            # handle graphs with inputs being Cuda Ndarrays
            tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
            local_env = gof.Env(tmp_in, tmp_out)
            _cmodule_key = gof.CLinker().cmodule_key_(local_env, [])
            info["gpu_hash"] = hash(_cmodule_key)
            typeConstructor = lambda broadcastable, dtype: CudaNdarrayType(broadcastable=broadcastable)
            _outputs = (
                scan_op.Scan(scan_ins, scan_outs, info, typeConstructor=typeConstructor).make_node(*nw_ins).outputs
            )
            outputs = []
            for x, y in zip(_outputs, node.outputs):
                if isinstance(y.type, CudaNdarrayType):
                    outputs += [x]
                else:
                    outputs += [safe_to_cpu(x)]
            return outputs
    return False
Developer ID: olivierverdier, Project: Theano, Lines of code: 92, Source file: opt.py


Note: The theano.scan_module.scan_utils.clone examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please follow the corresponding project's license. Do not reproduce without permission.