Python gof.local_optimizer Function Code Examples

This article collects and summarizes typical usage examples of the theano.gof.local_optimizer function in Python. If you are wondering exactly what local_optimizer does, or how to use it, the hand-picked code examples below should help.


Five code examples of the local_optimizer function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
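Before the examples, here is a minimal sketch of the decorator pattern they all build on. The rewrite below, which collapses neg(neg(x)) back to x, is invented for illustration and is not taken from the projects quoted below:

    import theano.tensor as T
    from theano.gof import local_optimizer

    @local_optimizer([T.neg])  # only consider Apply nodes whose op is T.neg
    def local_remove_double_neg(node):
        # Rewrite neg(neg(x)) -> x.  A local optimizer returns a list of
        # replacement outputs, or False/None to leave the node unchanged.
        inner = node.inputs[0].owner
        if inner is not None and inner.op == node.op:
            return [inner.inputs[0]]
        return False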

Example 1: f

    def f(maker):
        def local_opt(node):
            dev = theano.sandbox.gpuarray.init_dev.device
            if cuda_only and not dev.startswith('cuda'):
                return

            if type(node.op) in OP:

                # Either one of our inputs is on the GPU or
                # all of our clients are on the GPU
                if (any([i.owner and i.owner.op == host_from_gpu
                         for i in node.inputs]) or
                    all([c != 'output' and c.op == gpu_from_host
                         for c, idx in node.outputs[0].clients])):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherits from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [safe_to_cpu(o) for o in
                                    new_op(*node.inputs, return_list=True)]
                        elif isinstance(new_op, (tuple, list)):
                            return [safe_to_cpu(o) for o in new_op]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
Developer: benmoran, Project: Theano, Lines of code: 27, Source file: opt.py
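The f above is produced by an enclosing decorator factory that closes over OP and cuda_only (Theano's gpuarray backend has a similar factory named op_lifter). A hypothetical use of such a factory, where gpu_lifter, MyOp, and GpuMyOp are invented names:

    # Hypothetical usage; gpu_lifter (the enclosing factory), MyOp, and
    # GpuMyOp are invented names, not part of the quoted project.
    @gpu_lifter([MyOp], cuda_only=True)
    def local_gpu_my_op(node):
        # The maker receives the CPU node and returns the replacement:
        # an Op instance, a list of output variables, or a single
        # variable already on the GPU.
        return GpuMyOp()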

Example 2: f

    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break
                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                    (cuda_only and
                     get_context(context_name).kind != 'cuda')):
                    return False

                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        # tag the inputs with the context in case
                        # the context was derived from the outputs
                        def tag(i, ctx):
                            i.tag.context_name = ctx
                            return i
                        inputs = [tag(i, context_name) for i in node.inputs]
                        return [safe_to_cpu(o) for o in
                                new_op(*inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
Developer: BenJaEGo, Project: Theano, Lines of code: 52, Source file: opt.py

Example 3: f

    def f(maker):
        def local_opt(node):
            if type(node.op) is OP:
                # This does not support nodes that have more than one output.
                assert len(node.outputs) == 1
                # Either one of our inputs is on the GPU or
                # all of our clients are on the GPU
                if (any([i.owner and i.owner.op == host_from_gpu
                         for i in node.inputs]) or
                    all([c != 'output' and c.op == gpu_from_host
                         for c, idx in node.outputs[0].clients])):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherits from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [host_from_gpu(new_op(*node.inputs))]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer([OP])(local_opt)
Developer: csxlyan, Project: Theano, Lines of code: 21, Source file: opt.py

Example 4: f

    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:

                # Either one of our inputs is on the GPU or
                # all of our clients are on the GPU
                if any([i.owner and i.owner.op == host_from_gpu for i in node.inputs]) or all(
                    [c != "output" and c.op == gpu_from_host for c, idx in node.outputs[0].clients]
                ):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherits from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [host_from_gpu(o) for o in new_op(*node.inputs, return_list=True)]
                        elif isinstance(new_op, (tuple, list)):
                            return [host_from_gpu(o) for o in new_op]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False

        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
Developer: herr-biber, Project: Theano, Lines of code: 22, Source file: opt.py

Example 5: local_large_sparse_targets_gpu

# Add CPU-to-GPU merge optimization
#@register_specialize
#@local_optimizer([LargeSparseTargets])
def local_large_sparse_targets_gpu(node):
    if not isinstance(node.op, LargeSparseTargets) or theano.config.device == "cpu":
        return False

    if node.op.what_to_output == 0:
        return [GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs)]
    elif node.op.what_to_output == 1:
        return [host_from_gpu(GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs))]
    else:
        out = GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs)
        return [out[0], host_from_gpu(out[1])]

optdb.register("local_large_sparse_targets_gpu",
               TopoOptimizer(local_optimizer([LargeSparseTargets])(
                   local_large_sparse_targets_gpu)),
               49, "fast_run")


def optimize_large_sparse_target(inputs, H, outputs, updates):
    """
    TODO: WRITEME
    """

    # need to rewrite MergeLargeSparseTargetOps because there will be multiple
    # updates containing gradH!

    if not isinstance(updates, OrderedDict):
        raise ValueError("updates must be an OrderedDict, otherwise keys and"
                         " values may not match after optimization")

    fgraph = gof.FunctionGraph(inputs,
Developer: adbrebs, Project: factored_output_layer, Lines of code: 31, Source file: op.py
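Instead of the commented-out decorators at the top of Example 5, the rewrite is registered manually. Unpacked step by step, the one-line optdb.register call amounts to the following sketch (LargeSparseTargets and local_large_sparse_targets_gpu are the names from the snippet; position 49 and the "fast_run" tag are copied from it):

    from theano.compile import optdb
    from theano.gof import local_optimizer
    from theano.gof.opt import TopoOptimizer

    # Wrap the node-level rewrite into a LocalOptimizer that tracks
    # LargeSparseTargets nodes...
    lopt = local_optimizer([LargeSparseTargets])(local_large_sparse_targets_gpu)
    # ...lift it to a graph-level optimizer that applies the rewrite while
    # walking the graph in topological order...
    gopt = TopoOptimizer(lopt)
    # ...and register it in the global optimizer database so that the
    # fast_run mode runs it at position 49.
    optdb.register("local_large_sparse_targets_gpu", gopt, 49, "fast_run")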


Note: The theano.gof.local_optimizer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by programmers across the community; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.