

Python basic_ops.host_from_gpu Function Code Examples

This article collects typical usage examples of the theano.sandbox.cuda.basic_ops.host_from_gpu function in Python. If you are wondering what host_from_gpu does, how to call it, or what real-world uses look like, the curated code examples below should help.


A total of 15 code examples of the host_from_gpu function are shown below, sorted by popularity by default.
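
Before the examples, here is a minimal sketch of the role host_from_gpu plays in a computation graph: it inserts a GPU-to-host transfer node so that downstream ops receive an ordinary CPU ndarray again. This snippet is illustrative only and assumes an old-style CUDA-enabled Theano setup (device=gpu, floatX=float32):

# Minimal illustrative sketch, not one of the numbered examples below.
# Assumes a CUDA-enabled Theano with device=gpu and floatX=float32.
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_from_host, host_from_gpu

x = T.fmatrix('x')             # float32 is required by the old CUDA backend
x_gpu = gpu_from_host(x)       # host -> GPU transfer node (CudaNdarrayType)
x_host = host_from_gpu(x_gpu)  # GPU -> host transfer node (TensorType again)

f = theano.function([x], x_host + 1)  # the "+ 1" is computed on the host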

Example 1: local_large_sparse_targets_gpu

def local_large_sparse_targets_gpu(node):
    if not isinstance(node.op, LargeSparseTargets) or theano.config.device == "cpu":
        return False

    # Depending on ``what_to_output``, the rewrite returns the GPU output
    # directly (0), transfers the single output back to the host (1), or
    # transfers only the second of two outputs back (otherwise).
    if node.op.what_to_output == 0:
        return [GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs)]
    elif node.op.what_to_output == 1:
        return [host_from_gpu(GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs))]
    else:
        out = GpuLargeSparseTargets(node.op.what_to_output)(*node.inputs)
        return [out[0], host_from_gpu(out[1])]
Author: adbrebs, Project: factored_output_layer, Lines: 11, Source: op.py

Example 2: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if (type(node.op) is Images2Neibs and
        node.inputs[0].dtype == 'float32' and
        node.op.mode in ['valid', 'wrap_centered']):
        return [host_from_gpu(gpu_images2neibs(gpu_from_host(node.inputs[0]),
                                               node.inputs[1], node.inputs[2],
                                               mode=node.op.mode))]
Author: wqren, Project: Theano, Lines: 7, Source: neighbours.py
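
A note on how rewrites like this are wired in: local rewrites are normally wrapped in Theano's optimizer decorators so the compiler applies them automatically during graph optimization. The following is a hedged sketch of the usual registration pattern; the decorator names come from theano.gof and theano.sandbox.cuda.opt, but the exact placement varies by project:

# Hedged sketch of the typical registration pattern for such local rewrites.
from theano.gof import local_optimizer
from theano.sandbox.cuda.opt import register_opt

@register_opt()                   # add the rewrite to the GPU optimizer database
@local_optimizer([Images2Neibs])  # only visit nodes whose op is Images2Neibs
def use_gpu_images2neibs(node):
    ...  # body as in Example 2 above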

Example 3: use_gpu_cumsum

def use_gpu_cumsum(node):
    if type(node.op) is CumOp \
       and node.inputs[0].dtype == 'float32' \
       and node.inputs[0].owner \
       and isinstance(node.inputs[0].owner.op, HostFromGpu):

        if node.op.mode != 'add':
            return None

        axis = node.op.axis
        x = node.inputs[0]

        if axis is not None and x.ndim > GpuCumsum.SUPPORTED_NDIMS:
            return None

        x = gpu_from_host(x)

        if axis is None and x.ndim > 1:
            x = gpu_flatten(x)

        # ``gpu_cumsum`` assumes the array has already been flattened when needed.
        if axis is None:
            axis = 0

        ret = host_from_gpu(GpuCumsum(axis)(x))
        ret.tag.values_eq_approx = values_eq_approx_high_tol
        return [ret]
Author: Faruk-Ahmed, Project: Theano, Lines: 27, Source: extra_ops.py
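
To see a rewrite like this fire, one can compile a cumsum graph under the GPU backend and inspect the optimized graph. A hedged sketch, assuming a CUDA-enabled Theano with device=gpu and floatX=float32:

# Hedged sketch: checking that cumsum was moved to the GPU.
import theano
import theano.tensor as T
from theano.tensor.extra_ops import cumsum

x = T.fvector('x')
f = theano.function([x], cumsum(x))

# After optimization the graph should contain GpuCumsum instead of CumOp,
# bracketed by GpuFromHost / HostFromGpu transfer nodes.
print(f.maker.fgraph.toposort())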

Example 4: local_gpu_minres

def local_gpu_minres(node):
    if isinstance(node.op, MinresQLP):
        sw = False
        for inp in node.inputs:
            if inp.owner and inp.owner.op == host_from_gpu:
                sw = True
        if sw:
            inps = node.inputs
            nw_inps = []
            for inp in inps:
                if not isinstance(inp.type, CudaNdarrayType):
                    nw_inps.append(gpu_from_host(inp))
                else:
                    nw_inps.append(inp)
            new_op = node.op
            new_op.gpu = 1
            _new_outs = node.op(*nw_inps)
            new_outs = []
            for out in _new_outs:
                if isinstance(out.type, CudaNdarrayType):
                    new_outs.append(host_from_gpu(out))
                else:
                    new_outs.append(out)
            return new_outs
        else:
            return False
Author: LeeEdel, Project: theano_optimize, Lines: 26, Source: minresQLP.py

Example 5: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if type(node.op) is Images2Neibs:
        return [
            host_from_gpu(
                gpu_images2neibs(gpu_from_host(node.inputs[0]), node.inputs[1], node.inputs[2], mode=node.op.mode)
            )
        ]
Author: daien, Project: Theano, Lines: 7, Source: neighbours.py

Example 6: local_gpu_argmax

def local_gpu_argmax(node):
    if type(node.op) is KArgmax:
        p, = node.inputs
        vals, indx = node.outputs
        if (p.dtype == vals.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu) for i in node.inputs])):
            gpu_op = GpuKArgmax(node.op.K)
            ret_vals, ret_indx = gpu_op(gpu_from_host(p))
            return [host_from_gpu(ret_vals), T.cast(host_from_gpu(ret_indx), "int32")]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op)
        is KArgmax):
        multi = node.inputs[0].owner
        p, = multi.inputs
        vals, indx = multi.outputs
        if p.dtype == vals.dtype == 'float32':
            gpu_op = GpuKArgmax(node.inputs[0].owner.op.K)
            ret_vals, ret_indx = gpu_op(gpu_from_host(p))
            return [gpu_from_host(ret_vals), gpu_from_host(ret_indx)]
Author: hydercps, Project: hred-qs, Lines: 19, Source: theano_extensions.py

Example 7: local_gpu_conv3d

def local_gpu_conv3d(node):
    if isinstance(node.op, Conv3D):
        if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu)
                      for i in node.inputs]):
            if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
                V, W, b, d = node.inputs
                return [host_from_gpu(gpu_convd(as_cuda_ndarray_variable(V),
                                                as_cuda_ndarray_variable(W),
                                                as_cuda_ndarray_variable(b),
                                                d))]
Author: 317070, Project: Theano, Lines: 10, Source: GpuConv3D.py

Example 8: save_data

	def save_data(self, filename, data):
		# If ``data`` is not already a numpy array (e.g. it still lives on the
		# GPU), transfer it back to the host and evaluate it into an ndarray.
		if not isinstance(data, np.ndarray):
			data = host_from_gpu(data)
			data = np.asarray(data.eval())
		mult = lambda x, y: x * y
		length = reduce(mult, data.shape)  # total number of elements
		data = data.reshape(length)
		data = "\n".join([str(i) for i in data])
		with open(filename, "w") as f:
			f.write(data)
Author: ZhecanJamesWang, Project: conv-net-research, Lines: 11, Source: convnet.py
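
A hypothetical call site for this helper; ``net`` and ``W`` below are illustrative assumptions, not names from the project, and the host_from_gpu branch is only reached when Theano is actually running on the GPU:

# Hypothetical usage; ``net`` and ``W`` are illustrative assumptions.
import numpy as np
import theano

# With device=gpu and dtype float32, this shared variable lives on the GPU.
W = theano.shared(np.random.randn(10, 5).astype('float32'))
net.save_data('weights.txt', W)  # non-ndarray input takes the host_from_gpu path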

Example 9: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if (
        type(node.op) is Images2Neibs
        and node.inputs[0].dtype == "float32"
        and node.op.mode in ["valid", "ignore_borders", "wrap_centered"]
    ):
        return [
            host_from_gpu(
                gpu_images2neibs(gpu_from_host(node.inputs[0]), node.inputs[1], node.inputs[2], mode=node.op.mode)
            )
        ]
Author: Jerryzcn, Project: Theano, Lines: 11, Source: neighbours.py

Example 10: save_weights

def save_weights(weights, filename):
    """Taken from the convnet code. Deals with weights of a network
    calculated on a GPU.
    """
    length = reduce(lambda x, y: x * y, weights.shape.eval())
    data = host_from_gpu(weights).eval()  # GPU -> host, then evaluate to ndarray
    data = np.asarray(data)
    data = data.reshape(length)
    data = "\n".join([str(i) for i in data])
    with open(filename, "w") as f:
        f.write(data)
Author: ZhecanJamesWang, Project: conv-net-research, Lines: 12, Source: Char74knet.py

Example 11: save_data

	def save_data(self, filename, data, gpu=False):
		mult = lambda x, y: x * y
		if gpu:
			# ``data`` is a GPU variable: read its shape symbolically, move it
			# back to the host, and evaluate it into an ndarray.
			length = reduce(mult, data.shape.eval())
			data = host_from_gpu(data).eval()
			data = np.asarray(data)
		else:
			length = reduce(mult, data.shape)
		data = data.reshape(length)
		data = "\n".join([str(i) for i in data])
		with open(filename, "w") as f:
			f.write(data)
Author: ZhecanJamesWang, Project: conv-net-research, Lines: 13, Source: convnet.py

Example 12: grab_cpu_scalar

def grab_cpu_scalar(v, nd):
    # Try to recover the underlying CPU scalar behind ``v``, an nd-dimensional
    # broadcasted variable: peel off (Gpu)DimShuffle and GpuFromHost nodes, or
    # collapse a fully broadcastable Constant to a 0-d variable.
    if v.owner is not None:
        n = v.owner
        if (isinstance(n.op, GpuDimShuffle) and
                n.op.new_order == ('x',) * nd):
            return host_from_gpu(n.inputs[0])
        elif (isinstance(n.op, DimShuffle) and
              n.op.new_order == ('x',) * nd):
            return n.inputs[0]
        elif isinstance(n.op, GpuFromHost):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
        else:
            return None
    else:
        if (isinstance(v, Constant) and
                v.broadcastable == (True,) * nd):
            return v.dimshuffle(())
Author: 12190143, Project: Theano, Lines: 17, Source: opt_util.py

Example 13: local_gpu_forloop

def local_gpu_forloop(node):
    if isinstance(node.op, forloop):
        sw = False
        for inp in node.inputs:
            if inp.owner and inp.owner.op == host_from_gpu:
                sw = True
        if sw:
            inps = node.inputs
            nw_inps = []
            for inp in inps:
                if not isinstance(inp.type, CudaNdarrayType):
                    nw_inps.append(gpu_from_host(inp))
                else:
                    nw_inps.append(inp)
            new_outs = node.op(*nw_inps)
            return [host_from_gpu(x) for x in new_outs]
        else:
            return False
Author: pascanur, Project: natgrad, Lines: 18, Source: utils.py

Example 14: local_gpu_multinomial

def local_gpu_multinomial(node):
    if type(node.op) is MultinomialFromUniform:
        p, u = node.inputs
        m, = node.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu)
                 for i in node.inputs])):
            gpu_op = GpuMultinomialFromUniform(node.op.odtype)
            return [host_from_gpu(gpu_op(*[gpu_from_host(i) for i in node.inputs])).T]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op) is MultinomialFromUniform):
        multi = node.inputs[0].owner
        p, u = multi.inputs
        m, = multi.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32'):
            gpu_op = GpuMultinomialFromUniform(multi.op.odtype)
            ret = gpu_op(*[gpu_from_host(i) for i in multi.inputs]).T
            # The dimshuffle is on the cpu, but will be moved to the gpu by an opt.
            return [gpu_from_host(ret)]
Author: jmarinero, Project: Theano, Lines: 19, Source: multinomial.py
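
For reference, a hedged sketch of the CPU graph this rewrite targets; under the CUDA backend, the compiled function should end up using GpuMultinomialFromUniform instead:

# Hedged sketch of the graph this rewrite targets (float32 throughout).
import theano
import theano.tensor as T
from theano.sandbox.multinomial import MultinomialFromUniform

p = T.fmatrix('p')  # each row is a probability distribution
u = T.fvector('u')  # one uniform(0, 1) draw per row
m = MultinomialFromUniform('auto')(p, u)

f = theano.function([p, u], m)  # on device=gpu the op is replaced as above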

Example 15: local_assigner

def local_assigner(node):
    if type(node.op) is Assigner:
        p, indx, gr = node.inputs
        vals, = node.outputs
        if (p.dtype == vals.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu) for i in node.inputs])):
            gpu_op = GpuAssigner()
            ret = gpu_op(gpu_from_host(p), indx, gpu_from_host(gr))
            return [host_from_gpu(ret)]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op)
        is Assigner):
        multi = node.inputs[0].owner
        p, indx, gr = multi.inputs
        vals, = multi.outputs
        if p.dtype == vals.dtype == 'float32':
            gpu_op = GpuAssigner()
            ret_vals = gpu_op(gpu_from_host(p), indx, gpu_from_host(gr))
            return [gpu_from_host(ret_vals)]
Author: hydercps, Project: hred-qs, Lines: 19, Source: theano_extensions.py


Note: The theano.sandbox.cuda.basic_ops.host_from_gpu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce without permission.