

Python tensor.alloc Method Code Examples

This article collects and summarizes typical code examples for the Python method theano.tensor.alloc. If you are wondering how exactly to use tensor.alloc, or what it is good for, the curated method examples below may help. You can also explore other usage examples from the theano.tensor module.


The following shows 15 code examples of the tensor.alloc method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
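
For orientation, here is a minimal sketch of the method itself: tensor.alloc(value, *shape) builds a symbolic tensor of the given shape with every element set to value; the first argument is the fill value, not part of the shape. The snippet below is illustrative and not taken from any of the projects quoted in this article.

import numpy
import theano
import theano.tensor as tensor

# tensor.alloc(value, *shape): a symbolic tensor of shape `shape`,
# filled with `value`. Shape entries may be symbolic scalars.
n = tensor.lscalar('n')
zeros = tensor.alloc(0.0, n, 4)   # an (n, 4) tensor of zeros

f = theano.function([n], zeros)
print(f(3).shape)  # (3, 4)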

Example 1: set_output

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def set_output(self):
        output_shape = self._output_shape
        padding = self._padding
        unpool_size = self._unpool_size
        unpooled_output = tensor.alloc(0.0,  # Value to fill the tensor
                                       output_shape[0],
                                       output_shape[1] + 2 * padding[0],
                                       output_shape[2],
                                       output_shape[3] + 2 * padding[1],
                                       output_shape[4] + 2 * padding[2])

        unpooled_output = tensor.set_subtensor(
            unpooled_output[:,
                            padding[0]:output_shape[1] + padding[0]:unpool_size[0],
                            :,
                            padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                            padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
            self._prev_layer.output)
        self._output = unpooled_output
Author: chrischoy, Project: 3D-R2N2, Lines: 18, Source: layers.py

Example 2: test_gpualloc

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def test_gpualloc():
    '''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to the constant
    1. In that case the originally constructed broadcastable pattern will
    have a False for that dimension, but the new broadcastable pattern
    inserted by gpualloc will have a True, since it knows the
    dimension is 1 and therefore broadcastable.
    '''

    x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
    m = x.dimshuffle(['x', 0])
    v = tensor.alloc(1., *m.shape)
    f = theano.function([], v + x,
                        mode=mode_with_gpu.excluding("local_elemwise_alloc"))
    l = f.maker.fgraph.toposort()
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l]) 
Author: muhanzhang, Project: D-VAE, Lines: 19, Source: test_opt.py
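
The broadcastable-pattern behavior described in the docstring can be seen directly on the CPU; a hedged sketch, not part of the original test:

import theano.tensor as tensor

v = tensor.alloc(1., 1, 5)   # first shape entry is the constant 1
print(v.broadcastable)       # (True, False): the size-1 dim is broadcastable

n = tensor.lscalar('n')
w = tensor.alloc(1., n, 5)   # symbolic size: not known to be 1
print(w.broadcastable)       # (False, False)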

Example 3: test_gpujoin_gpualloc

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Author: muhanzhang, Project: D-VAE, Lines: 27, Source: test_basic_ops.py

Example 4: test_gpualloc_output_to_gpu

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)

    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5)) 
Author: muhanzhang, Project: D-VAE, Lines: 21, Source: test_basic_ops.py

Example 5: local_gpuaalloc2

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def local_gpuaalloc2(node):
    """
    Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)

    Moves an alloc that is an input to join to the gpu.

    """
    try:
        get_context(None)
    except ContextNotDefined:
        # If there is no default context then we do not perform the move here.
        return
    if (isinstance(node.op, tensor.Alloc) and
        all(c != 'output' and
            c.op == tensor.join and
            all(i.owner and
                i.owner.op in [host_from_gpu, tensor.alloc]
                for i in c.inputs[1:])
            for c, idx in node.outputs[0].clients)):
        return [host_from_gpu(GpuAlloc(None)(*node.inputs))] 
Author: muhanzhang, Project: D-VAE, Lines: 22, Source: opt.py

Example 6: test_gpujoin_gpualloc

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Author: muhanzhang, Project: D-VAE, Lines: 26, Source: test_basic_ops.py

Example 7: compute_emb

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def compute_emb(x, W):

    def _step(xi, emb, W):
        if prm.att_doc:
            new_shape = (xi.shape[0], xi.shape[1], xi.shape[2], prm.dim_emb)
        else:
            new_shape = (xi.shape[0], xi.shape[1], prm.dim_emb)

        out = W[xi.flatten()].reshape(new_shape).sum(-2)
        return out / tensor.maximum(1., tensor.neq(xi, -1).astype('float32').sum(-1, keepdims=True))

    if prm.att_doc:
        emb_init = tensor.alloc(0., x.shape[1], x.shape[2], prm.dim_emb)
    else:
        emb_init = tensor.alloc(0., x.shape[1], prm.dim_emb)

    embs, scan_updates = theano.scan(_step,
                                sequences=[x],
                                outputs_info=[emb_init],
                                non_sequences=[W],
                                name='emb_scan',
                                n_steps=x.shape[0])

    return embs 
Author: nyu-dl, Project: dl4ir-webnav, Lines: 26, Source: neuagent.py

Example 8: get_output_for

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def get_output_for(self, input, **kwargs):
        # by default we assume 'cross', consistent with corrmm.
        conv_mode = 'conv' if self.flip_filters else 'cross'

        from lasagne.layers.dnn import dnn
        image = T.alloc(0., input.shape[0], *self.output_shape[1:])
        conved = dnn.dnn_conv(img=image,
                              kerns=self.W,
                              subsample=self.stride,
                              border_mode=self.pad,
                              conv_mode=conv_mode
                              )

        grad = T.grad(conved.sum(), wrt=image, known_grads={conved: input})

        if self.b is None:
            activation = grad
        elif self.untie_biases:
            activation = grad + self.b.dimshuffle('x', 0, 1, 2)
        else:
            activation = grad + self.b.dimshuffle('x', 0, 'x', 'x')
        return self.nonlinearity(activation) 
Author: alexlee-gk, Project: visual_dynamics, Lines: 24, Source: layers_theano.py

Example 9: combine_fragments_to_dense_bxcyz

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def combine_fragments_to_dense_bxcyz(self, tensor, sh):
      """ expected shape: (batch, x, channels, y, z)"""
      ttensor = tensor  # same shape as the result, no significant time cost

      output_stride = self.output_stride
      if isinstance(output_stride, (list, tuple)):
          example_stride = np.prod(output_stride)
      else:
          example_stride = output_stride**3
          output_stride = np.asarray((output_stride,) * 3)
      zero = np.array(0, dtype=theano.config.floatX)
      # The first argument of T.alloc is the fill value (0 here),
      # not an element of the shape.
      embedding = T.alloc(zero, 1, sh[1] * output_stride[0], sh[2],
                          sh[3] * output_stride[1], sh[4] * output_stride[2])
      ix = offset_map(output_stride)
      print("      output_stride", output_stride)
      print("      example_stride", example_stride)
      for i, (n, m, k) in enumerate(ix):
          embedding = T.set_subtensor(
              embedding[:, n::output_stride[0], :, m::output_stride[1], k::output_stride[2]],
              ttensor[i::example_stride])
      return embedding 
Author: GUR9000, Project: Deep_MRI_brain_extraction, Lines: 20, Source: NN_ConvLayer_3D.py

Example 10: pad_to_a_multiple

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def pad_to_a_multiple(tensor_, k, pad_with):
    """Pad a tensor to make its first dimension a multiple of a number.

    Parameters
    ----------
    tensor_ : :class:`~theano.Variable`
    k : int
        The number, multiple of which the length of tensor is made.
    pad_with : float or int
        The value for padding.

    """
    new_length = (
        tensor.ceil(tensor_.shape[0].astype('float32') / k) * k).astype('int64')
    new_shape = tensor.set_subtensor(tensor_.shape[:1], new_length)
    canvas = tensor.alloc(pad_with, tensor.prod(new_shape)).reshape(
        new_shape, ndim=tensor_.ndim)
    return tensor.set_subtensor(canvas[:tensor_.shape[0]], tensor_) 
Author: rizar, Project: attention-lvcsr, Lines: 20, Source: expressions.py
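
As a quick illustration (not from the original project), padding a batch of 5 rows up to a multiple of 4 with the helper above might look like this sketch, assuming pad_to_a_multiple is in scope:

import numpy
import theano
from theano import tensor

x = tensor.matrix('x')
padded = pad_to_a_multiple(x, 4, 0.)  # pad the first dimension up to a multiple of 4
f = theano.function([x], padded)

out = f(numpy.ones((5, 3), dtype=theano.config.floatX))
print(out.shape)  # (8, 3): length 5 rounded up to the next multiple of 4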

Example 11: output

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def output(self, *args, **kwargs):
        """
        Code is based on https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/expr/normalize.py
        """
        input = self.input_layer.output(*args, **kwargs)

        half = self.n // 2
        sq = T.sqr(input)
        b, ch, r, c = input.shape
        extra_channels = T.alloc(0., b, ch + 2*half, r, c)
        sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * sq[:,i:i+ch,:,:]

        scale = scale ** self.beta

        return input / scale 
Author: benanne, Project: kaggle-galaxies, Lines: 21, Source: layers.py

Example 12: gru_layer

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def gru_layer(tparams, emb, options):
    hiddenDimSize = options['hiddenDimSize']
    timesteps = emb.shape[0]
    if emb.ndim == 3: n_samples = emb.shape[1]
    else: n_samples = 1

    def stepFn(wx, h, U_gru):
        uh = T.dot(h, U_gru)
        r = T.nnet.sigmoid(_slice(wx, 0, hiddenDimSize) + _slice(uh, 0, hiddenDimSize))
        z = T.nnet.sigmoid(_slice(wx, 1, hiddenDimSize) + _slice(uh, 1, hiddenDimSize))
        h_tilde = T.tanh(_slice(wx, 2, hiddenDimSize) + r * _slice(uh, 2, hiddenDimSize))
        h_new = z * h + ((1. - z) * h_tilde)
        return h_new

    Wx = T.dot(emb, tparams['W_gru']) + tparams['b_gru']
    results, updates = theano.scan(fn=stepFn,
                                   sequences=[Wx],
                                   outputs_info=T.alloc(numpy_floatX(0.0), n_samples, hiddenDimSize),
                                   non_sequences=[tparams['U_gru']],
                                   name='gru_layer',
                                   n_steps=timesteps)

    return results 
Author: mp2893, Project: gram, Lines: 20, Source: gram.py
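
The T.alloc call in outputs_info above is a common idiom: it creates the initial hidden state as a zero tensor whose batch dimension is symbolic, so the same compiled function works for any batch size. A minimal standalone sketch of the pattern (variable names are illustrative, and the step function is a simple running sum standing in for a GRU step):

import numpy
import theano
import theano.tensor as T

x = T.tensor3('x')  # (timesteps, batch, dim)
# Zero initial state with a symbolic batch size, as in the GRU layer above.
h0 = T.alloc(numpy.asarray(0.0, dtype=theano.config.floatX),
             x.shape[1], x.shape[2])

h_seq, _ = theano.scan(fn=lambda x_t, h_tm1: h_tm1 + x_t,
                       sequences=[x],
                       outputs_info=h0,
                       n_steps=x.shape[0])

f = theano.function([x], h_seq[-1])
data = numpy.ones((4, 2, 3), dtype=theano.config.floatX)
print(f(data))  # every entry is 4.0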

Example 13: connect

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def connect(self, inputs, mask, is_train):
    """ is_train: A boolean tensor.
    """
    max_length = inputs.shape[0]
    batch_size = inputs.shape[1]
    outputs_info = [tensor.alloc(numpy_floatX(0.), batch_size, self.hidden_dim),
            tensor.alloc(numpy_floatX(0.), batch_size, self.hidden_dim)]
    # Dropout mask sharing for variational dropout.
    self.is_train = is_train
    if self.recurrent_dropout_layer is not None:
      self.recurrent_dropout_layer.generate_mask([batch_size, self.hidden_dim], is_train)
    
    inputs = tensor.dot(inputs, self.W) + self.b
    rval, _ = theano.scan(self._step, # Scan function
                sequences=[inputs, mask], # Input sequence
                outputs_info=outputs_info,
                name=_p(self.prefix, '_layers'),
                n_steps=max_length) # scan steps
    return rval[0] 
Author: luheng, Project: deep_srl, Lines: 21, Source: layer.py

Example 14: gru_layer

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def gru_layer(tparams, emb, name, hiddenDimSize):
	timesteps = emb.shape[0]
	if emb.ndim == 3: n_samples = emb.shape[1]
	else: n_samples = 1

	def stepFn(wx, h, U_gru):
		uh = T.dot(h, U_gru)
		r = T.nnet.sigmoid(_slice(wx, 0, hiddenDimSize) + _slice(uh, 0, hiddenDimSize))
		z = T.nnet.sigmoid(_slice(wx, 1, hiddenDimSize) + _slice(uh, 1, hiddenDimSize))
		h_tilde = T.tanh(_slice(wx, 2, hiddenDimSize) + r * _slice(uh, 2, hiddenDimSize))
		h_new = z * h + ((1. - z) * h_tilde)
		return h_new

	Wx = T.dot(emb, tparams['W_gru_'+name]) + tparams['b_gru_'+name]
	results, updates = theano.scan(fn=stepFn,
			sequences=[Wx],
			outputs_info=T.alloc(numpy_floatX(0.0), n_samples, hiddenDimSize),
			non_sequences=[tparams['U_gru_'+name]],
			name='gru_layer',
			n_steps=timesteps)

	return results 
Author: mp2893, Project: retain, Lines: 19, Source: test_retain.py

Example 15: theano_repeat

# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import alloc [as alias]
def theano_repeat(arr, n_repeat, stretch=False):
    """
    Create repeats of 2D array using broadcasting.
    Shape[0] incorrect after this node!
    """
    if arr.dtype not in ["float32", "float64"]:
        arr = tensor.cast(arr, "int32")
    if stretch:
        arg1 = arr.dimshuffle((0, 'x', 1))
        arg2 = tensor.alloc(1., 1, n_repeat, arr.shape[1])
        arg2 = tensor.cast(arg2, arr.dtype)
        cloned = (arg1 * arg2).reshape((n_repeat * arr.shape[0], arr.shape[1]))
    else:
        arg1 = arr.dimshuffle(('x', 0, 1))
        arg2 = tensor.alloc(1., n_repeat, 1, arr.shape[1])
        arg2 = tensor.cast(arg2, arr.dtype)
        cloned = (arg1 * arg2).reshape((n_repeat * arr.shape[0], arr.shape[1]))
    shape = expression_shape(arr)
    name = expression_name(arr)
    # Stretched shapes are *WRONG*
    tag_expression(cloned, name + "_stretched", (shape[0], shape[1]))
    return cloned 
Author: kastnerkyle, Project: SciPy2015, Lines: 24, Source: kdl_template.py
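
The broadcasting trick at the heart of theano_repeat can be shown on its own, without the project's expression-tagging helpers (names below are illustrative):

import numpy
import theano
import theano.tensor as tensor

arr = tensor.matrix('arr')
n_repeat = 3

# (1, rows, cols) * (n_repeat, 1, cols) broadcasts to (n_repeat, rows, cols),
# which reshapes into the array stacked n_repeat times.
arg1 = arr.dimshuffle(('x', 0, 1))
arg2 = tensor.cast(tensor.alloc(1., n_repeat, 1, arr.shape[1]), arr.dtype)
tiled = (arg1 * arg2).reshape((n_repeat * arr.shape[0], arr.shape[1]))

f = theano.function([arr], tiled)
a = numpy.arange(4, dtype=theano.config.floatX).reshape(2, 2)
print(f(a).shape)  # (6, 2)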


Note: The theano.tensor.alloc method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.