

Python tensor.concatenate Method Code Examples

This article collects typical usage examples of Python's theano.tensor.concatenate method. If you are unsure what tensor.concatenate does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples for the containing module, theano.tensor.


The following presents 15 code examples of the tensor.concatenate method, sorted by popularity by default.
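
A minimal orientation sketch before the examples: tensor.concatenate joins a list of symbolic tensors along a given axis, much like numpy.concatenate. The snippet below is illustrative and not drawn from any of the projects cited here.

import numpy as np
import theano
import theano.tensor as T

a = T.matrix('a')
b = T.matrix('b')
# Join column-wise; all dimensions except `axis` must match.
c = T.concatenate([a, b], axis=1)

f = theano.function([a, b], c)
out = f(np.ones((2, 3), dtype=theano.config.floatX),
        np.zeros((2, 2), dtype=theano.config.floatX))
print(out.shape)  # (2, 5)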

Example 1: step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def step(self, word, h_tm1, c_tm1, x):
    # Attend over the input sequence x, conditioned on the previous cell state.
    H = x
    input_length = x.shape[0]
    C = T.repeat(c_tm1.reshape((1, -1)), input_length, axis=0)
    _HC = K.concatenate([H, C])
    energy = T.dot(_HC, self.W_A.reshape((-1, 1))) + self.b_A
    energy = K.softmax(energy.reshape((1, -1)))
    # Glimpse: attention-weighted sum of the inputs.
    x = (H * energy.reshape((-1, 1))).sum(axis=0)

    # Combine the glimpse with the current word.
    combine = K.concatenate([x, word])
    combined = K.dot(combine, self.W_combine) + self.b_combine
    # Original LSTM step.
    h_t, c_t = super(AttentionLSTM, self).step_noBatch(combined, h_tm1, c_tm1)
    return h_t, c_t
Author: lingluodlut, Project: Att-ChemdNER, Lines: 19, Source: nn.py

Example 2: ctc_update_log_p

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 26, Source: theano_backend.py

Example 3: with_additional_nodes

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def with_additional_nodes(self, new_node_strengths, new_node_ids, new_node_states=None):
        """
        Helper function to generate a new state with new nodes added.

        Params:
            new_node_strengths: Tensor of shape (n_batch, n_new_nodes)
            new_node_ids: Tensor of shape (n_batch, n_new_nodes, num_node_ids)
            new_node_states: (Optional) Tensor of shape (n_batch, n_new_nodes, node_state_size)
                If not provided, will be zero

        Returns: A new graph state with the changes
        """
        if new_node_states is None:
            new_node_states = T.zeros([self.n_batch, new_node_strengths.shape[1], self.node_state_size])

        next_node_strengths = T.concatenate([self.node_strengths, new_node_strengths], 1)
        next_node_ids = T.concatenate([self.node_ids, new_node_ids], 1)
        next_node_states = T.concatenate([self.node_states, new_node_states], 1)
        next_n_nodes = next_node_strengths.shape[1]

        next_edge_strengths = pad_to(self.edge_strengths, [self.n_batch, next_n_nodes, next_n_nodes, self.num_edge_types])

        cls = type(self)
        return cls(next_node_strengths, next_node_ids, next_node_states, next_edge_strengths) 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 26, Source: graph_state.py
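
Example 3 calls a pad_to helper that is defined elsewhere in the project. A minimal sketch of what such a helper might look like, assuming it zero-pads a tensor up to a target (possibly symbolic) shape, is:

def pad_to(tensor, shape):
    # Hypothetical helper: embed `tensor` into a zero tensor of `shape`,
    # where shape[i] >= tensor.shape[i] for every dimension.
    result = T.zeros(shape)
    slices = tuple(slice(0, tensor.shape[i]) for i in range(tensor.ndim))
    return T.set_subtensor(result[slices], tensor)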

Example 4: broadcast_concat

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def broadcast_concat(tensors, axis):
    """
    Broadcast tensors together, then concatenate along axis
    """
    ndim = tensors[0].ndim
    assert all(t.ndim == ndim for t in tensors), "ndims don't match for broadcast_concat: {}".format(tensors)
    broadcast_shapes = []
    for i in range(ndim):
        if i == axis:
            broadcast_shapes.append(1)
        else:
            dim_size = next((t.shape[i] for t in tensors if not t.broadcastable[i]), 1)
            broadcast_shapes.append(dim_size)
    broadcasted_tensors = []
    for t in tensors:
        tile_reps = [bshape if t.broadcastable[i] else 1 for i, bshape in enumerate(broadcast_shapes)]
        # Entries of tile_reps are either the literal Python int 1 or symbolic
        # sizes; check for the literal int rather than building a symbolic
        # comparison, which `all` could not evaluate.
        if all(isinstance(rep, int) and rep == 1 for rep in tile_reps):
            # Don't need to broadcast this tensor
            broadcasted_tensors.append(t)
        else:
            broadcasted_tensors.append(T.tile(t, tile_reps))
    return T.concatenate(broadcasted_tensors, axis) 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 24, Source: util.py
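
A short usage sketch for broadcast_concat above (the shapes and names are illustrative): a tensor with a broadcastable middle dimension is tiled to match a full 3-D tensor before the two are joined along the feature axis.

x = T.matrix('x')             # (n, d1)
xb = x.dimshuffle(0, 'x', 1)  # (n, 1, d1), middle dimension broadcastable
y = T.tensor3('y')            # (n, m, d2)
# xb is tiled to (n, m, d1), then concatenated with y -> (n, m, d1 + d2)
z = broadcast_concat([xb, y], axis=2)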

Example 5: cross

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def cross(x, y, axis=None):
    ndim = x.ndim
    assert x.ndim == y.ndim
    if axis is None:
        axis = ndim - 1
    def _getindexslice(a, i):
        return a[tuple([slice(i, i + 1) if d == axis else slice(None)
                        for d in range(ndim)])]
    x0 = _getindexslice(x, 0)
    x1 = _getindexslice(x, 1)
    x2 = _getindexslice(x, 2)
    y0 = _getindexslice(y, 0)
    y1 = _getindexslice(y, 1)
    y2 = _getindexslice(y, 2)

    res = T.concatenate((x1*y2 - x2*y1,
                         x2*y0 - x0*y2,
                         x0*y1 - x1*y0), axis=axis)
    return res 
Author: hjimce, Project: Depth-Map-Prediction, Lines: 21, Source: thutil.py
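
A quick sanity check for cross above, compiled on plain 3-vectors and compared against numpy.cross (illustrative only):

import numpy as np
import theano

x = T.vector('x')
y = T.vector('y')
f = theano.function([x, y], cross(x, y))

a = np.array([1., 0., 0.], dtype=theano.config.floatX)
b = np.array([0., 1., 0.], dtype=theano.config.floatX)
print(f(a, b))         # [0. 0. 1.]
print(np.cross(a, b))  # same result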

Example 6: _context_sensitive_shift

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def _context_sensitive_shift(self, inputs):
        """
        Compute a buffer top representation by mixing buffer top and hidden state.

        NB: This hasn't been an especially effective tool so far.
        """
        assert self.use_tracking_lstm
        buffer_top, tracking_hidden = inputs[2:4]

        # Exclude the cell value from the computation (keep only the hidden part).
        tracking_hidden = tracking_hidden[:, :self.tracking_lstm_hidden_dim]

        inp = T.concatenate([tracking_hidden, buffer_top], axis=1)
        inp_dim = self._spec.word_embedding_dim + self.tracking_lstm_hidden_dim
        layer = util.ReLULayer if self.context_sensitive_use_relu else util.Linear
        return layer(inp, inp_dim, self._spec.model_dim, self._vs,
                     name="context_comb_unit", use_bias=True,
                     initializer=util.HeKaimingInitializer()) 
Author: stanfordnlp, Project: spinn, Lines: 20, Source: recurrences.py

Example 7: _make_rnn

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def _make_rnn(self, seq_length=4):
        self.embedding_dim = embedding_dim = 3
        self.vocab_size = vocab_size = 10
        self.seq_length = seq_length
        
        def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
            # Just add the two embeddings!
            W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
            i = T.concatenate([h_prev, inp], axis=1)
            return i.dot(W)

        X = T.imatrix("X")
        training_mode = T.scalar("training_mode")
        vs = VariableStore()
        embeddings = np.arange(vocab_size).reshape(
            (vocab_size, 1)).repeat(embedding_dim, axis=1)
        self.model = RNN(
            embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
            IdentityLayer, training_mode, None, vs,
            X=X, make_test_fn=True, initial_embeddings=embeddings) 
Author: stanfordnlp, Project: spinn, Lines: 22, Source: test_plain_rnn.py

Example 8: setUp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def setUp(self):
        if 'gpu' not in theano.config.device:
            raise RuntimeError("Thin stack only defined for GPU usage")

        self.embedding_dim = self.model_dim = 2
        self.vocab_size = 5
        self.seq_length = 5
        self.batch_size = 2
        self.num_classes = 2

        spec = util.ModelSpec(self.model_dim, self.embedding_dim,
                              self.batch_size, self.vocab_size,
                              self.seq_length)

        self.vs = vs = VariableStore()
        def compose_network(children, *args, **kwargs):
            c1, c2 = children
            W = vs.add_param("W", (self.model_dim * 2, self.model_dim))
            b = vs.add_param("b", (self.model_dim,),
                             initializer=util.ZeroInitializer())
            return T.dot(T.concatenate([c1, c2], axis=1), W) + b
Author: stanfordnlp, Project: spinn, Lines: 22, Source: test_stack.py

Example 9: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def __init__(self, prev_layers, axis=1):
        """
        list of prev layers to concatenate
        axis to concatenate

        For tensor5, channel dimension is axis=2 (due to theano conv3d
        convention). For image, axis=1
        """
        assert (len(prev_layers) > 1)
        super().__init__(prev_layers[0])
        self._axis = axis
        self._prev_layers = prev_layers

        self._output_shape = self._input_shape.copy()
        for prev_layer in prev_layers[1:]:
            self._output_shape[axis] += prev_layer._output_shape[axis]
        print('Concat the prev layer to [%s]' % ','.join(str(x) for x in self._output_shape)) 
Author: chrischoy, Project: 3D-R2N2, Lines: 19, Source: layers.py

Example 10: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1) 
Author: djsutherland, Project: opt-mmd, Lines: 27, Source: nn.py
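
Example 10 implements minibatch discrimination in the style of "Improved Techniques for Training GANs" (Salimans et al., 2016). A NumPy sketch of the core statistic, with hypothetical sizes, may make the dimshuffle bookkeeping easier to follow:

import numpy as np

N, B, C = 4, 8, 3               # batch size, num kernels, kernel dim (hypothetical)
act = np.random.randn(N, B, C)  # corresponds to T.tensordot(input, W, ...)

# Pairwise L1 distances between samples, per kernel: shape (N, B, N)
dist = np.abs(act[:, :, :, None] - act.transpose(1, 2, 0)[None]).sum(axis=2)
# Suppress self-comparisons, as the 1e6 * T.eye(...) term does above
dist += 1e6 * np.eye(N)[:, None, :]
f = np.exp(-dist).sum(axis=2)   # (N, B) minibatch features appended to the input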

Example 11: build_encoder_bi

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def build_encoder_bi(tparams, options):
	"""
	build bidirectional encoder, given pre-computed word embeddings
	"""
	# word embedding (source)
	embedding = tensor.tensor3('embedding', dtype='float32')
	embeddingr = embedding[::-1]
	x_mask = tensor.matrix('x_mask', dtype='float32')
	xr_mask = x_mask[::-1]

	# encoder
	proj = get_layer(options['encoder'])[1](tparams, embedding, options,
											prefix='encoder',
											mask=x_mask)
	projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
											 prefix='encoder_r',
											 mask=xr_mask)

	ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

	return embedding, x_mask, ctx


Author: hanzhanggit, Project: StackGAN, Lines: 26, Source: skipthoughts.py

Example 12: param_init_gru

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
	"""
	parameter init for GRU
	"""
	if nin is None:
		nin = options['dim_proj']
	if dim is None:
		dim = options['dim_proj']
	W = numpy.concatenate([norm_weight(nin,dim),
						   norm_weight(nin,dim)], axis=1)
	params[_p(prefix,'W')] = W
	params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
	U = numpy.concatenate([ortho_weight(dim),
						   ortho_weight(dim)], axis=1)
	params[_p(prefix,'U')] = U

	Wx = norm_weight(nin, dim)
	params[_p(prefix,'Wx')] = Wx
	Ux = ortho_weight(dim)
	params[_p(prefix,'Ux')] = Ux
	params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')

	return params 
Author: hanzhanggit, Project: StackGAN, Lines: 25, Source: skipthoughts.py
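
param_init_gru depends on the _p, norm_weight, and ortho_weight helpers defined elsewhere in skipthoughts.py. Minimal sketches of what they conventionally look like in this code family follow; treat the exact initialization scheme and scale as assumptions.

import numpy

def _p(pp, name):
    # Join a prefix and a parameter name, e.g. _p('gru', 'W') -> 'gru_W'.
    return '%s_%s' % (pp, name)

def ortho_weight(ndim):
    # Random orthogonal square matrix obtained via SVD.
    W = numpy.random.randn(ndim, ndim)
    u, _, _ = numpy.linalg.svd(W)
    return u.astype('float32')

def norm_weight(nin, nout, scale=0.01):
    # Small random Gaussian initialization (hypothetical default scale).
    return (scale * numpy.random.randn(nin, nout)).astype('float32')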

Example 13: infer_shape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def infer_shape(self, node, in_shapes):
        shape_a = in_shapes[0]
        n = node.inputs[1]
        axis = node.inputs[2]
        if len(shape_a) == 1:
            return [(n,)]
        elif isinstance(axis, tensor.TensorConstant):
            out_shape = (list(shape_a[0: axis.data.item()]) + [n] +
                         list(shape_a[axis.data + 1:]))
        else:
            l = len(shape_a)
            shape_a = tensor.stack(shape_a)
            out_shape = tensor.concatenate((shape_a[0: axis], [n],
                                            shape_a[axis + 1:]))
            n_splits = [1] * l
            out_shape = tensor.split(out_shape, n_splits, l)
            out_shape = [a[0] for a in out_shape]
        return [out_shape] 
Author: muhanzhang, Project: D-VAE, Lines: 20, Source: fourier.py

Example 14: DiagonalBiLSTM

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def DiagonalBiLSTM(name, input_dim, inputs):
    """
    inputs.shape: (batch size, height, width, input_dim)
    inputs.shape: (batch size, height, width, DIM)
    """
    forward = DiagonalLSTM(name+'.Forward', input_dim, inputs)
    backward = DiagonalLSTM(name+'.Backward', input_dim, inputs[:,:,::-1,:])[:,:,::-1,:]
    batch_size = inputs.shape[0]
    backward = T.concatenate([
        T.zeros([batch_size, 1, WIDTH, DIM], dtype=theano.config.floatX),
        backward[:, :-1, :, :]
    ], axis=1)

    return forward + backward

Author: igul222, Project: pixel_rnn, Lines: 18, Source: pixel_rnn.py

Example 15: load_embeddings

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import concatenate [as alias]
def load_embeddings(args):
        lst = [ ]
        vas = [ ]
        with gzip.open(args.embedding, 'rt') as fin:
            for line in fin:
                parts = line.strip().split()
                w = parts[0]
                e = numpy.array( [[ float(x) for x in parts[1:] ]],
                                 dtype = theano.config.floatX )
                lst.append(w)
                vas.append(e)
        lst.append("## UNK ##")
        vas.append( numpy.zeros(vas[0].shape, dtype = theano.config.floatX) )
        vocabx = dict([ (y,x) for x,y in enumerate(lst) ])
        embeddings = numpy.concatenate(vas)
        assert len(vocabx) == len(embeddings)
        print "{} embedding loaded, size {}".format(embeddings.shape[0], embeddings.shape[1])
        return vocabx, embeddings 
Author: taolei87, Project: text_convnet, Lines: 20, Source: model.py
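
The loader above expects a gzipped text file with one word per line followed by its vector components. A tiny usage sketch, assuming load_embeddings is callable as a plain function (the file name and Namespace are illustrative):

import gzip
from argparse import Namespace

with gzip.open("toy.vec.gz", "wt") as fout:
    fout.write("hello 0.1 0.2 0.3\n")
    fout.write("world 0.4 0.5 0.6\n")

vocabx, embeddings = load_embeddings(Namespace(embedding="toy.vec.gz"))
print(sorted(vocabx))     # ['## UNK ##', 'hello', 'world']
print(embeddings.shape)   # (3, 3)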


Note: the theano.tensor.concatenate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.