

Python tensor.arange Method Code Examples

This article collects typical usage examples of the theano.tensor.arange method in Python. If you are wondering how tensor.arange is used in practice, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the theano.tensor module.


The following shows 15 code examples of the tensor.arange method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
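
As a quick orientation, here is a minimal sketch (assuming a working Theano installation, not taken from any of the projects below) that compiles tensor.arange into a function, using both the single-argument form and the start/stop/step form:

import theano
import theano.tensor as T

n = T.iscalar('n')
f = theano.function([n], [T.arange(n), T.arange(2, n, 2)])
seq, evens = f(8)
print(seq)    # [0 1 2 3 4 5 6 7]
print(evens)  # [2 4 6]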

Example 1: accuracy_instance

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], \
        nb_classes=5, nb_samples_per_class=10, batch_size=1):
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class), \
        dtype=theano.config.floatX))
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes), \
        dtype=np.int32))
    batch_range = T.arange(batch_size)
    def step_(p, t, acc, idx):
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]], T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)
    (raw_accuracy, _), _ = theano.foldl(step_, sequences=[predictions.dimshuffle(1, 0), \
        targets.dimshuffle(1, 0)], outputs_info=[accuracy_0, indices_0])
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)

    return accuracy 
Author: tristandeleu, Project: ntm-one-shot, Lines: 18, Source: metrics.py

Example 2: test_int32_dtype

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def test_int32_dtype(self):
        # Reported on the theano-user mailing-list:
        # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
        size = 9
        intX = 'int32'

        C = tensor.matrix('C', dtype=intX)
        I = tensor.matrix('I', dtype=intX)

        fI = I.flatten()
        data = tensor.ones_like(fI)
        indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

        m1 = sparse.CSR(data, fI, indptr, (8, size))
        m2 = sparse.dot(m1, C)
        y = m2.reshape(shape=(2, 4, 9), ndim=3)

        f = theano.function(inputs=[I, C], outputs=y)
        i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
        a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                          dtype=intX)
        f(i, a) 
Author: muhanzhang, Project: D-VAE, Lines: 24, Source: test_basic.py
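
The indptr built with tensor.arange above gives the CSR matrix exactly one nonzero per row. A small scipy sketch of the same construction (an illustration only, not part of the original test):

import numpy as np
import scipy.sparse as sp

fI = np.array([4, 3, 7, 7, 2, 8, 4, 5], dtype='int32')  # flattened column indices
data = np.ones_like(fI)
indptr = np.arange(data.shape[0] + 1, dtype='int32')     # row i owns data[i:i+1]
m1 = sp.csr_matrix((data, fI, indptr), shape=(8, 9))
print(m1.toarray().sum(axis=1))  # one nonzero in every row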

Example 3: test_infer_shape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def test_infer_shape(self):
        mat = (numpy.arange(12) + 1).reshape((4, 3))
        mat[0, 1] = mat[1, 0] = mat[2, 2] = 0

        x_csc = theano.sparse.csc_matrix(dtype=theano.config.floatX)
        mat_csc = sp.csc_matrix(mat, dtype=theano.config.floatX)
        self._compile_and_check([x_csc],
                                [Remove0()(x_csc)],
                                [mat_csc],
                                self.op_class)

        x_csr = theano.sparse.csr_matrix(dtype=theano.config.floatX)
        mat_csr = sp.csr_matrix(mat, dtype=theano.config.floatX)
        self._compile_and_check([x_csr],
                                [Remove0()(x_csr)],
                                [mat_csr],
                                self.op_class) 
Author: muhanzhang, Project: D-VAE, Lines: 19, Source: test_basic.py

Example 4: negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
                \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
        # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
        # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]
        # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) 
Author: muhanzhang, Project: D-VAE, Lines: 26, Source: mlp_test.py
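
The indexing pattern the comments above describe, LP[T.arange(n), y], selects one log-probability per row. A tiny numpy sketch of that selection (hypothetical values, for illustration only):

import numpy as np

LP = np.log(np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1],
                      [0.3, 0.3, 0.4]]))   # log-probabilities, one row per example
y = np.array([0, 1, 2])                    # correct label of each example
picked = LP[np.arange(y.shape[0]), y]      # [LP[0,0], LP[1,1], LP[2,2]]
print(-picked.mean())                      # mean negative log-likelihood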

Example 5: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def __init__(self, seq_len, n_feature):
        import theano.tensor as T
        self.Input = lasagne.layers.InputLayer(shape=(None, seq_len, n_feature))
        self.buildNetwork()
        self.output = lasagne.layers.get_output(self.network)
        self.params = lasagne.layers.get_all_params(self.network, trainable=True)
        self.output_fn = theano.function([self.Input.input_var], self.output)

        fx = T.fvector().astype("float64")
        choices = T.ivector()
        px = self.output[T.arange(self.output.shape[0]), choices]
        log_px = T.log(px)
        cost = -fx.dot(log_px)
        updates = lasagne.updates.adagrad(cost, self.params, 0.0008)
        Input = lasagne.layers.InputLayer(shape=(None, seq_len, n_feature))
        self.train_fn = theano.function([self.Input.input_var, choices, fx], [cost, px, log_px], updates=updates) 
Author: doncat99, Project: StockRecommendSystem, Lines: 18, Source: agent.py

Example 6: update_critic

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def update_critic(self, random_sample):
        #random_sample = np.random.choice(np.arange(len(self.rewards)-1), self.batch_size)

        states_batch = np.zeros((self.batch_size, self.lookback_size, self.n_feature), dtype = "float32")
        states_next_batch = np.zeros((self.batch_size, self.lookback_size, self.n_feature),dtype = "float32")

        #print random_sample

        for i in range(self.batch_size):
            random_id = random_sample[i]
            states_batch[i,:,:] =np.array(self.states[random_id:random_id+self.lookback_size]).astype("float32")
            states_next_batch[i,:,:] =np.array(self.states[random_id + 1:(random_id+self.lookback_size +1)]).astype("float32")

        reward_batch = np.array([self.rewards[i] for i in random_sample]).astype("float32")
        #using target model to predict
        target_value = self.target_model.predict(states_next_batch).flatten()*self.gamma + reward_batch

        self.critic_model.train(states_batch, target_value.reshape(self.batch_size,1)) 
Author: doncat99, Project: StockRecommendSystem, Lines: 20, Source: agent.py

Example 7: log_cost

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def log_cost(cls, y, y_hat, y_mask, y_hat_mask, blank_symbol):
        y_hat_mask_len = tensor.sum(y_hat_mask, axis=0, dtype='int32')
        y_mask_len = tensor.sum(y_mask, axis=0, dtype='int32')
        log_probabs = cls.log_path_probabs(y, y_hat,
                                           y_mask, y_hat_mask,
                                           blank_symbol)
        batch_size = log_probabs.shape[1]
        labels_probab = cls.log_add(
            log_probabs[y_hat_mask_len - 1,
                        tensor.arange(batch_size),
                        y_mask_len - 1],
            log_probabs[y_hat_mask_len - 1,
                        tensor.arange(batch_size),
                        y_mask_len - 2])
        avg_cost = tensor.mean(-labels_probab)
        return avg_cost 
Author: mohammadpz, Project: CTC-Connectionist-Temporal-Classification, Lines: 18, Source: ctc_cost.py
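
The double use of tensor.arange above selects, for every batch element, the forward log-probability at that sequence's own final time step and final label position. A small numpy sketch of the same 3D fancy indexing (hypothetical shapes, illustration only):

import numpy as np

T_steps, batch, labels = 4, 2, 3
log_probabs = np.random.rand(T_steps, batch, labels)
y_hat_mask_len = np.array([4, 3])   # per-sequence length of the prediction
y_mask_len = np.array([3, 2])       # per-sequence length of the target

# element b of the result is log_probabs[y_hat_mask_len[b]-1, b, y_mask_len[b]-1]
last = log_probabs[y_hat_mask_len - 1, np.arange(batch), y_mask_len - 1]
print(last.shape)  # (2,)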

Example 8: negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
        # T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        #print "at least, y must be provided in a flattened view (a list of class values)!"

        return -T.mean(T.log(self.class_probabilities)[T.arange(y.shape[0]),y]) #shape of class_probabilities is e.g. (14*14,2) for 2 classes and 14**2 labels 
Author: GUR9000, Project: Deep_MRI_brain_extraction, Lines: 22, Source: NN_ConvLayer_2D.py

Example 9: negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
                \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
      
        return -T.mean(T.log(self.class_probabilities)[T.arange(y.shape[0]), y]) 
Author: GUR9000, Project: Deep_MRI_brain_extraction, Lines: 21, Source: NN_PerceptronLayer.py

Example 10: max_pool_along_channel_axis

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def max_pool_along_channel_axis(sym_input, pool_factor):
    """ for 3D conv."""
    s = None
    for i in xrange(pool_factor):
        t = sym_input[:,:,i::pool_factor]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)
    return s
#    Ns, Ts, C, Hs, Ws = 1, 70, 1, 70, 70  -> 70^3
#    Nf, Tf, C, Hf, Wf = 32, 5 , 1, 5 , 5  -> 32 filters of shape 5^3
#    signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
#    filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
#
# in 3D
#        input:  (1, 70,  3, 70, 70)
#       filters: (32, 5 , 3,  5 , 5)
#    --> output: (1, 66, 32, 66, 66) 
Author: GUR9000, Project: Deep_MRI_brain_extraction, Lines: 21, Source: NN_ConvLayer_3D.py
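
For reference, a plain numpy rendering of the channel-axis pooling above (toy shapes, not part of the original layer): with pool_factor=2 the output is the elementwise maximum of the even- and odd-indexed channel slices.

import numpy as np

x = np.random.rand(1, 5, 4, 6, 6)        # (N, T, channels, H, W)
pool_factor = 2
s = None
for i in range(pool_factor):
    t = x[:, :, i::pool_factor]
    s = t if s is None else np.maximum(s, t)
print(s.shape)  # (1, 5, 2, 6, 6) -- channel axis halved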

Example 11: depool

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def depool(X, factor=2):
    """
    Luke perforated upsample: http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2]*factor,
        X.shape[3]*factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor

    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    cols = rows*factor + (rows/stride * factor * offset)
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)

    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))

    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0], output_shape[1], output_shape[2]))

    return upsamp 
Author: Ivaylo-Popov, Project: Theano-Lights, Lines: 27, Source: toolbox.py
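
A numpy sketch of the index arithmetic in depool (hypothetical 4x4 map, factor 2, illustration only): input pixel (i, j) lands at output position (2*i, 2*j) and every other output entry stays zero.

import numpy as np

stride, offset, factor = 4, 4, 2                  # stride = H, offset = W of the input map
in_dim, out_dim = stride * offset, stride * offset * factor * factor
rows = np.arange(in_dim)
cols = rows * factor + (rows // stride) * factor * offset
upsamp = np.zeros((in_dim, out_dim))
upsamp[rows, cols] = 1.0

x = np.arange(in_dim, dtype=float)                # flattened 4x4 input
print(x.dot(upsamp).reshape(stride * factor, offset * factor))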

Example 12: link

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def link(self, input,words):
#{{{
        """
        Propagate the input through the network and return the last hidden
        vector. The whole sequence is also accessible via self.h, but
        where self.h of shape (sequence_length, batch_size, output_dim)
        """

        # If we use batches, we have to permute the first and second dimension.
        if self.with_batch:
            assert 0,"AttentionLSTM not implement with_batch";
        else:
            self.input = input
            initial_states = [self.h_0, self.c_0] 
        
        step_function=self.step;  

        [e,h,c], _ = theano.scan(
            fn=step_function,
            sequences=[words,T.arange(words.shape[0])],
            outputs_info=[T.zeros((input.shape[0],),
                                  dtype=theano.config.floatX)]+initial_states,
            non_sequences=[self.input],
        )
        self.h = h
        self.output = h[-1]
        self.e=e;
        self.c=c;
        return self.output
#}}}
 
#}}} 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 34, Source: nn.py

Example 13: modelScore

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def modelScore(self,tag_ids,scores,s_len):
    #{{{
        """
            ATTENTATION THIS FUNCTION IS SYMBOL PROGRAMMING
            this function is to return the score of our model at a fixed sentence label 
        @param:
            scores:        the scores matrix ,the output of our model
            tag:           a numpy array, which represent one sentence label 
            sent_lens:     a scalar number, the length of sentence.
                because our sentence label will be expand to max sentence length,
                so we will use this to get the original sentence label. 
        @return: 
            a scalar number ,the score;
        """
    #{{{
        n_tags=self.output_dim;
        transitions=self.transitions;
        #score from tags_scores
        real_path_score = scores[T.arange(s_len), tag_ids].sum()

        # Score from transitions
        b_id = theano.shared(value=np.array([n_tags], dtype=np.int32))
        e_id = theano.shared(value=np.array([n_tags + 1], dtype=np.int32))
        padded_tags_ids = T.concatenate([b_id, tag_ids, e_id], axis=0)
        real_path_score += transitions[
                padded_tags_ids[T.arange(s_len + 1)],
                padded_tags_ids[T.arange(s_len + 1) + 1]
            ].sum()
        #to prevent T.exp(real_path_score) to be inf 
        #return real_path_score;
        return real_path_score/s_len;
    #}}}
    #}}} 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 35, Source: model.py
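
A toy numpy sketch of the path score computed above (hypothetical 3-tag, 2-word case, illustration only): emission scores along the chosen tags plus transition scores along the begin/end-padded tag path, normalised by the sentence length.

import numpy as np

n_tags, s_len = 3, 2
scores = np.array([[0.1, 0.7, 0.2],
                   [0.3, 0.3, 0.4]])                    # emission score per word and tag
transitions = np.full((n_tags + 2, n_tags + 2), 0.5)    # two extra states: begin, end
tag_ids = np.array([1, 2])

emit = scores[np.arange(s_len), tag_ids].sum()           # 0.7 + 0.4
padded = np.concatenate([[n_tags], tag_ids, [n_tags + 1]])
trans = transitions[padded[:-1], padded[1:]].sum()       # begin->1, 1->2, 2->end
print((emit + trans) / s_len)                            # (1.1 + 1.5) / 2 = 1.3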

Example 14: arange

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def arange(start, stop=None, step=1, dtype='int32'):
    '''Creates a 1-D tensor containing a sequence of integers.

    The function arguments use the same convention as
    Theano's arange: if only one argument is provided,
    it is in fact the "stop" argument.

    The default type of the returned tensor is 'int32' to
    match TensorFlow's default.
    '''
    return T.arange(start, stop=stop, step=step, dtype=dtype) 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 13, Source: theano_backend.py

Example 15: ctc_interleave_blanks

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import arange [as alias]
def ctc_interleave_blanks(Y):
    Y_ = T.alloc(-1, Y.shape[0] * 2 + 1)
    Y_ = T.set_subtensor(Y_[T.arange(Y.shape[0]) * 2 + 1], Y)
    return Y_ 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 6, Source: theano_backend.py
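
For Y = [2, 5, 3] the function above returns [-1, 2, -1, 5, -1, 3, -1], i.e. a blank marker (-1) before, between and after the labels. A plain numpy rendering of the same operation (illustration only):

import numpy as np

Y = np.array([2, 5, 3])
Y_ = np.full(Y.shape[0] * 2 + 1, -1)
Y_[np.arange(Y.shape[0]) * 2 + 1] = Y
print(Y_)  # [-1  2 -1  5 -1  3 -1]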


Note: The theano.tensor.arange method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to each project's license for distribution and use, and do not reproduce without permission.