

Python tensor.argmin Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.argmin. If you have been wondering what exactly argmin does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.


Below are 15 code examples of the argmin function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
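
As a quick orientation before the examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of what T.argmin computes: the index of the smallest element, taken over the flattened tensor by default, or along a given axis.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], [T.argmin(x), T.argmin(x, axis=1)])

flat_idx, row_idx = f(np.array([[3.0, 1.0, 2.0],
                                [0.5, 4.0, 6.0]],
                               dtype=theano.config.floatX))
print(flat_idx)  # 3 -- position of 0.5 in the flattened matrix
print(row_idx)   # [1 0] -- per-row position of the minimum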

Example 1: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        # mask = self.get_padded_shuffled_mask(train, X, pad=0)
        mask = self.get_input_mask(train=train)
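        # per-sample sequence length: index of the first masked-out (0) step,
        # or the full length when the final step is still unmasked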
        ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32').ravel()
        max_time = T.max(ind)
        X = X.dimshuffle((1, 0, 2))
        Y = T.dot(X, self.W) + self.b
        # h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
        c0 = T.repeat(self.c_m1, X.shape[1], axis=0)

        [outputs, _], updates = theano.scan(
            self._step,
            sequences=Y,
            outputs_info=[h0, c0],
            non_sequences=[self.R], n_steps=max_time,
            truncate_gradient=self.truncate_gradient, strict=True,
            allow_gc=theano.config.scan.allow_gc)

        res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
        if self.return_sequences:
            return res
        #return outputs[-1]
        return res[T.arange(mask.shape[0], dtype='int32'), ind]
Developer: chenych11, Project: keras, Lines: 25, Source: recurrent.py

Example 2: _get_cluster_symbol

    def _get_cluster_symbol(self):
        output = self._get_output_symbol()
        Y_hat = T.reshape(output, (self.batch, self.y_n, self.k))
        y = self._get_y_symbol()
        Y = T.tile(y[:, :, None], (1, 1, self.k))
        diff = T.mean((Y - Y_hat)**2, axis=1)
        # assign each sample to the candidate (out of k) with the smallest MSE
        cluster = T.argmin(diff, axis=1)
        return cluster
Developer: Tinrry, Project: anna, Lines: 8, Source: __init__.py

Example 3: batch_get_nearest_neighbours

def batch_get_nearest_neighbours(samples, dataset):
    sample = Te.matrix(name="sample")
    data = Te.matrix(name="dataset")
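    # for each sample row, return the dataset row at minimal squared distance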
    find_nearest_neighbour = theano.function(name="find_nearest_neighbour",
                                             inputs=[sample],
                                             outputs=data[Te.argmin(Te.sum((data[:, None, :] - sample) ** 2, axis=2), axis=0)],
                                             givens={data: dataset['train']['data']})
    return find_nearest_neighbour(samples)
Developer: amoliu, Project: MADE, Lines: 8, Source: sampleMADE.py

Example 4: dtw

    def dtw(i, q_p, b_p, Q, D, inf):
        # n0 and big come from the enclosing scope in the original source
        i0 = T.eq(i, 0)
        # inf = T.cast(1e10,'float32') * T.cast(T.switch(T.eq(self.n,0), T.switch(T.eq(i,0), 0, 1), 1), 'float32')
        penalty = T.switch(T.and_(T.neg(n0), i0), big, T.constant(0.0, 'float32'))
        loop = T.constant(0.0, 'float32') + q_p
        forward = T.constant(0.0, 'float32') + T.switch(T.or_(n0, i0), 0, Q[i - 1])
        # per column, take the cheaper of the two transitions (loop vs. forward)
        opt = T.stack([loop, forward])
        k_out = T.cast(T.argmin(opt, axis=0), 'int32')
        return opt[k_out, T.arange(opt.shape[1])] + D[i] + penalty, k_out
Developer: atuxhe, Project: returnn, Lines: 9, Source: RecurrentTransform.py

Example 5: get_nearest_neighbours

def get_nearest_neighbours(samples, dataset):
    sample = Te.vector(name="sample")
    data = Te.matrix(name="dataset")
    find_nearest_neighbour = theano.function(name="find_nearest_neighbour",
                                             inputs=[sample],
                                             outputs=data[Te.argmin(Te.sum((data - sample) ** 2, axis=1))],
                                             givens={data: dataset['train']['data']})
    neighbours = []
    for s in samples:
        neighbours += [find_nearest_neighbour(s)]
    return neighbours
Developer: amoliu, Project: MADE, Lines: 11, Source: sampleMADE.py

Example 6: init_H

    def init_H(self):
        if not hasattr(self, "_clusters"):
            a = (self.W * tensor.dot(self.W, self._kernel_matrix)).sum(axis=1) \
                - 2.0 * tensor.dot(self._kernel_matrix, self.W.T)
            # nearest cluster (in kernel space) for every data point
            b = tensor.argmin(a, axis=1)
            self._clusters = function([], b)
        H = .2 * numpy.ones((self._data_size, self._num_latent_topics)).astype(self.W.dtype)
        clusters = self._clusters()
        for i, cluster in enumerate(clusters):
            H[i, cluster] += 1.0
        self.H.set_value(H)
Developer: ejake, Project: tensor-factorization, Lines: 11, Source: convex_non_negative_matrix_factorization.py

Example 7: constructMinimalDistanceIndicesVariable

def constructMinimalDistanceIndicesVariable(x, y, n, m):
    sDistances = constructSquaredDistanceMatrixVariable(x, y, n, m)
    lamblinsTrick = False
    if lamblinsTrick:
        # https://github.com/Theano/Theano/issues/1399
        # https://gist.github.com/danielvarga/d0eeacea92e65b19188c
        # https://groups.google.com/forum/#!topic/theano-users/E7ProqnGUMk
        s = sDistances
        bestIndices = T.cast( ( T.arange(n).dimshuffle(0, 'x') * T.cast(T.eq(s, s.min(axis=0, keepdims=True)), 'float32') ).sum(axis=0), 'int32')
        # This is a heavy-handed workaround for the fact that in
        # lamblin's hack, ties lead to completely screwed results.
        bestIndices = T.clip(bestIndices, 0, n-1)
    else:
        bestIndices = T.argmin(sDistances, axis=0)
    return bestIndices
Developer: danielvarga, Project: earth-moving-generative-net, Lines: 15, Source: distances.py
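
A quick NumPy sketch (with made-up values, not from the project above) shows why ties corrupt Lamblin's trick: when a column contains two equal minima, the indicator matrix holds two ones, so the weighted sum returns the sum of the tied indices rather than either index; the T.clip above only keeps that bogus value in range.

import numpy as np

# column 0 has a tie between rows 1 and 2
s = np.array([[4.0, 2.0],
              [1.0, 3.0],
              [1.0, 7.0]], dtype=np.float32)

indicator = (s == s.min(axis=0, keepdims=True)).astype(np.float32)
best = (np.arange(3)[:, None] * indicator).sum(axis=0).astype(np.int32)
print(best)                  # [3 0] -- 1 + 2 = 3 is out of range for n = 3
print(np.argmin(s, axis=0))  # [1 0] -- argmin resolves the tie to the first index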

Example 8: __init__

    def __init__(self, y, uv, params):
        self.layer0_W_y, self.layer0_b_y = params[0]
        self.layer0_W_uv, self.layer0_b_uv = params[1]
        self.layer1_W, self.layer1_b = params[2]
        self.layer2_W = params[3]
        self.layer3_W, self.layer3_b = params[4]
        self.layer4_W, self.layer4_b = params[5]

        poolsize = (2, 2)
        # layer0_y: conv-maxpooling-tanh
        layer0_y_conv = conv.conv2d(input=y, filters=self.layer0_W_y,
                border_mode='full')
        layer0_y_pool = downsample.max_pool_2d(input=layer0_y_conv,
                ds=poolsize, ignore_border=True)
        layer0_y_out = T.tanh(layer0_y_pool + \
                self.layer0_b_y.reshape(1, -1, 1, 1))

        # layer0_uv: conv-maxpooling-tanh
        layer0_uv_conv = conv.conv2d(input=uv, filters=self.layer0_W_uv,
                border_mode='full')
        layer0_uv_pool = downsample.max_pool_2d(input=layer0_uv_conv,
                ds=poolsize, ignore_border=True)
        layer0_uv_out = T.tanh(layer0_uv_pool + \
                self.layer0_b_uv.reshape(1, -1, 1, 1))

        layer1_input = T.concatenate((layer0_y_out, layer0_uv_out), axis=1)

        # layer1: conv-maxpooling-tanh
        layer1_conv = conv.conv2d(input=layer1_input, filters=self.layer1_W,
                border_mode='full')
        layer1_pool = downsample.max_pool_2d(input=layer1_conv,
                ds=poolsize, ignore_border=True)
        layer1_out = T.tanh(layer1_pool + self.layer1_b.reshape(1, -1, 1, 1))

        # layer2: conv
        layer2_out = conv.conv2d(input=layer1_out, filters=self.layer2_W,
                border_mode='valid')

        layer3_input = layer2_out.reshape((256, -1)).dimshuffle(1, 0)

        # layer3: hidden-layer
        layer3_lin = T.dot(layer3_input, self.layer3_W) + self.layer3_b
        layer3_out = T.tanh(layer3_lin)

        # layer4: logistic-regression
        layer4_out = T.nnet.softmax(T.dot(layer3_out, self.layer4_W) + \
                self.layer4_b)
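        # note: argmin yields the class with the lowest softmax probability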
        self.pred = T.argmin(layer4_out, axis=1)
Developer: dailiang, Project: icome, Lines: 48, Source: process0.py

Example 9: straight_through

def straight_through(p, u):
    sts = StraightThroughSampler()

    # CDF of p minus the uniform sample u; the sampled bucket is the first
    # index where this becomes non-negative
    cum = T.extra_ops.cumsum(p, axis=1) - T.addbroadcast(T.reshape(u, (u.shape[0], 1)), 1)

    # push negative entries above every possible positive one, so that argmin
    # lands on the first non-negative entry
    cum = T.switch(T.lt(cum, 0.0), 10.0, cum)

    ideal_bucket = T.argmin(cum, axis=1)

    one_hot = T.extra_ops.to_one_hot(ideal_bucket, 4)

    y = sts(p, one_hot)

    return y
Developer: alexmlamb, Project: staight_through, Lines: 16, Source: straight_through_op.py

Example 10: min_risk_choice

    def min_risk_choice(Posterior):

        #The Loss function is a function of the predictiveness profiles
        Preds = predictiveness_profiles(Models, K, num_M)
        
        Loss = ifelse(T.eq(Choice_type, 1), T.pow(1.0 - Preds,2), ifelse(T.eq(Choice_type, 2), T.abs_(1.0 - Preds), - Preds))             
        
        #Kroneckering Loss up num_Obs times (tile Loss, making it num_M by num_M*num_Obs)
        Loss = kron(T.ones((1,num_Obs)), Loss)        
        #Kroneckering up the Posterior, making it num_M by num_Obs*numM
        Posterior = kron(Posterior, T.ones((1,num_M)))

        #Dotting and reshaping down to give num_M by num_Obs expected loss matrix
        Expected_Loss = T.dot(T.ones((1,num_M)),Posterior*Loss)            
        Expected_Loss = T.reshape(Expected_Loss, (num_Obs,num_M)).T
        
        #Choice minimizes risk
        Choice = T.argmin(Expected_Loss, axis = 0) 
        return Choice 
Developer: Underfit, Project: underfit, Lines: 19, Source: bayesian_choice.py

Example 11: generate_optimize_basis

    def generate_optimize_basis():
        # original solution
        tx0 = partial.x
        # optimized solution
        tx1 = T.dot(tl.matrix_inverse(T.dot(partial.A.T, partial.A)),
                    T.dot(partial.A.T, y) - gamma/2*partial.theta)

        # investigate zero crossings between tx0 and tx1
        tbetas = tx0 / (tx0 - tx1)
        # investigate tx1
        tbetas = T.concatenate([tbetas, [1.0]])
        # only between tx0 and inclusively tx1
        tbetas = tbetas[(T.lt(0, tbetas) * T.le(tbetas, 1)).nonzero()]

        txbs, _ = theano.map(lambda b: (1-b)*tx0 + b*tx1, [tbetas])
        tlosses, _ = theano.map(loss, [txbs])
        # select the optimum
        txb = txbs[T.argmin(tlosses)]

        return theano.function([tpart, full.x, full.theta],
                               [T.set_subtensor(partial.x,     txb),
                                T.set_subtensor(partial.theta, T.sgn(txb))])
Developer: cooijmanstim, Project: sparse-coding-theano, Lines: 22, Source: featuresign.py

Example 12: getDeployFunction

    def getDeployFunction(self, cr):
        from algorithms.algorithm import beam_search, greed
        print "Compiling computing graph."
        get_question_hidden = theano.function([self.question, self.question_mask],
                                              self.last_hidden_state,
                                              name='get_question_hidden')
        _, pred_word_probability = self.softmax_layer.getOutput(self.last_decoder_hidden)

        recons_v = self.tparams['recons_v']
        recons_b = self.tparams['recons_b']

        recons_b = recons_b.dimshuffle(['x', 0])

        media_h = T.dot(self.tparams['Wemb'], recons_v) + recons_b

        recons_h_error_L = T.tanh(media_h) - T.addbroadcast(self.last_decoder_hidden, 0)
        recons_h_error_L = T.sqr(recons_h_error_L).sum(axis=1)
        recons_h_error_L = recons_h_error_L / self.options['hidden_dim']
        error = -T.log(pred_word_probability) + recons_h_error_L
        score = T.exp(-error)
        # the predicted word minimizes prediction loss plus reconstruction error
        pred_word = T.argmin(error)

        deploy_model = theano.function(inputs=[self.answer, self.answer_mask, self.last_hidden_state],
                                       outputs=[pred_word, score],
                                       allow_input_downcast=True)
        print "Compiled."

        def dm(sentence):
            print "feed %s: " % sentence
            (x, x_mask) = cr.transformInputData(sentence)
            x = x[:-1]
            x_mask = x_mask[:-1]
            last_s = get_question_hidden(x, x_mask)

            def f(y, y_mask):
                return deploy_model(y, y_mask, last_s)
            return beam_search('', cr, f)
        return dm
Developer: ivysoftware, Project: DeepEmbedding, Lines: 39, Source: GRU_recons.py

Example 13: generate_functions

def generate_functions(A, y, gamma):
    tA = T.matrix('A')
    ty = T.vector('y')
    tx = T.vector('x')
    ttheta = T.vector('theta')
    
    tx0 = T.vector('x0')
    tx1 = T.vector('x1')
    tbetas = T.vector('betas')
    
    error = lambda x: T.sum((T.dot(tA, x) - ty)**2)
    derror = lambda x: T.grad(error(x), x)
    penalty = lambda x: x.norm(1)
    loss = lambda x: error(x) + penalty(x)

    entering_index = T.argmax(abs(derror(tx)))
    txs, _ = theano.map(lambda b, x0, x1: (1-b)*x0 + b*x1,
                        [tbetas], [tx0, tx1])

    return {
        "select_entering": theano.function([tx],
                                           [entering_index, derror(tx)[entering_index]],
                                           givens = {tA: A, ty: y}),
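        # note: T.inv is the elementwise reciprocal, not a matrix inverse;
        # the matrix_inverse used in Example 11 is presumably intended here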
        "qp_optimum": theano.function([tA, ttheta],
                                      T.dot(T.inv(T.dot(tA.T, tA)), T.dot(tA.T, ty) - gamma/2*ttheta),
                                      givens = {ty: y}),
        "txs": theano.function([tbetas, tx0, tx1], txs),
        "select_candidate": theano.function([tA, tbetas, tx0, tx1],
                                            txs[T.argmin(theano.map(loss, [txs])[0])],
                                            givens = {ty: y}),
        "optimal_nz": theano.function([tA, tx],
                                      derror(tx) + gamma*T.sgn(tx),
                                      givens = {ty: y}),
        "optimal_z": theano.function([tA, tx],
                                     abs(derror(tx)),
                                     givens = {ty: y}),
        }
Developer: gburachas, Project: sparse-coding-theano, Lines: 37, Source: featuresign.py

Example 14: init_weights

w_2 = init_weights((h_size, y_size))


# Forward propagation
yhat   = forwardprop(X, w_1, w_2)

# Backward propagation
cost    = T.mean(T.nnet.categorical_crossentropy(yhat, Y))
params  = [w_1, w_2]
updates = backprop(cost, params)



# Train and predict
train   = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
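# note: argmin picks the least likely class; argmax is the usual choice for prediction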
pred_y  = T.argmin(yhat, axis=1)
predict = theano.function(inputs=[X], outputs=pred_y, allow_input_downcast=True)



# Run SGD
"""for iter in range(500):
    train(train_X, train_y)
    train_accuracy = np.mean(np.argmax(train_y, axis=1) == predict(train_X))
    test_accuracy  = np.mean(np.argmax(test_y, axis=1) == predict(test_X))
    print predict(test_X)
    print("Iteration = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
            % (iter + 1, 100 * train_accuracy, 100 * test_accuracy))
    break"""
          
train(train_X, train_y)
Developer: RahatIbnRafiq, Project: rahattest, Lines: 31, Source: TheanoTest3.py

Example 15: __call__

    def __call__(self, X, termination_criterion, initial_H=None):
        """
            Compute the representation of each sample.

            Parameters
            ----------
            X : numpy.ndarray
                Sample matrix.
            termination_criterion : pylearn TerminationCriterion object
            initial_H : numpy.ndarray, optional
                Initial value for H.

            Returns
            -------
            H : numpy.ndarray
                Matrix with the representations.
        """

        dataset_size = X.shape[0]

        H = None
        if initial_H is not None:
            # use initial_H only when its shape matches the expected one
            if initial_H.shape[0] == dataset_size and initial_H.shape[1] == self._num_latent_topics:
                H = initial_H

        if H is None:
            if not hasattr(self, "predict_clusters"):
                h = tensor.matrix(name="h")
                x = tensor.matrix(name="x")
                kxb = self._kernel(x, self._budget)
                a = (self.W * tensor.dot(self.W, self._kernel_matrix)).sum(axis=1) \
                    - 2.0 * tensor.dot(kxb, self.W.T)
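                # nearest latent topic (in kernel space) for each sample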
                b = tensor.argmin(a, axis=1)
                self.predict_clusters = function([x], b)

            H = .2 * numpy.ones((self._data_size, self._num_latent_topics)).astype(self.W.dtype)
            clusters = self.predict_clusters(X)
            for i, cluster in enumerate(clusters):
                H[i, cluster] += 1.0

        if not hasattr(self, "predict_representation"):
            h = tensor.matrix(name="h")
            x = tensor.matrix(name="x")
            kxb = self._kernel(x, self._budget)
            kxbp = 0.5 * (numpy.abs(kxb) + kxb)
            kxbn = 0.5 * (numpy.abs(kxb) - kxb)
            a = tensor.dot(h, tensor.dot(self.W, self.kbn))
            b = tensor.dot(kxbp + a, self.W.T)
            c = tensor.dot(h, tensor.dot(self.W, self.kbp))
            d = tensor.dot(kxbn + c, self.W.T)
            e = h * tensor.sqrt(b / (d + self.lambda_vals))
            f = tensor.maximum(e, eps)
            self.predict_representation = function([x, h], f)

        keep_training = True
        if not isfinite(H):
            raise Exception("NaN or Inf in H")

        while keep_training:
            H = self.predict_representation(X, H)
            if not isfinite(H):
                raise Exception("NaN or Inf in H")
            keep_training = termination_criterion.continue_learning(self)

        return H
Developer: ejake, Project: tensor-factorization, Lines: 62, Source: convex_non_negative_matrix_factorization.py


Note: The theano.tensor.argmin examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.