

Python minimize.minimize Function Code Examples

This article collects typical usage examples of the minimize.minimize function in Python. If you are struggling with questions such as: What exactly does Python's minimize function do? How do I call it? Where can I find working examples? then the curated code samples below may help.


Fifteen code examples of the minimize function are shown below, sorted by popularity by default.
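Note that the examples come from different projects bundling different minimize modules, so several calling conventions appear: most examples (1, 3, 8, 10, 15) call a Rasmussen-style conjugate-gradient routine as minimize(x0, f, grad, ...) with separate objective and gradient callables, Example 14 passes a single function returning both the value and the gradient, and Examples 6, 7, and 13 pass the cost function first, as minimize(costF, x0, ...). Here is a minimal, self-contained sketch of the most common convention. It assumes a Rasmussen-style minimize.py (a Python port of Carl Rasmussen's minimize.m) is importable; the toy objective, the names target, f, grad, and x0, and the exact shape of the return values are illustrative assumptions rather than a documented API, with the keywords args, maxnumlinesearch, and verbose following the calls in Examples 1, 3, and 10.

import numpy as np
from minimize import minimize  # assumption: a Rasmussen-style minimize.py on the path

target = np.array([1.0, -2.0, 3.0])

def f(x, b):
    # toy objective: squared Euclidean distance to b
    return float(np.sum((x - b) ** 2))

def grad(x, b):
    # analytic gradient of f with respect to x
    return 2.0 * (x - b)

x0 = np.zeros(3)
# assumed return convention, mirroring Examples 8, 10, and 14:
# (optimized parameters, objective value(s), number of line searches)
x_opt, fx, n = minimize(x0, f, grad, args=(target,), maxnumlinesearch=50, verbose=False)
print(x_opt)  # should end up close to target

In the cost-first convention of Examples 6, 7, and 13, the initial parameter vector is instead the second argument, and in Example 13 the single supplied function returns the objective and the gradient together.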

Example 1: step

 def step(self, *args):
     from minimize import minimize
     # optimize from the model's current parameters and write the result back
     updateparams(self.model,
                  minimize(self.model.params.copy(), self.cost, self.grad,
                           args=args, maxnumfuneval=self.maxfuneval,
                           verbose=False)[0].copy())
     Trainer.step(self, *args)
Author: JohnPaton, Project: Master-Thesis, Lines: 7, Source: train.py

Example 2: minimize

def minimize(text):
    try:
        import jsmin
        return jsmin.jsmin(text)
    except Exception:
        # fall back to the bundled minimize module if jsmin is unavailable or fails
        import minimize
        return minimize.minimize(text)
Author: psycrow117, Project: OpenLayerer, Lines: 7, Source: build.py

Example 3: fit_nce

    def fit_nce(self, X, k=1, mu_noise=None, L_noise=None,
                mu0=None, L0=None, c0=None, method='minimize',
                maxnumlinesearch=None, maxnumfuneval=None, verbose=False):
        _class = self.__class__
        D, Td = X.shape
        self._init_params(D, mu_noise, L_noise, mu0, L0, c0)

        noise = self._params_noise
        Y = mvn.rvs(noise.mu, noise.L, k * Td).T

        maxnumlinesearch = maxnumlinesearch or DEFAULT_MAXNUMLINESEARCH
        obj = lambda u: _class.J(X, Y, noise.mu, noise.L, *vec_to_params(u))
        grad = lambda u: params_to_vec(
            *_class.dJ(X, Y, noise.mu, noise.L, *vec_to_params(u)))

        t0 = params_to_vec(*self._params_nce)
        if method == 'minimize':
            t_star = minimize(t0, obj, grad,
                              maxnumlinesearch=maxnumlinesearch,
                              maxnumfuneval=maxnumfuneval, verbose=verbose)[0]
        else:
            t_star = sp_minimize(obj, t0, method='BFGS', jac=grad,
                                 options={'disp': verbose,
                                          'maxiter': maxnumlinesearch}).x
        self._params_nce = GaussParams(*vec_to_params(t_star))
        return (self._params_nce, Y)
Author: mcobzarenco, Project: nce-models, Lines: 26, Source: ncegauss.py

Example 4: Module

def Module(name, filename, munge_globals=True):
    with open(filename, "rb" if p.PY2 else "r") as f:
        code = f.read()
    if args.minimize:
        # in modules only locals are worth optimizing
        code = minimize.minimize(code, True, args.obfuscate and munge_globals, args.obfuscate, args.obfuscate)
    return p.Module(name, code)
Author: AndrewSkat, Project: unrpyc, Lines: 7, Source: compile.py

Example 5: _fit_with_minimize

 def _fit_with_minimize(self, learning_rate=0.1, weight_decay=0, momentum=0,
                        verbose=True, max_lr_iter=5, isnorm=True):
     big_weight = weight_extend(self)
     big_weight, _, _ = minimize.minimize(big_weight, helper_func_eval,
                                          (self, isnorm),
                                          maxnumlinesearch=3, verbose=False)
     weight_compress(big_weight, self)
     if verbose:
         self.feed_forward()
         return self.empirical_error()
Author: umutekmekci, Project: deepNN, Lines: 7, Source: NeuralNetwork.py

Example 6: trainNN

def trainNN(inputSize, hid1Size, hid2Size, numClasses, lambda_, inputData,
            labels, n_iterations=100, displ=True):
    if displ:
        sel = np.random.permutation(inputData.shape[1])
        sel = sel[0:100]
        rbm.displayData(inputData[:, sel].T)
    T1 = debugInitializeWeights(hid1Size, inputSize)
    T2 = debugInitializeWeights(hid2Size, hid1Size)
    T3 = debugInitializeWeights(numClasses, hid2Size)
    b1 = np.zeros((hid1Size, 1))
    b2 = np.zeros((hid2Size, 1))
    b3 = np.zeros((numClasses, 1))
    # unroll all weights and biases into a single column vector
    T = np.concatenate((T1.reshape(T1.size, 1),
                        T2.reshape(T2.size, 1),
                        T3.reshape(T3.size, 1),
                        b1, b2, b3))

    NNCost = lambda p: CostFunction(p, inputSize, hid1Size, hid2Size,
                                    numClasses, inputData, labels, lambda_)
    T, cost, iteration = minimize.minimize(NNCost, T, n_iterations)

    # fold the optimized vector back into the three weight matrices
    T1 = T[0:(hid1Size * inputSize)].reshape(hid1Size, inputSize)
    T2 = T[(hid1Size * inputSize):
           (hid1Size * inputSize) + (hid2Size * hid1Size)].reshape(hid2Size, hid1Size)
    T3 = T[(hid1Size * inputSize) + (hid2Size * hid1Size):
           (hid1Size * inputSize) + (hid2Size * hid1Size) +
           (hid2Size * numClasses)].reshape(numClasses, hid2Size)

    pred = predict(T1, T2, T3, inputData)
    return pred
Author: andfoy, Project: NNLib, Lines: 26, Source: NNLib.py

Example 7: process_data

def process_data(inputs, values):  # runs the neural network end to end
    _beta = 2  # weight of the sparsity penalty, limiting the model's dispersion
    _lambda = 1e-4  # weight decay, limiting how much the weights can vary
    _epsilon = 0.1  # regularizer that avoids zero eigenvalues in the whitening matrix
    _sparsityParam = 0.6  # desired average activation per neuron, between 0 and 1
    num_iter = 5000  # maximum number of iterations

    inputSize = inputs.shape[0]  # number of input variables, 6 in this case
    m = inputs.shape[1]  # number of training cases
    hiddenSize = 180  # number of hidden neurons
    outputSize = 1  # output dimension; 1 here because this is a regression problem

    # initialize the network's weights and biases; returns a vector of
    # dimension hidden*input + hidden*output + hidden + output
    theta = initializeParameters(outputSize, hiddenSize, inputSize)
    inputs, meanInput, ZCAWhite = preProcess(inputs, _epsilon)  # ZCA whitening of the inputs
    # cost function; receives the parameter vector theta
    costF = lambda p: cost.sparseLinearNNCost(p, inputSize, hiddenSize, outputSize,
                                              _lambda, _sparsityParam, _beta,
                                              inputs, values)

    optTheta, costV, i = minimize.minimize(costF, theta, maxnumlinesearch=num_iter)
    pred = cost.predict(inputs, optTheta, inputSize, hiddenSize, outputSize)

    diff = np.linalg.norm(pred - values) / np.linalg.norm(pred + values)  # relative prediction error

    print("RMSE: %g" % diff)

    np.savez('parameters.npz', optTheta=optTheta, meanInput=meanInput, ZCAWhite=ZCAWhite)
Author: valentinaqf94, Project: CM20151_HW8_ValentinaQuiroga, Lines: 27, Source: linear.py

Example 8: optimize_gp_with_minimize

def optimize_gp_with_minimize(gp, params):
  objective_function = progapy.gp.gp_neglogposterior_using_free_params
  grad_function = progapy.gp.gp_neglogposterior_grad_wrt_free_params

  best_p, v, t = minimize(gp.get_free_params(),
                          objective_function,
                          grad_function,
                          [gp],
                          maxnumlinesearch=params["maxnumlinesearch"])
  print(best_p)
  gp.set_free_params(best_p)
Author: tedmeeds, Project: progapy, Lines: 12, Source: optimize.py

Example 9: minimizeLayer3

    def minimizeLayer3(self, inputData, targets, max_iter):
        layer2out = self.recognize012(inputData)

        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten(( self.W[3], self.hB[3] ))

        (X, fX, iters) = cg.minimize(VV, backprop_only3, (Dim, layer2out, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[3]  = matrices[0]
        self.hB[3] = matrices[1]
Author: Wizcorp, Project: Eruditio, Lines: 12, Source: NeuralNetwork.py

Example 10: train_cg

    def train_cg(self, features, labels, weightcost, maxnumlinesearch=numpy.inf, verbose=False):
        """Train the model using conjugate gradients.
  
           Like train() but faster. Uses minimize.py for the optimization. 
        """

        from minimize import minimize
        p, g, numlinesearches = minimize(self.params.copy(), 
                                         self.f, 
                                         self.g, 
                                         (features, labels, weightcost), maxnumlinesearch, verbose=verbose)
        self.updateparams(p)
        return numlinesearches
Author: LeonBai, Project: lisa_emotiw-1, Lines: 13, Source: logreg.py

Example 11: learn

def learn(shape_theta, shape_x, y, r, reg_lambda, n_iter):
    num_movies = y.shape[0]
    num_users = y.shape[1]

    # Normalize Ratings
    y_mean = (y.sum(axis=1)/r.sum(axis=1)).reshape((-1, 1))
    y = y - y_mean.dot(np.ones((1, num_users)))

    param_0 = np.random.randn(np.prod(shape_theta) + np.prod(shape_x))

    # optimize
    opt, cost, i = minimize(lambda dna: cost_function(dna, shape_theta, shape_x, y, r, reg_lambda),
                            param_0,
                            n_iter)

    theta, x = fold(opt, shape_theta, shape_x)
    return theta, x, y_mean
Author: Seratna, Project: Machine-Learning, Lines: 17, Source: collaborative_filtering.py

Example 12: minimizeAllLayers

    def minimizeAllLayers(self, inputData, targets, max_iter):
        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten((  self.W[0], self.hB[0],
                                    self.W[1], self.hB[1],
                                    self.W[2], self.hB[2],
                                    self.W[3], self.hB[3]  ))

        (X, fX, iters) = cg.minimize(VV, backprop, (Dim, inputData, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[0]  = matrices[0]
        self.hB[0] = matrices[1]
        self.W[1]  = matrices[2]
        self.hB[1] = matrices[3]
        self.W[2]  = matrices[4]
        self.hB[2] = matrices[5]
        self.W[3]  = matrices[6]
        self.hB[3] = matrices[7]
Author: Wizcorp, Project: Eruditio, Lines: 19, Source: NeuralNetwork.py

Example 13: train

    def train(self, x, y, reg_lambda, n_iter):
        """
        Use an optimization algorithm to learn a good set of parameters
        from the training data x and answers y.
        """
        # initialize the gradient vector and the per-layer views into it
        grad = np.zeros_like(self.dna)
        for layer in self.layers:
            # each layer updates its slice of the shared gradient in place
            layer.grad = grad[layer.pointer: layer.pointer + layer.theta.size].reshape(layer.theta.shape)

        # optimize
        opt, cost, i = minimize(lambda dna: (self.learn(dna, x, y, reg_lambda), np.array(grad)), self.dna, n_iter)
        # TODO optimize.fmin_cg implementation
        # opt = optimize.fmin_cg(f=lambda dna: self.learn(dna, x, y, reg_lambda),  # cost function
        #                        x0=self.dna,  # initial set of parameters
        #                        fprime=lambda t: (np.array(grad),)[0],  # gradient
        #                        maxiter=n_iter)  # number of iterations

        # update dna
        self.dna[:] = opt
Author: Seratna, Project: Machine-Learning, Lines: 20, Source: bp_network.py

Example 14: manifold_traversal

def manifold_traversal(F,N,M,weights,max_iter=5,rbf_var=1e4,verbose=True,checkgrad=True,checkrbf=True):
  # returns two arrays, xpr and r
  #   xpr is optimized x+r
  #   r is optimized r
  # multiply by F to get latent space vector
  if verbose:
    print('manifold_traversal()')
    print('F',F.shape,F.dtype,F.min(),F.max())
    print('N',N)
    print('M',M)
    print('weights',weights)

  xpr_result=[]
  r_result=[]
  r=np.zeros(len(F))
  x=np.zeros(len(F))
  FFT=F.dot(F.T) # K x K
  x[-1]=1
  for weight in weights:

    if checkgrad:
      def f(*args):
        return witness_fn2(*args)[0]
      def g(*args):
        return witness_fn2(*args)[1]
      print('Checking gradient ...')
      err=scipy.optimize.check_grad(f,g,r,*(x,FFT,N,M,rbf_var,weight,False,True))
      print('gradient error',err)
      assert err<1e-5

    r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn2,(x,FFT,N,M,rbf_var,weight,verbose,checkrbf),maxnumlinesearch=50,maxnumfuneval=None,red=1.0,verbose=True)
    if verbose:
      print('r_opt',r_opt.shape,r_opt.dtype,r_opt.min(),r_opt.max(),np.linalg.norm(r_opt))
      print('r_opt values',r_opt[:5],'...',r_opt[N:N+5],'...',r_opt[-1])
    xpr_result.append(x+r_opt)
    r_result.append(r_opt)
    r=r_opt
  return np.asarray(xpr_result),np.asarray(r_result)
Author: awg66, Project: deepmanifold, Lines: 38, Source: matchmmd.py

Example 15: set_params

    # (excerpt begins mid-file: the two lines below close return_cost, which
    # mirrors return_grad further down but evaluates the cost instead)
    set_params(model_ft.models_stack[-1], tmp)
    return result

fun_grad = theano.function(
    [model_ft.varin, model_ft.models_stack[-1].vartruth],
    T.grad(model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay),
           model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
p, g, numlinesearches = minimize(
    get_params(model_ft.models_stack[-1]), return_cost, return_grad,
    (train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy')
print "***error rate: train: %f, test: %f" % (
    train_set_error_rate(), test_set_error_rate()
)

#############
# FINE-TUNE #
#############

"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
trainer = GraddescentMinibatch(
Author: hantek, Project: zlinnet, Lines: 32, Source: expr_cifar10_ZLIN_normhid_nolinb_dropout.py


Note: The minimize.minimize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.