

Python preprocessing.KernelCenterer Class Code Examples

This article collects typical usage examples of the Python class sklearn.preprocessing.KernelCenterer. If you are wondering what exactly KernelCenterer does, or how to use it in practice, the curated class code examples below may help.


Fifteen code examples of the KernelCenterer class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
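Before the examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the basic fit/transform pattern that most of the snippets build on: KernelCenterer takes an already-computed kernel (Gram) matrix and centers it in the implicit feature space.

import numpy as np
from sklearn.preprocessing import KernelCenterer

X = np.random.random((5, 4))            # 5 samples, 4 features
K = X @ X.T                             # linear kernel (Gram) matrix
centerer = KernelCenterer()
K_centered = centerer.fit_transform(K)  # center K in feature space

# rows and columns of a centered kernel sum to (numerically) zero
print(np.allclose(K_centered.sum(axis=0), 0))  # True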

Example 1: center_normTrace_decomp

    def center_normTrace_decomp(K):
        print('centering kernel')
        #### Get transformed features for K_train that don't snoop when centering, tracing, or eigendecomposing ####
        Kcent=KernelCenterer()
        Ktrain=Kcent.fit_transform(K[:in_samples,:in_samples])
        #Ktrain=Ktrain/float(np.trace(Ktrain))
        #[EigVals,EigVectors]=scipy.sparse.linalg.eigsh(Ktrain,k=reduced_dimen,which='LM')
        [EigVals,EigVectors]=scipy.linalg.eigh(Ktrain,eigvals=(in_samples-reduced_dimen,in_samples-1))
        for i in range(len(EigVals)): 
            if EigVals[i]<=0: EigVals[i]=0
        EigVals=np.flipud(np.fliplr(np.diag(EigVals)))
        EigVectors=np.fliplr(EigVectors)
        Ktrain_decomp=np.dot(EigVectors,scipy.linalg.sqrtm(EigVals))
       
        #### Get transformed features for K_test using K_train implied mapping ####
        Kcent=KernelCenterer()
        Kfull=Kcent.fit_transform(K)
        #Kfull=Kfull/float(np.trace(Kfull))
        K_train_test=Kfull[in_samples:,:in_samples]
        Ktest_decomp=np.dot(K_train_test,np.linalg.pinv(Ktrain_decomp.T))

        ####combine mapped train and test vectors and normalize each vector####
        Kdecomp=np.vstack((Ktrain_decomp,Ktest_decomp))
        print('doing normalization')
        Kdecomp=normalize(Kdecomp,copy=False)
        return Kdecomp
Developer: matthew-norton, Project: SVM-Kernel-Selection, Lines of code: 26, Source: Kernels.py

Example 2: test_kernelcenterer_vs_sklearn

def test_kernelcenterer_vs_sklearn():
    # Compare msmbuilder.preprocessing.KernelCenterer
    # with sklearn.preprocessing.KernelCenterer

    kernelcentererr = KernelCentererR()
    kernelcentererr.fit(np.concatenate(trajs))

    kernelcenterer = KernelCenterer()
    kernelcenterer.fit(trajs)

    y_ref1 = kernelcentererr.transform(trajs[0])
    y1 = kernelcenterer.transform(trajs)[0]

    np.testing.assert_array_almost_equal(y_ref1, y1)
Developer: Eigenstate, Project: msmbuilder, Lines of code: 14, Source: test_preprocessing.py

Example 3: __init__

    def __init__(self, use_total_scatter=True, sigma_sqrd=1e-8, tol=1.0e-3,
                 kernel="linear", gamma=None, degree=3, coef0=1,
                 norm_covariance = False, priors=None, print_timing=False):

        self.use_total_scatter = use_total_scatter
        self.sigma_sqrd = sigma_sqrd
        self.tol = tol
        self.kernel = kernel.lower()
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self._centerer = KernelCenterer()

        self.norm_covariance = norm_covariance
        self.print_timing = print_timing
        
        
        self.priors = np.asarray(priors) if priors is not None else None
        
        if self.priors is not None:
            if (self.priors < 0).any():
                raise ValueError('priors must be non-negative')
            if self.priors.sum() != 1:
                print('warning: the priors do not sum to 1. Renormalizing')
                self.priors = self.priors / self.priors.sum()
Developer: andrewjohnlowe, Project: JetImages, Lines of code: 25, Source: fisher.py

Example 4: cv_mkl

def cv_mkl(kernel_list, labels, mkl, n_folds, dataset, data):

    n_sample, n_labels = labels.shape
    n_km = len(kernel_list)
    tags = np.loadtxt("../data/cv/"+data+".cv")

    for i in range(1,n_folds+1):
        print "Test fold %d" %i
        res_f = "../svm_result/weights/"+dataset+"_fold_%d_%s.weights" % (i,mkl)
        para_f = "../svm_result/upperbound/"+dataset+"_fold_%d_%s.ubound" % (i,mkl)
        test = np.array(tags == i)
        train = np.array(~test)
        train_y = labels[train,:]
        test_y = labels[test,:]
        n_train = len(train_y)
        n_test = len(test_y)
        train_km_list = []

        # all train kernels are normalized and centered
        for km in kernel_list:
            kc = KernelCenterer()
            train_km = km[np.ix_(train, train)]
            # center the train kernel (no test kernel is needed in this function)
            kc.fit(train_km)
            train_km_c = kc.transform(train_km)
            train_km_list.append(train_km_c)

        if mkl == 'UNIMKL':
            res = UNIMKL(train_km_list, train_y)
            np.savetxt(res_f, res)            
        if mkl == 'ALIGNF2':
            res = alignf2(train_km_list, train_y, data)
            np.savetxt(res_f, res)
        if mkl.find('ALIGNF2SOFT') != -1:
            bestC, res = ALIGNF2SOFT(train_km_list, train_y, i, tags, data)
            np.savetxt(res_f, res)
            np.savetxt(para_f, bestC)
        if mkl == "TSMKL":
            W = np.zeros((n_km, n_labels))
            for j in range(n_labels):
                print("..label", j)
                W[:,j] = TSMKL(train_km_list, train_y[:,j])
            res_f = "../svm_result/weights/"+dataset+"_fold_%d_%s.weights" % (i,mkl)
            np.savetxt(res_f, W)
Developer: aalto-ics-kepaco, Project: softALIGNF, Lines of code: 44, Source: run_mkl.py
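Note that cv_mkl above centers only the training kernels. When a test kernel is needed as well, the KernelCenterer fitted on the training block must be reused for the test-versus-train block, as Example 10 below does. A short sketch of that pattern, with a hypothetical index split:

import numpy as np
from sklearn.preprocessing import KernelCenterer

rng = np.random.RandomState(0)
X = rng.random_sample((8, 3))
K = X @ X.T                                  # full kernel over all samples
train, test = np.arange(5), np.arange(5, 8)  # hypothetical train/test split

kc = KernelCenterer()
kc.fit(K[np.ix_(train, train)])              # fit on the train block only
train_km_c = kc.transform(K[np.ix_(train, train)])
test_km_c = kc.transform(K[np.ix_(test, train)])  # rows: test, columns: train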

Example 5: test_center_kernel

def test_center_kernel():
    """Test that KernelCenterer is equivalent to Scaler in feature space"""
    X_fit = np.random.random((5, 4))
    scaler = Scaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # center fit time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)

    # center predict time matrix
    X_pred = np.random.random((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
Developer: Yangqing, Project: scikit-learn, Lines of code: 21, Source: test_preprocessing.py
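The equivalence this test checks can also be written out directly: centering a kernel matrix K is the double-centering K_c = K - 1_n K - K 1_n + 1_n K 1_n, where 1_n is the n-by-n matrix whose entries are all 1/n. A short independent check of that identity against KernelCenterer:

import numpy as np
from sklearn.preprocessing import KernelCenterer

X = np.random.random((6, 3))
K = X @ X.T
n = K.shape[0]
one_n = np.full((n, n), 1.0 / n)

K_manual = K - one_n @ K - K @ one_n + one_n @ K @ one_n
K_sklearn = KernelCenterer().fit_transform(K)
print(np.allclose(K_manual, K_sklearn))  # True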

Example 6: __init__

 def __init__(self, n_components=None, kernel="linear",
              gamma=None, degree=3, coef0=1, kernel_params=None, eigen_solver='auto',
              tol=0, max_iter=None, random_state=None,center=False):
     self.n_components = n_components
     self._kernel = kernel
     self.kernel_params = kernel_params
     self.gamma = gamma
     self.degree = degree
     self.coef0 = coef0
     self.eigen_solver = eigen_solver
     self.tol = tol
     self.max_iter = max_iter
     self.random_state = random_state
     self._centerer = KernelCenterer()
     self.center = center
Developer: tsterbak, Project: kernel_eca, Lines of code: 15, Source: kernel_eca.py

Example 7: __init__

 def __init__(self, n_components=None, kernel="linear",
              gamma=None, degree=3, coef0=1, kernel_params=None,
              alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
              tol=0, max_iter=None, remove_zero_eig=False):
     if fit_inverse_transform and kernel == 'precomputed':
         raise ValueError(
             "Cannot fit_inverse_transform with a precomputed kernel.")
     self.n_components = n_components
     self.kernel = kernel
     self.kernel_params = kernel_params
     self.gamma = gamma
     self.degree = degree
     self.coef0 = coef0
     self.alpha = alpha
     self.fit_inverse_transform = fit_inverse_transform
     self.eigen_solver = eigen_solver
     self.remove_zero_eig = remove_zero_eig
     self.tol = tol
     self.max_iter = max_iter
     self._centerer = KernelCenterer()
Developer: rohanp, Project: dimensionality_reduction_techniques, Lines of code: 20, Source: kernel_pca.py

Example 8: fit

    def fit(self, X, Y):
        """Fit the KCCA model with two views represented by kernels X and Y.

        Parameters
        ----------
        X : array_like, shape = (n_samples, n_features) for data matrix
            or shape = (n_samples, n_samples) for kernel matrix.
            When both X and Y are kernel matrix, the kernel parameter
            should be set to 'precomputed'.
            It is considered to be one view of the data.

        Y : array_like, shape = (n_samples, n_features) for data matrix
            or shape = (n_samples, n_samples) for kernel matrix.
            When both X and Y are kernel matrix, the kernel parameter
            should be set to 'precomputed'.
            It is considered to be another view of the data.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        check_consistent_length(X, Y)
        X = check_array(X, dtype=float, copy=self.copy)
        Y = check_array(Y, dtype=float, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y.reshape(-1,1)

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        if self.n_components < 1 or self.n_components > n:
            raise ValueError('Invalid number of components: %d' %
                             self.n_components)
        if self.eigen_solver not in ("auto", "dense", "arpack"):
            raise ValueError("Got eigen_solver %s when only 'auto', "
                             "'dense' and 'arparck' are valid" %
                             self.algorithm)
        if self.kernel == 'precomputed' and (p != n or q != n):
            raise ValueError('Invalid kernel matrices dimension')
        if not self.pgso and (self.kapa <= 0 or self.kapa >= 1):
            raise ValueError('kapa should be in (0, 1) when pgso=False')
        if self.pgso and (self.kapa < 0 or self.kapa > 1):
            raise ValueError('kapa should be in [0, 1] when pgso=True')

        KX = self._get_kernel(X)
        KY = self._get_kernel(Y)

        if self.center:
            kc = KernelCenterer()
            self.KXc_ = kc.fit_transform(KX)
            self.KYc_ = kc.fit_transform(KY)
        else:
            self.KXc_ = KX
            self.KYc_ = KY

        if self.pgso:  # use PGSO to decompose kernel matrix
            self._fit_pgso(self.KXc_, self.KYc_)
        else:
            self._fit(self.KXc_, self.KYc_)
        return self
Developer: icdishb, Project: scikit-learn, Lines of code: 62, Source: kernel_cca.py

Example 9: KernelPCA


#......... part of the code omitted here .........
        Inverse transform matrix

    X_transformed_fit_ :
        Projection of the fitted data on the kernel principal components

    References
    ----------
    Kernel PCA was introduced in:
        Bernhard Schoelkopf, Alexander J. Smola,
        and Klaus-Robert Mueller. 1999. Kernel principal
        component analysis. In Advances in kernel methods,
        MIT Press, Cambridge, MA, USA 327-352.
    """

    def __init__(self, n_components=None, kernel="linear",
                 gamma=None, degree=3, coef0=1, kernel_params=None,
                 alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
                 tol=0, max_iter=None, remove_zero_eig=False):
        if fit_inverse_transform and kernel == 'precomputed':
            raise ValueError(
                "Cannot fit_inverse_transform with a precomputed kernel.")
        self.n_components = n_components
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.alpha = alpha
        self.fit_inverse_transform = fit_inverse_transform
        self.eigen_solver = eigen_solver
        self.remove_zero_eig = remove_zero_eig
        self.tol = tol
        self.max_iter = max_iter
        self._centerer = KernelCenterer()

    @property
    def _pairwise(self):
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)

    def _fit_transform(self, K):
        """ Fit's using kernel K"""
        # center kernel
        K = self._centerer.fit_transform(K)

        if self.n_components is None:
            n_components = K.shape[0]
        else:
            n_components = min(K.shape[0], self.n_components)

        # compute eigenvectors
        if self.eigen_solver == 'auto':
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = 'arpack'
            else:
                eigen_solver = 'dense'
        else:
Developer: rohanp, Project: dimensionality_reduction_techniques, Lines of code: 67, Source: kernel_pca.py
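The _fit_transform excerpt above comes from a vendored copy of scikit-learn's KernelPCA, where the first step is always to center the kernel with the internal KernelCenterer. For context, a minimal usage sketch of the standard public class, which performs the same centering behind the scenes:

import numpy as np
from sklearn.decomposition import KernelPCA

X = np.random.random((30, 5))
kpca = KernelPCA(n_components=2, kernel="rbf", gamma=0.5)
X_proj = kpca.fit_transform(X)  # the RBF kernel is centered internally
print(X_proj.shape)             # (30, 2)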

Example 10: ovkr_mkl

def ovkr_mkl(kernel_list, labels, mkl, n_folds, dataset, data):
    n_sample, n_labels = labels.shape
    n_km = len(kernel_list)
    tags = np.loadtxt("../data/cv/"+data+".cv")

    # Add noise to the output
    noise_level = [0.005, 0.010, 0.015, 0.020, 0.025]

    for nid in range(len(noise_level)):
        noi = noise_level[nid]
        print("noise", noi, nid)
        Y = addNoise(labels, noi)    

        pred = np.zeros((n_sample, n_labels))
        pred_bin = np.zeros((n_sample, n_labels))

        # Run for each fold   
        for i in range(1,n_folds+1):
            print "Test fold %d" %i
            res_f = "../ovkr_result/noisy_weights/"+dataset+"_fold_%d_%s_noise_%d.weights" % (i,mkl, nid)
            # divide data
            test = np.array(tags == i)
            train = np.array(~test)
            train_y = Y[train,:]
            test_y = Y[test,:]
            n_train = len(train_y)
            n_test = len(test_y)

            train_km_list = []
            test_km_list = []
            for km in kernel_list:
                kc = KernelCenterer()
                train_km = km[np.ix_(train, train)]
                test_km = km[np.ix_(test, train)]
                # center train and test kernels                      
                kc.fit(train_km)
                train_km_c = kc.transform(train_km)
                test_km_c = kc.transform(test_km)
                train_km_list.append(train_km_c)
                test_km_list.append(test_km_c)

            if mkl == 'UNIMKL':
                wei = UNIMKL(n_km, n_labels)
            else:
                wei = np.loadtxt(res_f, ndmin=2)        

            normw = np.linalg.norm(wei)
            uni = np.ones(n_km) / np.linalg.norm(np.ones(n_km))
            if normw == 0:
                wei[:,0] = uni
            else:
                wei[:,0] = wei[:,0] / normw

            train_ckm = np.zeros((n_train,n_train))
            for t in range(n_km):
                train_ckm += wei[t,0]*train_km_list[t]

            # combine train and test kernel using learned weights        
            test_ckm = np.zeros(test_km_list[0].shape)
            for t in range(n_km):
                test_ckm = test_ckm + wei[t,0]*test_km_list[t]

            AP = OVKR_train_CV(train_ckm, train_y, tags[train])
            pred_label = OVKR_test(test_ckm, AP)
            pred[test, :] = pred_label

        pred_real_f = "../ovkr_result/noisy_pred/%s_cvpred_%s_real_noise_%d.npy" % (data, mkl, nid)
        np.save(pred_real_f, pred)
Developer: aalto-ics-kepaco, Project: softALIGNF, Lines of code: 68, Source: run_ovkr_noise.py

Example 11: generate_spike_classes

    return K


if __name__ == "__main__":

    classes = generate_spike_classes(1, 2)
    train = generate_spike_times(classes)
    test = generate_spike_times(classes)
    rasterPlot(train)
    K = compute_K_matrix(train)
    ###############################
    # N = K.shape[0]
    # H = np.eye(N) - np.tile(1./N, [N, N]);
    # Kc = np.dot(np.dot(H, K), H)
    kcenterer = KernelCenterer()  #
    kcenterer.fit(K)  # Center Kernel Matrix
    Kc = kcenterer.transform(K)  #
    ###############################
    D, E = eig(Kc)
    proj = np.dot(Kc, E[:, 0:2])

    ################################ Center test
    Kt = compute_K_matrix(train, test)
    # M = Kt.shape[0]
    # A = np.tile(K.sum(axis=0), [M, 1]) / N
    # B = np.tile(Kt.sum(axis=1),[N, 1]) /N
    # Kc2 = Kt - A - B + K.sum()/ N**2;
    Kc2 = kcenterer.transform(Kt)
    proj2 = np.dot(Kc2, E[:, 0:2])
Developer: EderSantana, Project: adaptive_kernel_methods, Lines of code: 29, Source: test_paiva.py
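The commented-out lines in this example spell out the rectangular (test-versus-train) form of the centering formula. A sketch with synthetic data checking that it matches KernelCenterer.transform; note the transpose on B, which the commented-out version appears to leave implicit:

import numpy as np
from sklearn.preprocessing import KernelCenterer

Xtr = np.random.random((6, 3))
Xte = np.random.random((4, 3))
K = Xtr @ Xtr.T                 # train kernel, N x N
Kt = Xte @ Xtr.T                # test-vs-train kernel, M x N
N, M = K.shape[0], Kt.shape[0]

A = np.tile(K.sum(axis=0), (M, 1)) / N
B = np.tile(Kt.sum(axis=1), (N, 1)).T / N
Kc2_manual = Kt - A - B + K.sum() / N ** 2

kc = KernelCenterer().fit(K)
print(np.allclose(Kc2_manual, kc.transform(Kt)))  # True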

Example 12: KernelECA

class KernelECA(BaseEstimator, TransformerMixin):
    """Kernel Entropy component analysis (KECA)

    Non-linear dimensionality reduction through the use of kernels (see
    :ref:`metrics`).

    Parameters
    ----------
    n_components: int or None
        Number of components. If None, all non-zero components are kept.

    kernel: "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
        Kernel.
        Default: "linear"

    degree : int, default=3
        Degree for poly kernels. Ignored by other kernels.

    gamma : float, optional
        Kernel coefficient for rbf and poly kernels. Default: 1/n_features.
        Ignored by other kernels.

    coef0 : float, optional
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.


    eigen_solver: string ['auto'|'dense'|'arpack']
        Select eigensolver to use.  If n_components is much less than
        the number of training samples, arpack may be more efficient
        than the dense eigensolver.

    tol: float
        convergence tolerance for arpack.
        Default: 0 (optimal value will be chosen by arpack)

    max_iter : int
        maximum number of iterations for arpack
        Default: None (optimal value will be chosen by arpack)

    random_state : int seed, RandomState instance, or None, default : None
        A pseudo random number generator used for the initialization of the
        residuals when eigen_solver == 'arpack'.

    Attributes
    ----------

    lambdas_ :
        Eigenvalues of the centered kernel matrix

    alphas_ :
        Eigenvectors of the centered kernel matrix

    dual_coef_ :
        Inverse transform matrix

    X_transformed_fit_ :
        Projection of the fitted data on the kernel entropy components

    References
    ----------
    Kernel ECA based on:
    (c) Robert Jenssen, University of Tromso, Norway, 2010 
        R. Jenssen, "Kernel Entropy Component Analysis,"
        IEEE Trans. Patt. Anal. Mach. Intel., 32(5), 847-860, 2010.

    """

    def __init__(self, n_components=None, kernel="linear",
                 gamma=None, degree=3, coef0=1, kernel_params=None, eigen_solver='auto',
                 tol=0, max_iter=None, random_state=None,center=False):
        self.n_components = n_components
        self._kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self._centerer = KernelCenterer()
        self.center = center
        
    @property
    def _pairwise(self):
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        if callable(self._kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self._kernel,
#......... part of the code omitted here .........
Developer: tsterbak, Project: kernel_eca, Lines of code: 101, Source: kernel_eca.py

Example 13: KernelCenterer

    plt.figure()
    plt.plot(nComponents,kpcaldaScores,lw=3)

    plt.xlim(1,np.amax(nComponents))
    plt.title('kPCA accuracy')
    plt.xlabel('Number of components')
    plt.ylabel('accuracy')
    plt.xlim([500,1500])
    plt.legend (['LDA'],loc='lower right')
    plt.grid(True)    

if(0):
    # K-PCA second round
    ktrain = pair.rbf_kernel(Xtrain,Xtrain,gamma)
    ktest = pair.rbf_kernel(Xtest,Xtrain,gamma)
    kcent = KernelCenterer()
    kcent.fit(ktrain)
    ktrain = kcent.transform(ktrain)
    ktest = kcent.transform(ktest)
    
    kpca = PCA()
    kpca.fit_transform(ktrain)
    cumvarkPCA2 = np.cumsum(kpca.explained_variance_ratio_[0:220])
    
    # Calculate classifiation scores for each component
    nComponents = np.arange(1,nFeatures)
    kpcaScores2 = np.zeros((5,np.alen(nComponents)))
    for i,n in enumerate(nComponents):   
        kpca2 = PCA(n_components=n)
        kpca2.fit(ktrain)
        XtrainT = kpca2.transform(ktrain)
Developer: manuwhs, Project: Trapyng, Lines of code: 31, Source: baseFeatureExtractionLib.py

Example 14: KernelFisher


#......... part of the code omitted here .........
    `priors_` : array-like, shape = [n_classes]
        Class priors (sum to 1)
    
    `n_components_found_` : int
        number of fisher components found, which is <= n_components
        
    Examples (put fisher.py in working directory)
    --------
    >>> import numpy as np
    >>> from fisher import KernelFisher
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([0, 0, 0, 1, 1, 1])
    >>> fd = KernelFisher()
    >>> fd.fit(X, y)
    KernelFisher(coef0=1, degree=3, gamma=None, kernel='linear',
       norm_covariance=False, print_timing=False, priors=None,
       sigma_sqrd=1e-08, tol=0.001, use_total_scatter=True)
    >>> print(fd.transform([[-0.8, -1]]))
    [[-7.62102356]]

    """

    def __init__(self, use_total_scatter=True, sigma_sqrd=1e-8, tol=1.0e-3,
                 kernel="linear", gamma=None, degree=3, coef0=1,
                 norm_covariance = False, priors=None, print_timing=False):

        self.use_total_scatter = use_total_scatter
        self.sigma_sqrd = sigma_sqrd
        self.tol = tol
        self.kernel = kernel.lower()
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self._centerer = KernelCenterer()

        self.norm_covariance = norm_covariance
        self.print_timing = print_timing
        
        
        self.priors = np.asarray(priors) if priors is not None else None
        
        if self.priors is not None:
            if (self.priors < 0).any():
                raise ValueError('priors must be non-negative')
            if self.priors.sum() != 1:
                print('warning: the priors do not sum to 1. Renormalizing')
                self.priors = self.priors / self.priors.sum()
                
                
    @property
    def _pairwise(self):
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        params = {"gamma": self.gamma,
                  "degree": self.degree,
                  "coef0": self.coef0}
        try:
            return pairwise_kernels(X, Y, metric=self.kernel,
                                    filter_params=True, **params)
        except AttributeError:
            raise ValueError("%s is not a valid kernel. Valid kernels are: "
                             "rbf, poly, sigmoid, linear and precomputed."
                             % self.kernel)

Developer: andrewjohnlowe, Project: JetImages, Lines of code: 65, Source: fisher.py

Example 15: ALIGNFSOFT

def ALIGNFSOFT(kernel_list, ky, y, test_fold, tags):
    # Find best upper bound in CV and train on whole data
    # Return the weights
    y = y.ravel()
    n_km = len(kernel_list)

    tag = np.array(tags)
    tag = tag[tag!=test_fold]
    remain_fold = np.unique(tag).tolist()
    all_best_c = []
    for validate_fold in remain_fold:
        train = tag != validate_fold
        validate = tag == validate_fold
        # train on the train fold, validate on the validate fold.
        # Do not use the test fold; it is used in the outer CV
        ky_train = ky[np.ix_(train, train)]
        y_train = y[train]
        y_validate = y[validate]
        train_km_list = []
        validate_km_list = []
        n_train = len(y_train)
        n_validate = len(y_validate)

        for km in kernel_list:
            kc = KernelCenterer()
            train_km = km[np.ix_(train, train)]
            validate_km = km[np.ix_(validate, train)]
            # center train and validate kernels                      
            train_km_c = kc.fit_transform(train_km)
            train_km_list.append(train_km_c)
            validate_km_c = kc.transform(validate_km)
            validate_km_list.append(validate_km_c)

        # if the label is too biased, SVM CV will fail, just return ALIGNF solution
        if np.sum(y_train==1) > n_train-3 or np.sum(y_train==-1) > n_train-3:
            return 1e8, ALIGNFSLACK(train_km_list, ky_train, 1e8) 

        Cs = np.exp2(np.array(range(-9,7))).tolist() + [1e8]
        W = np.zeros((n_km, len(Cs)))
        for i in range(len(Cs)):
            W[:,i] = ALIGNFSLACK(train_km_list, ky_train, Cs[i])

        W = W / np.linalg.norm(W, 2, 0)
        f1 = np.zeros(len(Cs))
        for i in range(len(Cs)):
            train_ckm = np.zeros((n_train,n_train))
            validate_ckm = np.zeros((n_validate,n_train))
            w = W[:,i]
            for j in range(n_km):
                train_ckm += w[j]*train_km_list[j]
                validate_ckm += w[j]*validate_km_list[j]
            f1[i] = svm(train_ckm, validate_ckm, y_train, y_validate)
        # return the first maximum
        maxind = np.argmax(f1)
        bestC = Cs[maxind]
        all_best_c.append(bestC)
        print(f1)
        print("..Best C is", bestC)

    bestC = np.mean(all_best_c)
    print "..Take the average best upper bound", bestC
    # use the best upper bound to solve ALIGNFSOFT
    return bestC, ALIGNFSLACK(kernel_list, ky, bestC)    
Developer: aalto-ics-kepaco, Project: softALIGNF, Lines of code: 63, Source: run_mkl.py


Note: The sklearn.preprocessing.KernelCenterer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the License of the corresponding project; do not reproduce without permission.