

Python linear_model.OrthogonalMatchingPursuit Class Code Examples

This article collects typical usage examples of the Python class sklearn.linear_model.OrthogonalMatchingPursuit. If you are wondering what the OrthogonalMatchingPursuit class does, how to use it, or where to find working examples, the curated class code examples below may help.


Fifteen code examples of the OrthogonalMatchingPursuit class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
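Before turning to the collected examples, here is a minimal, self-contained usage sketch that is not drawn from any of the projects below; the data shapes, feature indices, and parameter values are illustrative assumptions. It builds a small synthetic sparse-regression problem and fits OrthogonalMatchingPursuit with a fixed number of non-zero coefficients:

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

# Synthetic sparse problem: 100 samples, 20 features, 3 truly active features.
rng = np.random.RandomState(0)
X = rng.randn(100, 20)
true_coef = np.zeros(20)
true_coef[[2, 7, 15]] = [1.5, -2.0, 0.8]
y = X.dot(true_coef) + 0.01 * rng.randn(100)

# Ask OMP for at most 3 non-zero coefficients (a residual tolerance via tol=...
# is the alternative stopping rule).
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3)
omp.fit(X, y)

print(omp.coef_.nonzero()[0])   # indices of the selected features
print(omp.predict(X[:5]))       # predictions for the first five samples

Most of the examples below follow this same pattern: construct the estimator with either n_nonzero_coefs or tol, call fit, then read coef_ and intercept_ or call predict.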

Example 1: Linear_Regression

def Linear_Regression(R_data):
    """
    Fit an OMP regression of the first security's returns on the factor returns.

    R_data is an n x m DataFrame with n observations and m columns; each column
    is the time series for one ticker. `factors` and `securities` are
    module-level lists of column names, and pandas is imported as `pd`.
    """
    # Independent variables: the factor columns, side by side.
    sid_list = [R_data[factor] for factor in factors]
    indep = pd.concat(sid_list, axis=1)

    # Dependent variable: the first security's series.
    Y = R_data[securities[0]]

    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=len(factors), fit_intercept=True)
    omp.fit(indep, Y)
    return omp
Author: darkhorse20, Project: Strategies, Lines: 26, Source: Pair_Stat_Arb_OMP.py

Example 2: test_omp_reaches_least_squares

def test_omp_reaches_least_squares():
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_samples, n_features = (10, 8)
    n_targets = 3
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_targets)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
    lstsq = LinearRegression()
    omp.fit(X, Y)
    lstsq.fit(X, Y)
    assert_array_almost_equal(omp.coef_, lstsq.coef_)
Author: 1992huanghai, Project: scikit-learn, Lines: 12, Source: test_omp.py

Example 3: test_omp_cv

def test_omp_cv():
    y_ = y[:, 0]
    gamma_ = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
                                        max_iter=10, cv=5)
    ompcv.fit(X, y_)
    assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(ompcv.coef_, gamma_)
    omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
                                    n_nonzero_coefs=ompcv.n_nonzero_coefs_)
    omp.fit(X, y_)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)
Author: 1992huanghai, Project: scikit-learn, Lines: 12, Source: test_omp.py

Example 4: classify_OMP

def classify_OMP(train, test):
    from sklearn.linear_model import OrthogonalMatchingPursuit as OMP

    x, y = train
    # tovec is a helper defined elsewhere in the module, presumably turning each
    # label into a vector of length ydim (one column per class).
    ydim = np.unique(y).shape[0]
    y = [tovec(yi, ydim) for yi in y]

    clf = OMP()
    clf.fit(x, y)

    x, y = test
    proba = clf.predict(x)
    return proba
Author: liangxh, Project: idu, Lines: 13, Source: classification.py

Example 5: fit_model_14

    def fit_model_14(self, toWrite=False):
        model = OrthogonalMatchingPursuit()

        for data in self.cv_data:
            X_train, X_test, Y_train, Y_test = data
            model.fit(X_train, Y_train)
            pred = model.predict(X_test)
            print("Model 14 score %f" % (logloss(Y_test, pred),))

        if toWrite:
            # Pickle files should be opened in binary mode.
            with open('model14/model.pkl', 'wb') as f2:
                pickle.dump(model, f2)
Author: JakeMick, Project: kaggle, Lines: 13, Source: days_work.py

Example 6: test_omp_cv

def test_omp_cv():
    # FIXME: This test is unstable on Travis, see issue #3190 for more detail.
    check_skip_travis()
    y_ = y[:, 0]
    gamma_ = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
                                        max_iter=10, cv=5)
    ompcv.fit(X, y_)
    assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(ompcv.coef_, gamma_)
    omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
                                    n_nonzero_coefs=ompcv.n_nonzero_coefs_)
    omp.fit(X, y_)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)
Author: 0x0all, Project: scikit-learn, Lines: 14, Source: test_omp.py

Example 7: __init__

def __init__(self, patch_size=(12,12), max_samples=1000000, **omp_args):
    self.patch_size = patch_size
    self.max_samples = max_samples
    self.omp = OrthogonalMatchingPursuit(**omp_args)
    self.D = None
    self.data = None
    self.components = None
    self.zscore = False
    self.log_amplitude = False
Author: getupyang, Project: AudioSpectrumPatchApproximation, Lines: 9, Source: AudioSpectrumPatchApproximation.py

Example 8: __init__

def __init__(self, n_components=49, patch_size=(8,8), max_samples=1000000, **kwargs):
    self.omp = OrthogonalMatchingPursuit()
    self.n_components = n_components
    self.patch_size = patch_size
    self.max_samples = max_samples
    self.D = None
    self.data = None
    self.components = None
    self.standardize = False
Author: BinRoot, Project: BregmanToolkit, Lines: 9, Source: sparseapprox.py

Example 9: SparseDeconvolution

def SparseDeconvolution(x, y, p, rtype='omp'):

    from numpy import zeros, hstack
    from scipy.linalg import toeplitz, norm
    from sklearn.linear_model import OrthogonalMatchingPursuit, Lasso

    # Scale both signals by the peak of x, then normalize the kernel.
    xm = x[abs(x).argmax()]
    x = (x.copy())/xm
    x = x/norm(x)
    y = (y.copy())/xm

    Nx = len(x)
    Ny = len(y)

    # Toeplitz convolution matrix and zero-padded observation vector.
    X = toeplitz(hstack((x, zeros(Nx+Ny-2))), r=zeros(Ny+Nx-1))
    Y = hstack((zeros(Nx-1), y, zeros(Nx-1)))

    if rtype == 'omp' and isinstance(p, int):
        # Integer p: number of non-zero coefficients to recover.
        model = OrthogonalMatchingPursuit(n_nonzero_coefs=p, normalize=False)
    elif rtype == 'omp' and p < 1.0:
        # Fractional p: residual tolerance.
        model = OrthogonalMatchingPursuit(tol=p, normalize=False)
    elif rtype == 'lasso':
        model = Lasso(alpha=p)
    else:
        raise ValueError('Unsupported combination of rtype and p')

    model.fit(X, Y)

    h = model.coef_
    b = model.intercept_

    return Y-b, X, h
Author: lesagejonathan, Project: ShawCor, Lines: 41, Source: spr.py

Example 10: CSSK

def CSSK(h, const=5.0, noise=0.0000001):
    """Compressed-sensing replacement for the Fourier transform of a 1D array h.

         h       = sampled time signal
         const   = scalar multiple of the length of h; larger values give greater
                     resolution, albeit at increased cost.
         noise   = scalar constant to account for numerical noise

         returns:
         g       = Fourier transform of h to the frequency domain using a CS technique

       The active implementation uses scikit-learn's OrthogonalMatchingPursuit; an
       equivalent l1-minimization formulation (which requires the CVXPY package) is
       kept below as a commented-out alternative.
    """

    h = np.asarray(h, dtype=float)
    Nt = len(h)
    Nw = int(const*Nt)
    t = np.arange(Nt)
    w = np.arange(Nw)
    # Sine dictionary mapping frequency-domain coefficients to time samples.
    F = (1/float(Nw))*np.sin(2.0*np.pi*np.outer(t, w)/float(Nw))

    # Recover a sparse frequency-domain representation with OMP.
    omp_cv = OrthogonalMatchingPursuit(tol=noise)
    omp_cv.fit(F, h)
    g = omp_cv.coef_

    ### Alternative with cvxpy: min |g|_1 subject to |F.g - h|_2 <= noise
    #g = cvx.Variable(Nw)
    #objective = cvx.Minimize(cvx.norm(g,1))
    #constraints = [cvx.norm(F*g - h,2) <= noise]
    #prob = cvx.Problem(objective, constraints)
    #prob.solve(solver='SCS',verbose=True)
    #g = np.asarray(g.value)[:,0]
    ### end cvxpy alternative
    return g
Author: jjgoings, Project: cq_realtime, Lines: 40, Source: cs_sklearn.py

Example 11: test_estimator_shapes

def test_estimator_shapes():
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)

    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)

    omp.fit(X, y[:, 0], Gram=G, Xy=Xy[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)

    omp.fit(X, y, Gram=G, Xy=Xy)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
Author: ashish-sadh, Project: scikit-learn, Lines: 21, Source: test_omp.py

Example 12: test_estimator

def test_estimator():
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)

    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)

    omp.set_params(fit_intercept=False, normalize=False)

    assert_warns(DeprecationWarning, omp.fit, X, y[:, 0], Gram=G, Xy=Xy[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_, 0)
    assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)

    assert_warns(DeprecationWarning, omp.fit, X, y, Gram=G, Xy=Xy)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_, 0)
    assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
Author: Adrellias, Project: scikit-learn, Lines: 23, Source: test_omp.py

Example 13: OrthogonalMatchingPursuit

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
pl.figure(figsize=(7, 7))
pl.subplot(4, 1, 1)
pl.xlim(0, 512)
pl.title("Sparse signal")
pl.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
pl.subplot(4, 1, 2)
pl.xlim(0, 512)
pl.title("Recovered signal from noise-free measurements")
pl.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
pl.subplot(4, 1, 3)
pl.xlim(0, 512)
Author: 2011200799, Project: scikit-learn, Lines: 31, Source: plot_omp.py

Example 14: orthogonal_matching_pursuit

def orthogonal_matching_pursuit(y, D):
    omp = OrthogonalMatchingPursuit()
    omp.fit(D, y)
    return omp
Author: zezhouliu, Project: cs229r-dict-learning, Lines: 4, Source: KSVD.py

Example 15: SparseApproxSpectrum

class SparseApproxSpectrum(object):
    def __init__(self, n_components=49, patch_size=(8,8), max_samples=1000000, **kwargs):
        self.omp = OrthogonalMatchingPursuit()
        self.n_components = n_components
        self.patch_size = patch_size
        self.max_samples = max_samples
        self.D = None
        self.data = None
        self.components = None
        self.standardize=False

    def _extract_data_patches(self, X):
        self.X = X
        data = extract_patches_2d(X, self.patch_size)
        data = data.reshape(data.shape[0], -1)
        if len(data)>self.max_samples:
            data = np.random.permutation(data)[:self.max_samples]
        print(data.shape)
        if self.standardize:
            self.mn = np.mean(data, axis=0) 
            self.std = np.std(data, axis=0)
            data -= self.mn
            data /= self.std
        self.data = data

    def extract_codes(self, X, standardize=False):
        self.standardize=standardize
        self._extract_data_patches(X)
        self.dico = MiniBatchDictionaryLearning(n_components=self.n_components, alpha=1, n_iter=500)
        print "Dictionary learning from data..."
        self.D = self.dico.fit(self.data)
        return self

    def plot_codes(self, cbar=False, **kwargs):
        #plt.figure(figsize=(4.2, 4))
        N = int(np.ceil(np.sqrt(self.n_components)))
        kwargs.setdefault('cmap', pl.cm.gray_r)
        kwargs.setdefault('origin','bottom')
        kwargs.setdefault('interpolation','nearest')
        for i, comp in enumerate(self.D.components_):
            plt.subplot(N, N, i + 1)
            comp  = comp * self.std + self.mn if self.standardize else comp
            plt.imshow(comp.reshape(self.patch_size), **kwargs)
            if cbar:
                plt.colorbar()
            plt.xticks(())
            plt.yticks(())
        plt.suptitle('Dictionary learned from spectrum patches\n', fontsize=16)
        plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

    def extract_audio_dir_codes(self, dir_expr='/home/mkc/exp/FMRI/stimuli/Wav6sRamp/*.wav',**kwargs):
        flist=glob.glob(dir_expr)
        self.X = np.vstack([feature_scale(LogFrequencySpectrum(f, nbpo=24, nhop=1024).X,normalize=1).T for f in flist]).T
        self.extract_codes(self.X, **kwargs)  # extract_codes is a method; it sets self.D itself
        self.plot_codes(**kwargs)
        return self

    def _get_approximation_coefs(self,data, components):
        w = np.array([self.omp.fit(components.T, d.T).coef_ for d in data])
        return w

    def reconstruct_spectrum(self, w=None, randomize=False):
        data = self.data
        components = self.D.components_
        if w is None:
            self.w = self._get_approximation_coefs(data, components)
            w = self.w
        if self.standardize:
            components = components * self.std + self.mn  # undo the standardization of the atoms
        if randomize:
            components = np.random.permutation(components)
        recon = np.dot(w, components).reshape(-1,self.patch_size[0],self.patch_size[1])
        self.X_hat = reconstruct_from_patches_2d(recon, self.X.shape)
        return self

    def reconstruct_individual_spectra(self, w=None, randomize=False, plotting=False, **kwargs):
        self.reconstruct_spectrum(w,randomize)
        w, components = self.w, self.D.components_
        self.X_hat_l = []
        for i in range(len(self.w.T)):
            r=np.array((np.matrix(w)[:,i]*np.matrix(components)[i,:])).reshape(-1,self.patch_size[0],self.patch_size[1])
            self.X_hat_l.append(reconstruct_from_patches_2d(r, self.X.shape))
        if plotting:
            plt.figure()            
            for k in range(self.n_components):
                plt.subplot(self.n_components**0.5,self.n_components**0.5,k+1)
                feature_plot(self.X_hat_l[k],nofig=1,**kwargs)
        return self
Author: BinRoot, Project: BregmanToolkit, Lines: 88, Source: sparseapprox.py


Note: The sklearn.linear_model.OrthogonalMatchingPursuit class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please follow the license of the corresponding project when distributing or using the code, and do not reproduce this article without permission.