

Python preprocessing.PolynomialFeatures Class Code Examples

This article collects typical usage examples of the Python class sklearn.preprocessing.PolynomialFeatures. If you are wondering what PolynomialFeatures is for, or how to use it in practice, the curated class examples below may help.


The following presents 15 code examples of the PolynomialFeatures class, sorted by popularity by default.
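
Before the examples, here is a minimal standalone sketch, not taken from any of the projects below, of what a degree-2 PolynomialFeatures expansion produces; the toy array is invented purely for illustration:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

# Two samples with two features each (toy data for illustration only).
X = np.array([[2.0, 3.0],
              [0.5, 1.0]])

# degree=2 expands [a, b] into [1, a, b, a^2, a*b, b^2].
poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X)

print(X_poly.shape)  # (2, 6)
print(X_poly[0])     # [1. 2. 3. 4. 6. 9.]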

Example 1: __init__

    def __init__(self):
        self.theta = T.matrix()
        # define output for b
        combinations = PolynomialFeatures._combinations(2, 3, False, False)
        n_output_features_ = sum(1 for _ in combinations) + 1
        self.A_b = theano.shared(
            value=np.ones((n_output_features_,), dtype=theano.config.floatX),
            borrow=True, name='A_b')
        self.b_b = theano.shared(value=1.,
                                 borrow=True, name='b_b')

        combinations = PolynomialFeatures._combinations(2, 3, False, False)
        L = [(self.theta[:, 0] ** 0).reshape([-1, 1])]
        for i, c in enumerate(combinations):
            L.append(self.theta[:, c].prod(1).reshape([-1, 1]))
        self.XF3 = T.concatenate(L, axis=1)
        b = (T.dot(self.XF3, self.A_b) + self.b_b).reshape([-1, 1])

        # define output for k
        combinations = PolynomialFeatures._combinations(2, 2, False, False)
        n_output_features_ = sum(1 for _ in combinations) + 1
        self.rho_k = theano.shared(
            value=np.ones((n_output_features_,), dtype=theano.config.floatX),
            borrow=True, name='rho_k')

        combinations = PolynomialFeatures._combinations(2, 2, False, False)
        L = [(self.theta[:, 0] ** 0).reshape([-1, 1])]
        for i, c in enumerate(combinations):
            L.append(self.theta[:, c].prod(1).reshape([-1, 1]))
        self.XF2 = T.concatenate(L, axis=1)
        k = T.dot(self.XF2, self.rho_k).reshape([-1, 1])

        self.outputs = [T.concatenate([b, k], axis=1)]
        self.inputs = [self.theta]
        self.trainable_weights = [self.A_b, self.b_b, self.rho_k]
Developer: teopir, Project: ifqi, Lines of code: 35, Source file: pbo_grad_test.py
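
Note: PolynomialFeatures._combinations is a private scikit-learn helper whose signature has changed between releases, so the calls above may not work on every version. The sketch below is an assumed rough equivalent of what that helper enumerates for interaction_only=False and include_bias=False, built only from itertools; it is not part of the original project.

from itertools import chain, combinations_with_replacement

def monomial_index_combinations(n_features, degree):
    # Index tuples for all monomials of degree 1..degree (no bias term),
    # mirroring PolynomialFeatures(interaction_only=False, include_bias=False).
    return chain.from_iterable(
        combinations_with_replacement(range(n_features), d)
        for d in range(1, degree + 1)
    )

# For 2 features and degree 3 this yields 9 tuples: (0,), (1,), (0, 0), ...
print(list(monomial_index_combinations(2, 3)))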

Example 2: linearRegreSin

def linearRegreSin(url,degree):
    [a,b] = getData(url)
    trainA = a[0:140]
    trainB = b[0:140]
    testA = a[140:]
    testB = b[140:]

    poly = PolynomialFeatures(degree)
    trainA = np.float64(poly.fit_transform(trainA))
    testA = np.float64(poly.fit_transform(testA))
    theta = np.dot(np.dot(np.linalg.inv(np.dot(trainA.T,trainA)),trainA.T),trainB)
    plt.figure(1)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('data')
    plt.plot(trainA[:,1],trainB,"r*")
    y=np.dot(trainA, theta)
    print(pow(sum((y-trainB)**2),1/2)/140)  # root of the squared-error sum, divided by the training sample count

    y=np.dot(testA, theta)
    #plt.plot(testA[:,1], testB, "r.")
    plt.plot(testA[:,1],y,"k*")
    print(pow(sum((y-testB)**2),1/2)/60)  # root of the squared-error sum, divided by the test sample count
    plt.show()
    print(theta)
Developer: MorPhingG, Project: MachineLearning, Lines of code: 25, Source file: LinearRegression.py
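
The normal equation above inverts trainA.T @ trainA explicitly, which becomes numerically unstable as the polynomial degree grows. Below is a small self-contained sketch of a more stable alternative using np.linalg.lstsq; the toy data is invented and merely stands in for the expanded design matrix of the example.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

# Toy 1-D regression problem (values invented for illustration).
x = np.linspace(0, 1, 20).reshape(-1, 1)
y = np.sin(2 * np.pi * x).ravel()

A = PolynomialFeatures(degree=3).fit_transform(x)

# Solve the least-squares problem directly instead of forming inv(A.T @ A).
theta, *_ = np.linalg.lstsq(A, y, rcond=None)
print(theta)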

Example 3: myTradingSystem

def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, OI, P, R, RINFO, exposure, equity, settings):
    """ This system uses linear regression to allocate capital into the desired equities"""

    # Get parameters from setting
    nMarkets = len(settings['markets'])
    lookback = settings['lookback']
    dimension = settings['dimension']
    threshold = settings['threshold']

    pos = np.zeros(nMarkets, dtype=float)

    poly = PolynomialFeatures(degree=dimension)
    for market in range(nMarkets):
        reg = linear_model.LinearRegression()
        try:
            reg.fit(poly.fit_transform(np.arange(lookback).reshape(-1, 1)), CLOSE[:, market])
            trend = (reg.predict(poly.fit_transform(np.array([[lookback]]))) - CLOSE[-1, market]) / CLOSE[-1, market]

            if abs(trend[0]) < threshold:
                trend[0] = 0

            pos[market] = np.sign(trend[0])

        # for NaN data set position to 0
        except ValueError:
            pos[market] = .0

    return pos, settings
Developer: marcoracer, Project: quantiacs-python, Lines of code: 28, Source file: LinearRegression.py

Example 4: poly_model

def poly_model(ins,outs,degrees):
	poly   = PolynomialFeatures(degree=degrees)
	X = poly.fit_transform(ins)

	regr = linear_model.LinearRegression()
	regr.fit(X, outs)
	print_model("poly-"+str(degrees), regr, X, outs)
Developer: verdverm, Project: pypge, Lines of code: 7, Source file: theline.py

Example 5: interactor

def interactor(df):
    """ This function takes in a data frame and creates binary interaction
    terms from all numerical and categorical variables as well as the assessment
    questions, and outputs a data frame """

    my_data_complete = df.dropna()
    # interactions can only be done for non-missings
    colnames = list(my_data_complete.columns.values)
    # id and date columns
    id_cols_list = [
        x
        for x in colnames  # only for continuous vars
        if not (bool(re.search("_N$", x)) | bool(re.search("_C$", x)) | bool(re.search("_Q$", x)))
    ]
    # actual feature columns - to make interactions from
    new_cols_list = [
        x
        for x in colnames  # only for continuous vars
        if (bool(re.search("_N$", x)) | bool(re.search("_C$", x)) | bool(re.search("_Q$", x)))
    ]
    othervars = my_data_complete[id_cols_list]
    little_df = my_data_complete[new_cols_list]
    # computing all binary interaction terms
    poly = PolynomialFeatures(degree=2, interaction_only=True)
    theints = pd.DataFrame(poly.fit_transform(little_df))
    theints = theints.drop(theints.columns[0], axis=1)  # dropping the first column
    theints.columns = list(new_cols_list + list(itertools.combinations(new_cols_list, 2)))
    # concatenating the interaction terms to the original data frame
    df = pd.DataFrame(othervars.join(theints))
    new_features = theints.columns.values
    return df, new_features
Developer: pombredanne, Project: babies-public, Lines of code: 31, Source file: features.py
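
With interaction_only=True the expansion keeps the bias column, the original columns, and the pairwise products, but no squared terms, which is why the column names above can be rebuilt with itertools.combinations. A minimal sketch with hypothetical column names (not from the project):

import pandas as pd
from sklearn.preprocessing import PolynomialFeatures

# Toy frame with two numeric columns; the names are made up.
df = pd.DataFrame({"age_N": [1.0, 2.0], "score_N": [3.0, 4.0]})

poly = PolynomialFeatures(degree=2, interaction_only=True)
out = poly.fit_transform(df)

# Columns: bias, age_N, score_N, age_N*score_N -- no squared terms.
print(out)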

Example 6: batterLife_chargeMoreThan4

def batterLife_chargeMoreThan4(chargeTime):    
    import numpy as np
    trainDataArr = np.genfromtxt("trainingdata_batteryLife.txt", delimiter = ",")
    trainDataArr = trainDataArr[trainDataArr[ :,0] > 4]
    trainData = trainDataArr[:, 0]
    trainData = trainData.reshape(-1,1)
    trainValue = trainDataArr[:,1]
    testData = np.array(chargeTime)
    testData = testData.reshape(-1,1)
    
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn import linear_model
    
    # Plot outputs
    import matplotlib.pyplot as plt
    plt.scatter(trainData, trainValue,  color='black')
    plt.xticks(())
    plt.yticks(())
    plt.show()

    # Fit regression model
    poly = PolynomialFeatures(degree = 1)
    trainData_ = poly.fit_transform(trainData)
    testData_ = poly.fit_transform(testData)
    
    clf = linear_model.LinearRegression()
    clf.fit(trainData_, trainValue)
    return clf.predict(testData_)
Developer: JaneEvans, Project: Useful-Python3-Codes, Lines of code: 28, Source file: 2-level-linear-Regression_batteryLife.py
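
With degree=1 the transform only prepends a constant column of ones to the charge times, and LinearRegression fits an intercept by default, so the expansion here is effectively redundant. A tiny sketch (charge times invented) of what the degree-1 output looks like:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

hours = np.array([[4.5], [6.0], [8.0]])  # invented charge times
print(PolynomialFeatures(degree=1).fit_transform(hours))
# [[1.  4.5]
#  [1.  6. ]
#  [1.  8. ]]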

Example 7: hidden_layer

    def hidden_layer(self, X, w):
        # Z has one column per hidden unit; PolynomialFeatures(degree=1)
        # prepends a constant column of ones that acts as the bias term.
        Z = sigmoid(np.dot(X, w.T))
        p = PolynomialFeatures(degree=1)
        Z = p.fit_transform(Z)
        return Z
Developer: AngeloK, Project: cs584-hws, Lines of code: 7, Source file: mlp.py

Example 8: learning_curve

def learning_curve(classifier, X, y, cv, sample_sizes,
    degree=1, pickle_path=None, verbose=True):
    """ Learning curve
    """

    learning_curves = []
    for i, (train_index, test_index) in enumerate(cv):
        X_train = X[train_index]
        X_test = X[test_index]
        y_train = y[train_index]
        y_test = y[test_index]

        if degree > 1:
            poly = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=True)
            X_train = poly.fit_transform(X_train)
            X_test = poly.transform(X_test)

        lc = []
        for sample in sample_sizes:
            classifier.fit(X_train[:sample], y_train[:sample])

            # apply classifier on test set
            y_pred = classifier.predict(X_test)
            confusion = metrics.confusion_matrix(y_test, y_pred)
            lc.append(balanced_accuracy_expected(confusion))

        learning_curves.append(lc)
        if verbose: print(i, end=' ')
    
    # pickle learning curve
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(learning_curves, f, protocol=4)
    if verbose: print()
Developer: davidjwu, Project: mclass-sky, Lines of code: 34, Source file: classifier.py

Example 9: get_cl

def get_cl(tau, consider='EE', degree=5):
    if consider == 'EE':
        values = values_EE
    else:
        values = values_BB

    v = values#[:100]
    p = points#[:100]

    poly = PolynomialFeatures(degree=degree)
    # Vandermonde matrix of pre-computed parameter values.
    X_ = poly.fit_transform(p.reshape(-1,1))

    predict = np.array([tau]).reshape(1,-1)
    # Creates matrix of values you want to estimate from the existing
    # measurements. Computation speed scales very slowly when you ask for
    # estimate of many sets of parameters.
    predict_ = poly.fit_transform(predict)

    clf = LinearRegression()
    estimate = []
    for l in range(2, v.shape[1]):
        values_l = v[:,l]
        clf.fit(X_, values_l)
        estimate_l = clf.predict(predict_)
        estimate.append(estimate_l)
    estimate = np.array(estimate)

    ell = np.arange(2, l+1)
    Z = 2*np.pi/(ell*(ell+1))
    return ell, Z*estimate[:,0]
Developer: pqrs6, Project: clee-fast, Lines of code: 31, Source file: main_tau.py

Example 10: polynomial_expansion

	def polynomial_expansion(self, rank=2):
		"""
		Expand the features with polynomial terms of degree `rank`.
		"""
		pf = PolynomialFeatures(degree=rank)
		self.X_red = pf.fit_transform(self.X_red)
		self.X_white = pf.fit_transform(self.X_white)
Developer: Borisdatzar, Project: machine_learning_techniques, Lines of code: 7, Source file: wine_preprocesser.py

Example 11: test_polynomial_fits

def test_polynomial_fits(x, y, n_comps, model, k_folds=3):
  for i in range(1,6):
    poly = PolynomialFeatures(degree=i)
    poly_x = poly.fit_transform(x)
    r2_mean, r2_std, mse_mean, mse_std = run_conventional_linkage(poly_x, y, n_comps, model)
    print(r2_mean, r2_std, mse_mean, mse_std)
    print()
Developer: Materials-Informatics-Class-Fall2015, Project: MIC-Ternary-Eutectic-Alloy, Lines of code: 7, Source file: smart_pipeline.py
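
The number of expanded columns grows quickly with the degree: for n input features and a full degree-d expansion including the bias column it is C(n + d, d), which scikit-learn exposes as n_output_features_ after fitting. A quick check under assumed toy dimensions:

from math import comb

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

n_features, degree = 4, 5
X = np.zeros((1, n_features))  # dummy data; fit() only needs the feature count

poly = PolynomialFeatures(degree=degree).fit(X)
print(poly.n_output_features_, comb(n_features + degree, degree))  # both print 126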

Example 12: analysis_7

def analysis_7(df_Coredata):
	""" 多次元多項式モデル """

	#https://www.jeremyjordan.me/polynomial-regression/

	X = df_Coredata[['d','e','f','g','i']]
	y = df_Coredata['j']

	# Set the plotting style
	sns.set(style = 'whitegrid', context = 'notebook')
	# Plot pairwise relationships between variables
	#sns.pairplot(df_Coredata)
	#plt.show()


	#X_train, X_test, y_train, y_test  =  train_test_split(X,y,random_state = 0)
	#lr = linear_model.LinearRegression().fit(X_train, y_train)
	#print("Trainng set score: {:.2f}".format(lr.score(X_train, y_train)))
	#print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))

	### Feature scaling
	# Standardization
	std_Scaler = StandardScaler()
	data_std = std_Scaler.fit_transform(X)

	mmx_Scaler = MinMaxScaler()
	X_scaled = mmx_Scaler.fit_transform(X)
	#X_test_scaled = scaler.transform(X_test)

	#print(X_train_scaled)

	poly = PolynomialFeatures(degree = 2).fit(data_std)
	print(poly.get_feature_names())
Developer: Yotaro723, Project: test_2, Lines of code: 33, Source file: DoE.py
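
In recent scikit-learn releases get_feature_names() has been removed in favor of get_feature_names_out(), so the last line above may need adjusting depending on your version. A hedged sketch of the newer call, using stand-in random data and the column names from this example:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.random.rand(10, 5)  # stand-in for the scaled data above
poly = PolynomialFeatures(degree=2).fit(X)

# Available on scikit-learn >= 1.0; older versions only provide get_feature_names().
print(poly.get_feature_names_out(["d", "e", "f", "g", "i"]))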

Example 13: main

def main():
    testfile = sys.argv[1]
    modelfile = sys.argv[2]
    polyorder = int(sys.argv[3])
    testweeks = sys.argv[4]

    test_data = np.genfromtxt(testfile, delimiter=',', skip_header=1)

    X = test_data[:,:-1]
    y = test_data[:,-1]

    poly = PolynomialFeatures(degree=polyorder)
    Xpoly = poly.fit_transform(X)

    with open(modelfile, 'rb') as model, open(testweeks) as weeks:
        lr = pickle.load(model)
        games_per_week = (int(line) for line in weeks)
        ranges = []
        pos = 0
        for week in games_per_week:
            newpos = pos + week
            ranges.append( (pos, newpos) )
            pos = newpos
        print('W\tL\tPoints')
        weekly_results = (evaluate_week(week, Xpoly, y, lr) for week in ranges)
        for result in weekly_results:
            print('\t'.join(str(piece) for piece in result))
Developer: atmapersaud, Project: nfl-predictions, Lines of code: 27, Source file: nfl-evaluate.py

Example 14: mvr

def mvr(data): 
    x = data[:, 0:len(data[0])-1]
    y = data[:, -1]
    
    minTestingError = np.inf
    for dim in range(1, 3):
        if(dim > 1):
            print("Mapping into higher dimension of {} \n".format(dim))
        else:
            evaluateGradientDesc(data)
            print("Explicit solution\n")
        poly = PolynomialFeatures(dim)
        z = poly.fit_transform(x)
        
        theta = fitModel(z , y)
        
        print("Intercept     :   {} \nCoefficients : {}\n".format(theta[0], theta[1:]))
        testingError, sol = evaluateModel(z,y, False)
        
        if(dim == 1):
            print "Testing Error :", testingError
        
        if (testingError < minTestingError):
            minTestingError = testingError
            optimalDimension = dim
            optSol = sol
         
    print "Optimal Dimension : {}, Testing Error : {} ".format(optimalDimension, minTestingError)
    return optSol
Developer: rakeshadk7, Project: MachineLearning, Lines of code: 29, Source file: mvr.py

Example 15: init_predict

def init_predict(mode):
    """ 整理为用于预测的 X

    i: features
    o: X
    """
    import scipy.io as sio
    import scipy as sp
    from sklearn.preprocessing import PolynomialFeatures

    uid_ave = sio.loadmat('predict_cut_uid_ave.mat')['X']
    poly = PolynomialFeatures(degree=2)
    poly_uid_ave = poly.fit_transform(uid_ave)
    combined_list = [sp.sparse.csc_matrix(poly_uid_ave)]

    if mode == 'f':
        X_words = sio.loadmat('predict_cut_Xf.mat')['X']
    elif mode == 'c':
        X_words = sio.loadmat('predict_cut_Xc.mat')['X']
    else:
        X_words = sio.loadmat('predict_cut_Xl.mat')['X']
    #transformer = TfidfTransformer()
    #X_tfidf = transformer.fit_transform(X_words)

    combined_list.append(X_words)

    X = sp.sparse.hstack(combined_list)

    print(X.shape)
    return X
Developer: organization-lab, Project: weibo-predict, Lines of code: 30, Source file: regressor.py


Note: The sklearn.preprocessing.PolynomialFeatures class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.