当前位置: 首页>>代码示例>>Python>>正文


Python MinMaxScaler.partial_fit方法代码示例

本文整理汇总了Python中sklearn.preprocessing.MinMaxScaler.partial_fit方法的典型用法代码示例。如果您正苦于以下问题:Python MinMaxScaler.partial_fit方法的具体用法?Python MinMaxScaler.partial_fit怎么用?Python MinMaxScaler.partial_fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.preprocessing.MinMaxScaler的用法示例。


在下文中一共展示了MinMaxScaler.partial_fit方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: imresize

# Required import: from sklearn.preprocessing import MinMaxScaler
# i.e.: from sklearn.preprocessing.MinMaxScaler import partial_fit
    # NOTE(review): fragment — the enclosing def was cut off by the source page.
    # Resize the image to the 224x224 input expected by the caffe network.
    img_2 = imresize(img,[224,224])
    # Single-sample batches in NCHW layout: (batch=1, channels=3, H, W).
    train_X = np.zeros((1,3,32,32))
    train_X1 = np.zeros((1,3,224,224))
    # Transpose HWC -> CHW one channel at a time (img_1 is 32x32, img_2 is 224x224).
    train_X[:,0,:,:] = img_1[:,:,0]
    train_X1[:,0,:,:] = img_2[:,:,0]
    train_X[:,1,:,:] = img_1[:,:,1]
    train_X1[:,1,:,:] = img_2[:,:,1]
    train_X[:,2,:,:] = img_1[:,:,2]
    train_X1[:,2,:,:] = img_2[:,:,2]
    #get features from three different extractors for the same image
    feat_1 = features(np.array(train_X,dtype=np.float32))
    feat_2 = get_features(img_nr, 1)
    feat_3 = features_caffe(np.array(train_X1,dtype=np.float32))
    print len(feat_3), len(feat_3[0])

    # Incrementally update each scaler's min/max with this image's features.
    scaler1.partial_fit(feat_1)  
    scaler2.partial_fit(feat_2)  
    scaler3.partial_fit(feat_3)

    
# Candidate SGD learning rates (the sweep loop below is commented out).
learning_rates = [0.01,0.001,0.0001]
#for eta in learning_rates:
#train
for img_nr in img_train:
    #load image; image numbers are zero-padded to 6 digits in the filename
    if os.path.isfile('/var/node436/local/tstahl/Images/'+ (format(img_nr, "06d")) +'.jpg'):
        img = imread('/var/node436/local/tstahl/Images/'+ (format(img_nr, "06d")) +'.jpg')
    else:
        print 'warning: /var/node436/local/tstahl/Images/'+ (format(img_nr, "06d")) +'.jpg doesnt exist'
    # NOTE(review): if the file is missing, 'img' keeps the previous image — likely a bug upstream.
    img = imresize(img,[32,32])
    train_X = np.zeros((1,3,32,32))
开发者ID:TopSteely,项目名称:GenericCounting,代码行数:33,代码来源:compare_features1.py

示例2: WordCluster

# 需要导入模块: from sklearn.preprocessing import MinMaxScaler [as 别名]
# 或者: from sklearn.preprocessing.MinMaxScaler import partial_fit [as 别名]

#.........这里部分代码省略.........

        return lv2

    def get_words_count(self):
        """Return the number of distinct (lv1, lv2) vocabulary pairs.

        Rows where either lv1 or lv2 is -1 (unassigned) are excluded.
        """
        # Build the filter once, then count the distinct projected pairs.
        assigned = (DB.Vocabulary.lv2 != -1) & (DB.Vocabulary.lv1 != -1)
        pairs = DB.Vocabulary.select(DB.Vocabulary.lv1, DB.Vocabulary.lv2)
        return pairs.where(assigned).distinct().count()

    def get_samples(self):
        '''
        Collect all training samples, grouped per document.

        Rows with an unassigned cluster (lv1 or lv2 == -1) are filtered out
        by the query itself.

        :return: {(label, filename): [lv1, lv1, ...], ...}
        '''
        docs = {}
        for f in DB.Vocabulary.select(DB.Vocabulary.lv1,DB.Vocabulary.lv2,DB.Feature.label,DB.Feature.docname).join(DB.Feature).where((DB.Vocabulary.lv2 != -1) & (DB.Vocabulary.lv1 != -1)).iterator():
            assert isinstance(f,DB.Vocabulary)
            key = (f.feature.label, f.feature.docname)
            # setdefault replaces the deprecated dict.has_key() check-then-insert
            # (has_key was removed in Python 3; 'in'/setdefault works in both).
            docs.setdefault(key, []).append(f.lv1)
        return docs

    def create_classifier(self):
        """Train an SGD word classifier over PCA features stored in the DB.

        Streams the vocabulary table in pages of 1000 rows and feeds each
        page to SGDClassifier.partial_fit, then persists the result.
        NOTE(review): the method body is truncated by the source page below.
        """
        DB.db.connect()
        # modified_huber loss keeps the linear SGD model probability-capable.
        clf = SGDClassifier( loss="modified_huber")
        labs_map = NameToIndex()

        with DB.db.transaction():
            offset = 0
            words_count = self.get_words_count()
            # partial_fit needs the complete set of class ids up front.
            classes = numpy.arange(0,words_count)
            x_all = []
            y_all = []
            while True:
                print ' %d partial_fit %d'%(time(),offset)
                # Page through vocabulary rows joined to their PCA features,
                # ordered by feature so paging is stable.
                query = DB.Vocabulary\
                    .select(DB.Vocabulary.lv1, DB.Vocabulary.lv2)\
                    .join(DB.PcaModel, on=(DB.Vocabulary.feature == DB.PcaModel.feature)).order_by( DB.Vocabulary.feature).offset(offset).limit(1000)\
                    .tuples().iterator()
                # Each row becomes [lv1_label] + feature_vector.
                features = numpy.array(map(lambda x:[x[0]]+list(x[1]),query))
                offset += len(features)
                if len(features) == 0:
                    break

                Y = features[:,0]   # first column: word labels
                X = features[:,1:]  # remaining columns: feature vectors

                # Map raw labels to contiguous integer indices.
                labs = []
                for lab in Y:
                    labs.append(labs_map.map(lab))

                # Retain the first ~10000 samples as a fixed progress-scoring set.
                if(len(x_all)<10000):
                    x_all = x_all + X.tolist()
                    y_all = y_all + labs
                labs = numpy.array(labs)

                #clf = LinearSVC()
                #clf = OneVsRestClassifier(SVC(probability=True, kernel='linear'))
                #clf.fit(X,labs)
                clf.partial_fit(X,labs,classes)
                print clf.score(x_all,y_all)

            # Remove any previously persisted model before saving the new one.
            DB.TrainingResult.delete().where(DB.TrainingResult.name == self.__class__.__name__+"_clf").execute()
            DB.TrainingResult.delete().where(DB.TrainingResult.name == self.__class__.__name__+"_labs_map").execute()

            tr = DB.TrainingResult()
            tr.name = self.__class__.__name__+"_clf"
            # ... (remainder of the method omitted in this excerpt)
开发者ID:caoym,项目名称:odr,代码行数:70,代码来源:odr.py


注:本文中的sklearn.preprocessing.MinMaxScaler.partial_fit方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。