

Python DataFrame.to_csv Method Code Examples

This article collects typical usage examples of the pandas.core.frame.DataFrame.to_csv method in Python. If you are wondering exactly how to use DataFrame.to_csv, how it behaves, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, pandas.core.frame.DataFrame.


The following presents 15 code examples of the DataFrame.to_csv method, sorted by popularity by default.
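
Before diving into the collected examples, here is a minimal, self-contained sketch of the basic call, assuming pandas is installed; the tiny DataFrame and the file name 'example.csv' are purely illustrative placeholders, not taken from any of the projects below:

from pandas.core.frame import DataFrame

# build a tiny illustrative DataFrame (hypothetical data)
df = DataFrame({'name': ['apple', 'banana'], 'calories': [52, 89]})

# write it to a CSV file, omitting the integer index column
df.to_csv('example.csv', index=False)

# calling to_csv without a path returns the CSV content as a string instead
csv_text = df.to_csv(index=False)
print(csv_text)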

Example 1: export_converted_values

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
    def export_converted_values(self):
        """
        This function is called initially to convert per-100g values to per serving values
        Once this function is invoked, new file is generated which serves as Database
        This function will need to be called only one time
        :return:
        """
        file_converted = self.file_converted_values
        data_file = self.file_database
        data = self.read_csv(data_file)
        converted_data = list()
        import math

        for item in data.values:
            converted_list = list(item[0:2])
            sub_item = item[2:50]
            for nutrient in sub_item:
                if math.isnan(nutrient):
                    nutrient = 0
                converted_list.append(nutrient * sub_item[47] / 100)
            converted_list.append(item[50])
            converted_data.append(converted_list)
        if len(self.cols) == 0:
            for col_name in list(data._info_axis._data):
                self.cols.append(col_name)
        df = DataFrame(data=converted_data, columns=self.cols)
        df.to_csv(file_converted, index=False)
        print 'File has been exported'
Developer ID: Basit-qc, Project: WHO---Food-Menu, Lines of code: 30, Source file: buildmenu.py

Example 2: feature_engineering

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
def feature_engineering(raw_data):
    input_data = raw_data[['Date','AdjClose','AdjVolume']].dropna()
    train_ratio = 0.8
    
    savedata= DataFrame(input_data)
    savedata.to_csv('/home/peng/workspace/datafortrainCao.csv', header=0)
    #===========================================================================
    # Vol_5 = index_cal().VOL_n(input_data, 5)
    # Vol_10 = index_cal().VOL_n(input_data, 10)
    # Vol_15 = index_cal().VOL_n(input_data, 15)
    # Vol_20 = index_cal().VOL_n(input_data, 20)
    # RDV_5 = index_cal().RDV_n(input_data, 5)
    # RDV_10 = index_cal().RDV_n(input_data, 10)
    # RDV_15 = index_cal().RDV_n(input_data, 15)
    # RDV_20 = index_cal().RDV_n(input_data, 20)
    #===========================================================================
    
    EMA15 = index_cal().EMAn(input_data, 15)
    RDP_5 = index_cal().RDP_n(input_data, 5)
    RDP_10 = index_cal().RDP_n(input_data, 10)
    RDP_15 = index_cal().RDP_n(input_data, 15)
    RDP_20 = index_cal().RDP_n(input_data, 20)
    RDP_plus_5 = index_cal().RDP_plus_n(input_data, 5)
    
    all_data = mergeColumnByDate(RDP_5,RDP_10,RDP_15,RDP_20,EMA15,RDP_plus_5)
    features = all_data[['RDP-5','RDP-10','RDP-15','RDP-20','EMA15']]
    features = PCA().fit_transform(features.values)
    (x_train, x_test) = divideTrainTest(features, train_ratio)
    objectives = all_data['RDP+5'].values
    (y_train,y_real) = divideTrainTest(objectives, train_ratio)
    
    return (x_train,y_train,x_test,y_real)
Developer ID: jp1989326, Project: Quant-Ver1, Lines of code: 34, Source file: applicationOfSVM.py

Example 3: generate_input_df

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
        def generate_input_df(self, n_topics, vocab_size, document_length, n_docs, 
                              previous_vocab=None, vocab_prefix=None, 
                              df_outfile=None, vocab_outfile=None, 
                              n_bags=1):
                        
            print "Generating input DF"
                        
            # word_dists is the topic x document_length matrix
            word_dists = self.generate_word_dists(n_topics, vocab_size, document_length)                        
            
            # generate each document x terms vector
            docs = np.zeros((vocab_size, n_docs), dtype=int64)
            for i in range(n_docs):
                docs[:, i] = self.generate_document(word_dists, n_topics, vocab_size, document_length)
                
            if previous_vocab is not None:
                width = vocab_size/n_topics
                high = int(document_length / width)                
                # randomly initialises the previous_vocab part
                additional = np.random.randint(high, size=(len(previous_vocab), n_docs))
                docs = np.vstack((additional, docs))
                
            df = DataFrame(docs)
            df = df.transpose()
            print df.shape            
            if self.make_plot:            
                self._plot_nicely(df, 'Documents X Terms', 'Terms', 'Docs')
            
            if df_outfile is not None:
                df.to_csv(df_outfile)        

            print "Generating vocabularies"
            
            # initialises vocab to either previous vocab or a blank list
            if previous_vocab is not None:
                vocab = previous_vocab.tolist()
            else:
                vocab = []

            # add new words
            for n in range(vocab_size):
                if vocab_prefix is None:
                    word = "word_" + str(n)
                else:
                    word = vocab_prefix + "_word_" + str(n)
                # if more than one bag, then initialise word type too
                if n_bags > 1:
                    word_type = np.random.randint(n_bags)
                    tup = (word, word_type)
                    vocab.append(tup)
                else:
                    vocab.append(word)
            
            # save to txt
            vocab = np.array(vocab)
            if vocab_outfile is not None:
                np.savetxt(vocab_outfile, vocab, fmt='%s')
            
            return df, vocab
Developer ID: sdrogers, Project: MS2LDA, Lines of code: 61, Source file: lda_generate_data.py

Example 4: write_to_csv

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
 def write_to_csv(self):
     nw_df = DataFrame(list(self.lst))
     nw_df.columns = ['Redirect count','ssl_classification','url_length','hostname_length','subdomain_count','at_sign_in_url','exe_extension_in_request_url','exe_extension_in_landing_url',
                         'ip_as_domain_name','no_of_slashes_in requst_url','no_of_slashes_in_landing_url','no_of_dots_in_request_url','no_of_dots_in_landing_url','tld_value','age_of_domain',
                         'age_of_last_modified','content_length','same_landing_and_request_ip','same_landing_and_request_url']
     frames = [self.df['label'],self.df2['label']]
     new_df = pd.concat(frames)
     new_df = new_df.reset_index()
     nw_df['label'] = new_df['label']
     nw_df.to_csv('dataset1.csv',sep=',', encoding='latin-1')
Developer ID: kegbo, Project: Malicious-URL-Detector, Lines of code: 12, Source file: train.py

Example 5: update_menu

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
 def update_menu(self, food):
     """
     Updates the Menu using Pandas
     :param file_name:
     :param food:
     :param cols:
     :return:
     """
     df = DataFrame(data=food, columns=self.cols)
     df.to_csv(self.file_menu, index=False)
     return 'New Food has been added to the MENU'
Developer ID: Basit-qc, Project: WHO---Food-Menu, Lines of code: 13, Source file: buildmenu.py

Example 6: prepare_relations

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
def prepare_relations(filepath, splitting=0.9):
    all_data_list = pd.read_csv(filepath, header=None, encoding="utf-8", sep="\t")
    all_data_list.dropna()

    # shuffle(all_data_list)

    splitting = int(math.floor(splitting * len(all_data_list)))
    train_ds = DataFrame(all_data_list[:splitting])
    test_ds = DataFrame(all_data_list[splitting:])

    train_ds.to_csv('data/train.csv', encoding="utf-8", index=False, header=False, sep=",", quotechar='"')
    test_ds.to_csv('data/test.csv', encoding="utf-8", index=False, header=False, sep=",", quotechar='"')
Developer ID: alexeyev, Project: nm, Lines of code: 14, Source file: data_helpers.py

Example 7: average_submissions

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
def average_submissions():
    with open('submission_avg_13Aug.csv', 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(['clip', 'seizure', 'early'])

        df1 = read_csv('submission_late_loader_newa.csv')
        df2 = read_csv('submission_newa_all.csv')
        df = DataFrame(columns=['clip', 'seizure', 'early'])
        df['clip'] = df1['clip']
        df['seizure'] = (df1['seizure'] + df2['seizure'])/2.0
        df['early'] = (df1['early'] + df2['early'])/2.0
        with open('submission_avg_13Aug.csv', 'a') as f:
            df.to_csv(f, header=False, index=False)
Developer ID: IraKorshunova, Project: kaggle-seizure-detection, Lines of code: 15, Source file: avg_csv.py

Example 8: CSVDataFrameWriter

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
class CSVDataFrameWriter(object):
    def __init__(self, records, columns):
        # TODO: if records is empty, raise a known exception
        # catch it in the view and handle
        assert(len(records) > 0)
        self.dataframe = DataFrame(records, columns=columns)

        # remove columns we don't want
        for col in AbstractDataFrameBuilder.INTERNAL_FIELDS:
            if col in self.dataframe.columns:
                del(self.dataframe[col])

    def write_to_csv(self, csv_file, header=True, index=False):
        self.dataframe.to_csv(csv_file, header=header, index=index, na_rep=NA_REP,
                              encoding='utf-8')
Developer ID: radproject, Project: formhub, Lines of code: 17, Source file: pandas_mongo_bridge.py

Example 9: __init__

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
 def __init__(self):
     self.menu = list()
     self.cols = list()
     self.file_menu = 'Menu.csv'
     # self.file_database = 'WHFoods CSV For Zahid.csv'
     self.file_database = 'converted_values.csv'
     self.file_recommended_values = 'WHO Daily Recommended Values.rtf'
     self.tmp_file = 'tmp.csv'
     self.file_converted_values = 'converted_values.csv'
     self.indexes = [2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                     30, 31, 33, 40, 41, 43, 47]
     self.target_values = dict()
     # clears the Menu First
     df = DataFrame(data=None, columns=None)
     df.to_csv(self.file_menu, index=False)
     df.to_csv(self.tmp_file, index=False)
Developer ID: Basit-qc, Project: WHO---Food-Menu, Lines of code: 18, Source file: buildmenu.py

Example 10: average_csv_data

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
def average_csv_data(filename, target, *data_path):
    data_path = data_path[0]
    df_list = []
    for p in data_path:
        d = read_csv(p)
        df_list.append(d)

    avg_df = DataFrame(columns=['clip', target])
    avg_df['clip'] = df_list[0]['clip']
    avg_df[target] = 0
    for df in df_list:
        avg_df[target] += df[target]

    avg_df[target] /= 1.0 * len(df_list)

    with open(filename+'.csv', 'wb') as f:
        avg_df.to_csv(f, header=True, index=False)
Developer ID: IraKorshunova, Project: kaggle-seizure-detection, Lines of code: 19, Source file: avg_csv.py

Example 11: check_tolerance

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
 def check_tolerance(self, new_data_for_menu, filtered_col, target):
     """
     This function makes a temporary file tmp.csv to check for the valid Menu
     :param new_data_for_menu:
     :param filtered_col:
     :param target:
     :return:
     """
     success = False
     df = DataFrame(data=new_data_for_menu, columns=self.cols)
     df.to_csv(self.tmp_file, index=False)
     intermediate_data = self.nutrients_list(self.tmp_file, filtered_col, self.indexes)
     indicator = self.calc_tolerance(intermediate_data, target, filtered_col)
     if not indicator:
         tmp_data = self.read_csv(self.tmp_file)
         df = DataFrame(data=tmp_data, columns=self.cols)
         df.to_csv(self.file_menu, index=False)
         success = True
     else:
         tmp_data = self.read_csv(self.file_menu)
         df = DataFrame(data=tmp_data, columns=self.cols)
         df.to_csv(self.tmp_file, index=False)
         # print 'New Item does not satisfy the Tolerance Rule'
     return success
Developer ID: Basit-qc, Project: WHO---Food-Menu, Lines of code: 26, Source file: buildmenu.py

Example 12: enumerate

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
for ith,document in enumerate(input_list):
    if ith%100==0:
        print('recording %ith, total %i'%(ith,total))

    spectr=ReadNMSSMToolsSpectr(document,ignore=ignore)
    # inNumber=re.findall(r'\d+',document)[-1]
    # outNumber+=1   # reNumber

    col_name=['No_','path']
    value_row=[ith,document]

    for block,code_value_dict in spectr.__dict__.items():
        # print(block_name)
        try:
            code_2_name=getattr(block_table,block)
        except AttributeError:
            continue
        else:
            for code,value in code_value_dict.items():
                try:
                    col_name.append(code_2_name(code))
                except KeyError:
                    raise# continue
                else:
                    value_row.append(value)
    Data=Data.append(
        DataFrame(numpy.array([value_row]),columns=col_name),
        ignore_index=True)

Data.to_csv('Data_%s.csv'%similarity)
Developer ID: vooum, Project: ScanCommander, Lines of code: 32, Source file: gather_NTs.py

Example 13: DataFrame

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
from sys import stdin

from pandas.core.frame import DataFrame

from makstat.zavod import iter_contextual_atom_data

stream = (line.decode('cp1251').strip().encode('utf-8')
          for line in stdin)

df = DataFrame()
for cur_data in iter_contextual_atom_data(stream):
    current = DataFrame.from_dict([cur_data])
    df = df.append(current, ignore_index=False)

print df.to_csv(index=False, quotechar="\"", escapechar="\\")
Developer ID: petrushev, Project: makstat, Lines of code: 17, Source file: px2csv.py

Example 14: weather_data

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
        now1 = now - one_day
        row = weatherDF[(weatherDF.Date == now1.strftime("%Y-%m-%d")) & (weatherDF.Station == weather_station)]
        weathers = weathers.append(row)
    return weathers

def weather_data(look_str, weatherDF):
    features = ["Tmax","Tmin","Tavg","DewPoint", "WetBulb", "Heat","Cool","SnowFall", "PrecipTotal", "ResultSpeed"]
    weather_week0 = lookup_last_week_weather(look_str, weatherDF)
    weather_week = weather_week0[features]
    averagesS = weather_week.mean(0)
    maxs = weather_week.max(0)
    maxsS = pd.Series()
    mins = weather_week.min(0)
    minsS = pd.Series()
    for f in features:
        maxsS["%s_max" % f] = maxs[f]
        minsS["%s_min" % f] = mins[f]
    #datapoints = pd.concat([averagesS, maxsS, minsS])
    datapoints = averagesS
    weather_data = DataFrame(datapoints).T
    weather_data["Date"] = look_str
    return weather_data
        
weather_avg = DataFrame()
dates = weather["Date"]
for d in dates:
    row = weather_data(d, weather)
    weather_avg= weather_avg.append(row, ignore_index=True)
weather_avg.to_csv(os.path.join(data_dir,'weather_info_averages5.csv'), index=False)

# duplicates()
Developer ID: juandoso, Project: Competitions, Lines of code: 33, Source file: utils.py

Example 15: dataFrameToCSV

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import to_csv [as alias]
def dataFrameToCSV(dataframe, filename):
    """ 
    @summary: Dumps a dataframe in 'filename'
    """
    DataFrame.to_csv(dataframe, filename)
Developer ID: chengguangnan, Project: NasdaqData, Lines of code: 7, Source file: NasdaqDataRetriever.py


Note: The pandas.core.frame.DataFrame.to_csv method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to each project's License for distribution and use; do not reproduce without permission.