This article collects typical usage examples of the pandas.io.parsers.ExcelWriter class in Python. If you are wondering what exactly the ExcelWriter class does, how to use it, or what working examples look like, the hand-picked class code examples below may help.
Eight code examples of the ExcelWriter class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _check_extension_sheets
def _check_extension_sheets(self, ext):
    path = "__tmp_to_excel_from_excel_sheets__." + ext
    self.frame["A"][:5] = nan

    self.frame.to_excel(path, "test1")
    self.frame.to_excel(path, "test1", cols=["A", "B"])
    self.frame.to_excel(path, "test1", header=False)
    self.frame.to_excel(path, "test1", index=False)

    # Test writing to separate sheets
    writer = ExcelWriter(path)
    self.frame.to_excel(writer, "test1")
    self.tsframe.to_excel(writer, "test2")
    writer.save()

    reader = ExcelFile(path)
    recons = reader.parse("test1", index_col=0)
    tm.assert_frame_equal(self.frame, recons)
    recons = reader.parse("test2", index_col=0)
    tm.assert_frame_equal(self.tsframe, recons)

    np.testing.assert_equal(2, len(reader.sheet_names))
    np.testing.assert_equal("test1", reader.sheet_names[0])
    np.testing.assert_equal("test2", reader.sheet_names[1])

    os.remove(path)
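This test method exercises the legacy pandas.io.parsers.ExcelWriter API: a writer is created, passed to DataFrame.to_excel once per sheet, and then explicitly saved. In current pandas the same multi-sheet round trip looks roughly like the sketch below; this is an illustrative rewrite rather than part of the original example, and it assumes a recent pandas where ExcelWriter lives at the top level and works as a context manager:

import numpy as np
import pandas as pd

# two small frames to write, one per sheet
frame = pd.DataFrame({"A": [1.0, np.nan], "B": [3.0, 4.0]})
tsframe = pd.DataFrame({"x": range(3)},
                       index=pd.date_range("2024-01-01", periods=3))

path = "multi_sheet_example.xlsx"

# modern equivalent of ExcelWriter(...) / to_excel / save():
# the context manager saves and closes the file on exit
with pd.ExcelWriter(path) as writer:
    frame.to_excel(writer, sheet_name="test1")
    tsframe.to_excel(writer, sheet_name="test2")

# read both sheets back, mirroring ExcelFile(path).parse(...)
recons = pd.read_excel(path, sheet_name=None, index_col=0)
assert list(recons) == ["test1", "test2"]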
Example 2: _check_extension_sheets
def _check_extension_sheets(self, ext):
    path = '__tmp_to_excel_from_excel_sheets__.' + ext
    self.frame['A'][:5] = nan

    self.frame.to_excel(path, 'test1')
    self.frame.to_excel(path, 'test1', cols=['A', 'B'])
    self.frame.to_excel(path, 'test1', header=False)
    self.frame.to_excel(path, 'test1', index=False)

    # Test writing to separate sheets
    writer = ExcelWriter(path)
    self.frame.to_excel(writer, 'test1')
    self.tsframe.to_excel(writer, 'test2')
    writer.save()

    reader = ExcelFile(path)
    recons = reader.parse('test1', index_col=0)
    tm.assert_frame_equal(self.frame, recons)
    recons = reader.parse('test2', index_col=0)
    tm.assert_frame_equal(self.tsframe, recons)

    np.testing.assert_equal(2, len(reader.sheet_names))
    np.testing.assert_equal('test1', reader.sheet_names[0])
    np.testing.assert_equal('test2', reader.sheet_names[1])

    os.remove(path)
Example 3: export_to
def export_to(self, file_path, batchsize=100):
    self.xls_writer = ExcelWriter(file_path)
    # get record count
    record_count = self._query_mongo(count=True)

    # query in batches and for each batch create an XLSDataFrameWriter and
    # write to existing xls_writer object
    start = 0
    header = True
    while start < record_count:
        cursor = self._query_mongo(self.filter_query, start=start,
                                   limit=batchsize)
        data = self._format_for_dataframe(cursor)

        # write all cursor's data to their respective sheets
        for section_name, section in self.sections.iteritems():
            records = data[section_name]
            # TODO: currently ignoring nested repeats so ignore sections
            # that have 0 records
            if len(records) > 0:
                columns = section["columns"] + self.EXTRA_COLUMNS
                writer = XLSDataFrameWriter(records, columns)
                writer.write_to_excel(self.xls_writer, section_name,
                                      header=header, index=False)
        header = False
        # increment counter(s)
        start += batchsize
    self.xls_writer.save()
Example 4: export_to
def export_to(self, file_path, batchsize=1000):
    self.xls_writer = ExcelWriter(file_path)
    # get record count
    record_count = self._query_mongo(count=True)

    # query in batches and for each batch create an XLSDataFrameWriter and
    # write to existing xls_writer object
    start = 0
    header = True
    while start < record_count:
        cursor = self._query_mongo(self.filter_query, start=start,
                                   limit=batchsize)
        data = self._format_for_dataframe(cursor)

        # write all cursor's data to their respective sheets
        for section_name, section in self.sections.iteritems():
            records = data[section_name]
            # TODO: currently ignoring nested repeats so ignore sections
            # that have 0 records
            if len(records) > 0:
                # use a different group delimiter if needed
                columns = section["columns"]
                if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
                    columns = [self.group_delimiter.join(col.split("/"))
                               for col in columns]
                columns = columns + self.EXTRA_COLUMNS
                writer = XLSDataFrameWriter(records, columns)
                writer.write_to_excel(self.xls_writer, section_name,
                                      header=header, index=False)
        header = False
        # increment counter(s)
        start += batchsize
        time.sleep(0.1)
    self.xls_writer.save()
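Examples 3 and 4 stream records from MongoDB in batches and funnel every batch into one shared ExcelWriter, emitting the column header only for the first batch. The fragment below sketches the same batching idea with plain pandas, independent of the XLSDataFrameWriter helper used above; the data source, sheet name, and batch contents are made up for illustration, and it assumes a recent pandas with the openpyxl engine, where repeated to_excel calls on an open writer can target the same sheet at increasing startrow offsets:

import pandas as pd

def export_in_batches(record_batches, path, sheet_name="data"):
    """Write an iterable of record batches to one sheet of one workbook.

    record_batches is assumed to yield lists of dicts (for example, pages of
    a MongoDB cursor); the header row is written only for the first batch.
    """
    with pd.ExcelWriter(path, engine="openpyxl") as writer:
        startrow = 0
        for batch in record_batches:
            df = pd.DataFrame.from_records(batch)
            df.to_excel(writer, sheet_name=sheet_name, index=False,
                        header=(startrow == 0), startrow=startrow)
            # advance past the rows just written (plus the header row once)
            startrow += len(df) + (1 if startrow == 0 else 0)

# hypothetical usage with two "pages" of records
export_in_batches(
    [[{"A": 1, "B": 2}], [{"A": 3, "B": 4}]],
    "batched_export.xlsx",
)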
Example 5: export_to
def export_to(self, file_path):
    self.xls_writer = ExcelWriter(file_path)

    # query in batches and for each batch create an XLSDataFrameWriter and
    # write to existing xls_writer object

    # get records from mongo - do this on export so we can batch if we
    # choose to, as we should
    cursor = self._query_mongo(self.filter_query)

    data = self._format_for_dataframe(cursor)

    # TODO: batching will not work as expected since indexes are calculated
    # based on the current batch; a new batch will re-calculate indexes and if
    # they are going into the same excel file, we'll have duplicates
    # possible solution - keep track of the last index from each section

    # write all cursor's data to different sheets
    # TODO: for every repeat, the index should be re-calculated
    for section in self.sections:
        # TODO: currently ignoring nested repeat data which will have no
        # records
        records = data[section["name"]]
        if len(records) > 0:
            section_name = section["name"]
            columns = section["columns"] + self.EXTRA_COLUMNS
            writer = XLSDataFrameWriter(records, columns)
            writer.write_to_excel(self.xls_writer, section_name,
                                  header=True, index=False)
    self.xls_writer.save()
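The TODO comments in this example point out that per-section indexes are recomputed for each batch, so splitting the export into batches would produce duplicate _index values in the same workbook. The comment's own suggested fix is to carry the last index of each section across batches; a minimal sketch of that bookkeeping, with hypothetical section names, might look like this:

# running counters, one per section, surviving across batches
last_index = {"survey": 0, "repeat_group": 0}

def index_records(section_name, records):
    """Assign monotonically increasing _index values to a batch of records."""
    start = last_index[section_name]
    for offset, record in enumerate(records, start=1):
        record["_index"] = start + offset
    last_index[section_name] = start + len(records)
    return records

# a second batch of the same section keeps counting where the first left off
index_records("survey", [{"a": 1}, {"a": 2}])   # _index 1, 2
index_records("survey", [{"a": 3}])             # _index 3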
Example 6: to_excel
def to_excel(self, path, na_rep=''):
    """
    Write each DataFrame in Panel to a separate excel sheet

    Parameters
    ----------
    path : string or ExcelWriter object
        File path or existing ExcelWriter
    na_rep : string, default ''
        Missing data representation
    """
    from pandas.io.parsers import ExcelWriter
    writer = ExcelWriter(path)
    for item, df in self.iteritems():
        name = str(item)
        df.to_excel(writer, name, na_rep=na_rep)
    writer.save()
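This method comes from the long-removed Panel class and writes each item of the 3-D Panel to its own sheet. Panel is gone from current pandas, but the same pattern applies to a plain dict of DataFrames; the sketch below is an illustrative equivalent, not the original pandas implementation:

import pandas as pd

def frames_to_excel(frames, path, na_rep=""):
    """Write each DataFrame in a dict to a separate sheet of one workbook."""
    with pd.ExcelWriter(path) as writer:
        for item, df in frames.items():
            df.to_excel(writer, sheet_name=str(item), na_rep=na_rep)

frames_to_excel(
    {"prices": pd.DataFrame({"p": [1.0, None]}),
     "volumes": pd.DataFrame({"v": [10, 20]})},
    "panel_like.xlsx",
)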
Example 7: str
# big = big.drop('AnnStaticRet', 1)
# big = big.drop('AnnCapitalRet', 1)
# big['AnnStaticRet'] = new_ind.AnnStaticRet.values
# big['AnnCapitalRet'] = new_ind.AnnCapitalRet.values
today_str = str(str(month) + str(day) + str(year))
big = big.rename(columns={'Last': 'OptionPrice', 'industry': 'Industry'})

xlsx = '.xlsx'
csv = '.csv'
file_name = 'All_covered_call' + today_str
sectors = big.Sector.unique().astype(str)

name_xl = file_name + xlsx
writer = ExcelWriter(name_xl)
big.to_excel(writer, sheet_name='All Sectors')
summary = big.groupby(['Sector', 'Industry']).mean()
summary.to_excel(writer, sheet_name='Sector Summary')
for i in sectors:
    to_save = big[big.Sector == i]
    name = i.replace('/', '-')
    to_save.to_excel(writer, sheet_name=name)
writer.save()

name_cs = file_name + csv
big.to_csv(name_cs)
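Example 7 quietly works around an Excel constraint: sheet names cannot contain characters such as '/' (hence the i.replace('/', '-') call) and are limited to 31 characters. A small helper that generalizes that sanitization step could look like the following sketch, which is an illustration rather than code from the example above:

import re

# characters Excel forbids in sheet names: \ / * ? : [ ]
_INVALID_SHEET_CHARS = re.compile(r"[\\/*?:\[\]]")

def safe_sheet_name(name, replacement="-", max_chars=31):
    """Return a sheet name Excel will accept for the given label."""
    cleaned = _INVALID_SHEET_CHARS.sub(replacement, str(name))
    return cleaned[:max_chars]

safe_sheet_name("Consumer Goods/Retail")   # 'Consumer Goods-Retail'
safe_sheet_name("A" * 40)                  # truncated to 31 characters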
Example 8: XLSDataFrameBuilder
class XLSDataFrameBuilder(AbstractDataFrameBuilder):
    """
    Generate structures from mongo and DataDictionary for a DataFrameXLSWriter

    This builder can choose to query the data in batches and write to a single
    ExcelWriter object using multiple instances of DataFrameXLSWriter
    """
    INDEX_COLUMN = u"_index"
    PARENT_TABLE_NAME_COLUMN = u"_parent_table_name"
    PARENT_INDEX_COLUMN = u"_parent_index"
    EXTRA_COLUMNS = [INDEX_COLUMN, PARENT_TABLE_NAME_COLUMN,
                     PARENT_INDEX_COLUMN]
    SHEET_NAME_MAX_CHARS = 30
    XLS_SHEET_COUNT_LIMIT = 255
    XLS_COLUMN_COUNT_MAX = 255
    CURRENT_INDEX_META = 'current_index'

    def __init__(self, username, id_string, filter_query=None):
        super(XLSDataFrameBuilder, self).__init__(username, id_string,
                                                  filter_query)

    def _setup(self):
        super(XLSDataFrameBuilder, self)._setup()
        # need to split columns, with repeats in individual sheets and
        # everything else on the default sheet
        self._generate_sections()

    def export_to(self, file_path, batchsize=100):
        self.xls_writer = ExcelWriter(file_path)
        # get record count
        record_count = self._query_mongo(count=True)

        # query in batches and for each batch create an XLSDataFrameWriter and
        # write to existing xls_writer object
        start = 0
        header = True
        while start < record_count:
            cursor = self._query_mongo(self.filter_query, start=start,
                                       limit=batchsize)
            data = self._format_for_dataframe(cursor)

            # write all cursor's data to their respective sheets
            for section_name, section in self.sections.iteritems():
                records = data[section_name]
                # TODO: currently ignoring nested repeats so ignore sections
                # that have 0 records
                if len(records) > 0:
                    columns = section["columns"] + self.EXTRA_COLUMNS
                    writer = XLSDataFrameWriter(records, columns)
                    writer.write_to_excel(self.xls_writer, section_name,
                                          header=header, index=False)
            header = False
            # increment counter(s)
            start += batchsize
        self.xls_writer.save()

    def _format_for_dataframe(self, cursor):
        """
        Format each record for consumption by a dataframe

        returns a dictionary with the key being the name of the sheet, and
        values a list of dicts to feed into a DataFrame
        """
        data = dict((section_name, []) for section_name in
                    self.sections.keys())

        default_section = self.sections[self.survey_name]
        default_columns = default_section["columns"]

        for record in cursor:
            # from record, we'll end up with multiple records, one for each
            # section we have

            # add records for the default section
            self._add_data_for_section(data[self.survey_name],
                                       record, default_columns,
                                       self.survey_name)
            parent_index = default_section[self.CURRENT_INDEX_META]

            for sheet_name, section in self.sections.iteritems():
                # skip default section i.e. survey name
                if sheet_name != self.survey_name:
                    xpath = section["xpath"]
                    columns = section["columns"]
                    # TODO: handle nested repeats - ignoring nested repeats
                    # for now which will not be in the top level record,
                    # perhaps nest sections as well so we can recurse in and
                    # get them
                    if record.has_key(xpath):
                        repeat_records = record[xpath]
                        num_repeat_records = len(repeat_records)
                        for repeat_record in repeat_records:
                            self._add_data_for_section(data[sheet_name],
                                                       repeat_record, columns,
                                                       sheet_name,
                                                       parent_index,
                                                       self.survey_name)
        return data

    def _add_data_for_section(self, data_section, record, columns,
                              section_name, parent_index=-1,
                              parent_table_name=None):
        data_section.append({})
        self.sections[section_name][self.CURRENT_INDEX_META] += 1
        # ......... the rest of this code example is omitted .........