This article collects typical usage examples of the unicodecsv.DictWriter method in Python. If you are unsure what unicodecsv.DictWriter does or how to call it, the curated examples below should help; they are also a good starting point for exploring the rest of the unicodecsv module.
The 15 code examples of unicodecsv.DictWriter shown below are drawn from real projects and are ordered roughly by popularity.
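Before the project examples, here is a minimal self-contained sketch of the method (the file name, field names and rows are made up for illustration). Unlike the standard-library csv module on Python 2, unicodecsv.DictWriter expects a binary file object and takes an encoding argument, so unicode values can be written directly:

import unicodecsv

rows = [{u"name": u"Ana", u"city": u"São Paulo"},
        {u"name": u"Bo", u"city": u"Köln"}]
with open("people.csv", "wb") as f:  # binary mode: unicodecsv writes encoded bytes
    writer = unicodecsv.DictWriter(f, fieldnames=[u"name", u"city"], encoding="utf-8")
    writer.writeheader()    # header row built from fieldnames
    writer.writerows(rows)  # one CSV row per dict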
Example 1: to_csv

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def to_csv(pdf, types, encoding):
    # Excerpt: also assumes `import sys` at module level.
    objs = []
    fields = set()
    for t in types:
        new_objs = getattr(pdf, t + "s")
        if len(new_objs):
            objs += new_objs
            fields = fields.union(set(new_objs[0].keys()))

    first_columns = [
        "object_type", "page_number",
        "x0", "x1", "y0", "y1",
        "doctop", "top", "bottom",
        "width", "height"
    ]

    cols = first_columns + list(sorted(set(fields) - set(first_columns)))
    stdout = (sys.stdout.buffer if sys.version_info[0] >= 3 else sys.stdout)
    w = unicodecsv.DictWriter(stdout, fieldnames=cols, encoding=encoding)
    w.writeheader()
    w.writerows(objs)
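unicodecsv emits encoded bytes, which is why the function above targets sys.stdout.buffer on Python 3 instead of the text-mode sys.stdout. A standalone sketch of just that pattern (the field names and the row are illustrative):

import sys
import unicodecsv

# Pick the binary stdout stream on Python 3; sys.stdout already accepts bytes on Python 2.
out = sys.stdout.buffer if sys.version_info[0] >= 3 else sys.stdout
w = unicodecsv.DictWriter(out, fieldnames=["x0", "x1", "top", "bottom"], encoding="utf-8")
w.writeheader()
w.writerow({"x0": 1.0, "x1": 2.5, "top": 10.0, "bottom": 12.0})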
Example 2: convert_data_dict_to_csv

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def convert_data_dict_to_csv(tdata, extra_fields=None):
    '''
    Convert dict-format data from get_table_data into CSV file content, as a string.
    If extra_fields is not None, then add data from extra_fields to each row.
    This can be used, e.g., for adding course_id to a table missing that field.
    '''
    import unicodecsv as csv
    from StringIO import StringIO      # Python 2 only; use io.BytesIO on Python 3
    sfp = StringIO()
    extra_fields = extra_fields or {}
    fields = extra_fields.keys()       # Python 2: keys() returns a plain list here
    fields += tdata['field_names']
    dw = csv.DictWriter(sfp, fieldnames=fields)
    dw.writeheader()
    for row in tdata['data']:
        row.update(extra_fields)
        dw.writerow(row)
    return sfp.getvalue()
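A hypothetical call matching the docstring; the tdata layout ('field_names' plus 'data') comes from the code above, while the field names, values and course_id are invented:

tdata = {
    'field_names': ['username', 'grade'],
    'data': [{'username': 'alice', 'grade': 0.9},
             {'username': 'bob', 'grade': 0.7}],
}
# Add a course_id column to every row, as the docstring suggests.
csv_text = convert_data_dict_to_csv(tdata, extra_fields={'course_id': 'MITx/STL.162x/2T2017'})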
Example 3: cleanup_rows_from_grade_persistent

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def cleanup_rows_from_grade_persistent(csvfn, tempfn, field_to_fix="passed_timestamp"):
    """
    Removes the null values from grades_persistentcoursegrade.csv.gz.

    The function also fixes course ids by changing them from their
    edX URL format to their usual format. For instance,
    course-v1:MITx+STL.162x+2T2017 should be MITx/STL.162x/2T2017.

    This operation permanently modifies the CSV.

    :param csvfn: The path of the csv.gz to be modified
    :param tempfn: The path of the temporary csv.gz
    :type csvfn: str
    :type tempfn: str
    """
    with gzip.open(csvfn, "r") as open_csv:
        csv_dict = csv.DictReader(open_csv)
        with gzip.open(tempfn, "w+") as write_csv_file:
            write_csv = csv.DictWriter(write_csv_file, fieldnames=csv_dict.fieldnames)
            write_csv.writeheader()
            for row in csv_dict:
                row_dict = remove_nulls_from_row(row, field_to_fix)
                row_dict = fix_course_ids(row_dict)
                write_csv.writerow(row_dict)

    os.rename(tempfn, csvfn)
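The helpers remove_nulls_from_row and fix_course_ids are not part of this excerpt. A rough sketch of what they might do, based only on the docstring above (the "NULL" placeholder handling is an assumption):

def remove_nulls_from_row(row, field_to_fix):
    # Assumption: the export writes missing timestamps as the literal string "NULL".
    if row.get(field_to_fix) == "NULL":
        row[field_to_fix] = ""
    return row

def fix_course_ids(row):
    # course-v1:MITx+STL.162x+2T2017 -> MITx/STL.162x/2T2017 (per the docstring).
    course_id = row.get("course_id", "")
    if course_id.startswith("course-v1:"):
        row["course_id"] = course_id[len("course-v1:"):].replace("+", "/")
    return row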
Example 4: save_repo_request_rows

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def save_repo_request_rows(rows):
    # Excerpt from a Python 2 code base (note the `print u"..."` statement).
    with open('out.csv', 'wb') as f:
        w = csv.DictWriter(f, fieldnames=RepoRequest.list_fieldnames(), encoding='utf-8-sig')

        for row in rows[1:]:  # skip header row
            my_repo_request = RepoRequest()
            my_repo_request.set_id_seed(row[0])

            column_num = 0
            for fieldname in RepoRequest.list_fieldnames():
                if fieldname != "id":
                    setattr(my_repo_request, fieldname, row[column_num])
                column_num += 1

            w.writerow(my_repo_request.to_dict())
            print u"adding repo request {}".format(my_repo_request)
            db.session.merge(my_repo_request)

    safe_commit(db)
Example 5: unicode_csv_dict_writer

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def unicode_csv_dict_writer(data, header, output_directory, name=None):
    # Excerpt: also assumes `import os` and `import sys` at module level.
    try:
        import unicodecsv
    except ImportError:
        print("[+] Install unicodecsv module before executing this function")
        sys.exit(1)

    if name is None:
        name = "output.csv"

    print("[+] Writing {} to {}".format(name, output_directory))
    with open(os.path.join(output_directory, name), "wb") as csvfile:
        writer = unicodecsv.DictWriter(csvfile, fieldnames=header)
        writer.writeheader()
        writer.writerows(data)
Example 6: unicode_csv_dict_writer_py2

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def unicode_csv_dict_writer_py2(data, header, output_directory, name=None):
    # Python 2 variant of the previous example; also assumes `import os` and `import sys`.
    try:
        import unicodecsv
    except ImportError:
        print("[+] Install unicodecsv module before executing this"
              " function")
        sys.exit(1)

    if name is None:
        name = "output.csv"

    print("[+] Writing {} to {}".format(name, output_directory))
    with open(os.path.join(output_directory, name), "wb") as csvfile:
        writer = unicodecsv.DictWriter(csvfile, fieldnames=header)
        writer.writeheader()
        writer.writerows(data)
Example 7: next

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def next(self):
    # Method of an iterator class (see the sketch below): converts one dict from
    # self.source into one CSV-encoded chunk via a reusable in-memory buffer.
    row = next(self.source)
    self.buffer.truncate(0)
    self.buffer.seek(0)

    if not self.csv:
        self.csv = csv.DictWriter(self.buffer, list(row.keys()), quoting=csv.QUOTE_NONNUMERIC)
        self.add_header = True

    if self.add_header:
        if hasattr(self.csv, 'writeheader'):
            self.csv.writeheader()
        else:
            self.csv.writerow(dict((fn, fn) for fn in self.csv.fieldnames))
        self.add_header = False

    self.csv.writerow(row)

    self.buffer.seek(0)
    return self.buffer.read()
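The surrounding class is not shown in the excerpt, so the sketch below reconstructs a plausible shell around this method (the class name, constructor and BytesIO buffer are assumptions) to illustrate the lazily-created-writer, write-then-read-back pattern:

import unicodecsv as csv
from io import BytesIO

class DictsToCsvStream(object):  # hypothetical name, not in the excerpt
    """Wrap an iterator of dicts and yield CSV-encoded byte chunks."""

    def __init__(self, source):
        self.source = iter(source)
        self.buffer = BytesIO()   # reusable scratch buffer
        self.csv = None           # DictWriter created lazily from the first row's keys
        self.add_header = False

    def __iter__(self):
        return self

    def next(self):  # same logic as Example 7 above
        row = next(self.source)
        self.buffer.truncate(0)
        self.buffer.seek(0)
        if not self.csv:
            self.csv = csv.DictWriter(self.buffer, list(row.keys()),
                                      quoting=csv.QUOTE_NONNUMERIC)
            self.add_header = True
        if self.add_header:
            self.csv.writeheader()
            self.add_header = False
        self.csv.writerow(row)
        self.buffer.seek(0)
        return self.buffer.read()

    __next__ = next  # so the class also iterates under Python 3

# Usage sketch: stream rows as CSV without holding the whole file in memory.
chunks = DictsToCsvStream([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
csv_bytes = b"".join(chunks)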
Example 8: rephrase_studentmodule_opaque_keys

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def rephrase_studentmodule_opaque_keys(fn_sm):
    '''
    Generate rephrased studentmodule, with opaque key entries for module_id and course_id translated
    into traditional format.
    '''
    # Python 2 excerpt: uses print statements and assumes `path`, `openfile`
    # and `fix_opaque_keys` helpers from the surrounding module.
    fn_sm = path(fn_sm)
    orig_sm_fn = '%s/studentmodule_orig.csv.gz' % (fn_sm.dirname())
    cmd = 'cp %s %s' % (fn_sm, orig_sm_fn)
    print " Running %s" % cmd
    sys.stdout.flush()
    os.system(cmd)

    ofp = openfile(fn_sm, 'w')
    smfp = openfile(orig_sm_fn)
    cdr = csv.DictReader(smfp)
    first = True
    for entry in cdr:
        if first:
            odw = csv.DictWriter(ofp, fieldnames=cdr.fieldnames)
            odw.writeheader()
            first = False
        fix_opaque_keys(entry, 'module_id')
        fix_opaque_keys(entry, 'course_id')
        odw.writerow(entry)
    ofp.close()
    print "Rephrased %s -> %s to convert opaque keys syntax to standard module_id and course_id format" % (orig_sm_fn, fn_sm)
    sys.stdout.flush()

#-----------------------------------------------------------------------------
Example 9: output_table

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def output_table(self):
    '''
    output person_course table
    '''
    # Python 2 excerpt (dict.iteritems); `csv` here is the unicodecsv module.
    fieldnames = self.the_dict_schema.keys()
    ofn = 'person_course.csv.gz'
    ofnj = 'person_course.json.gz'

    ofp = self.openfile(ofnj, 'w')
    ocsv = csv.DictWriter(self.openfile(ofn, 'w'), fieldnames=fieldnames)
    ocsv.writeheader()

    self.log("Writing output to %s and %s" % (ofn, ofnj))

    # write JSON first - it's safer
    cnt = 0
    for key, pcent in self.pctab.iteritems():
        cnt += 1
        check_schema(cnt, pcent, the_ds=self.the_dict_schema, coerce=True)
        ofp.write(json.dumps(pcent) + '\n')
    ofp.close()

    # now write CSV file (may have errors due to unicode)
    for key, pcent in self.pctab.iteritems():
        if 0:    # after switching to unicodecsv, don't do this
            try:
                if 'countryLabel' in pcent:
                    if pcent['countryLabel'] == u'R\xe9union':
                        pcent['countryLabel'] = 'Reunion'
                    else:
                        # pcent['countryLabel'] = pcent['countryLabel'].decode('utf8').encode('utf8')
                        pcent['countryLabel'] = pcent['countryLabel'].encode('ascii', 'ignore')
            except Exception as err:
                self.log("Error handling country code unicode row=%s" % pcent)
                raise
        try:
            ocsv.writerow(pcent)
        except Exception as err:
            self.log("Error writing CSV output row=%s" % pcent)
            raise
Example 10: from_bq_to_local_file

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def from_bq_to_local_file(temp_data_filename, bq_tablename, header=True):
    setup_bigquery_creds()
    client = bigquery.Client()

    (dataset_id, table_id) = bq_tablename.split(".")
    dataset_ref = client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)
    table = client.get_table(table_ref)
    fieldnames = [schema.name for schema in table.schema]

    query = ('SELECT * FROM `unpaywall-bhd.{}` '.format(bq_tablename))
    query_job = client.query(
        query,
        # Location must match that of the dataset(s) referenced in the query.
        location='US')  # API request - starts the query

    rows = list(query_job)

    with open(temp_data_filename, 'wb') as f:
        # delimiter workaround from https://stackoverflow.com/questions/43048618/csv-reader-refuses-tab-delimiter?noredirect=1&lq=1#comment73182042_43048618
        writer = unicodecsv.DictWriter(f, fieldnames=fieldnames, delimiter=str(u'\t').encode('utf-8'))
        if header:
            writer.writeheader()
        for row in rows:
            writer.writerow(dict(zip(fieldnames, row)))

    print('Saved {} rows from {}.'.format(len(rows), bq_tablename))
    return fieldnames
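The odd-looking delimiter=str(u'\t').encode('utf-8') follows the linked Stack Overflow workaround: under Python 2 the csv machinery underneath unicodecsv is picky about receiving the delimiter as a byte string rather than a unicode string. In a fresh script a plain tab usually suffices; a smaller tab-separated sketch (file name, field names and the row are placeholders):

import unicodecsv

fieldnames = ["doi", "title"]
rows = [{"doi": "10.1234/abc", "title": u"An example récord"}]
with open("dump.tsv", "wb") as f:
    writer = unicodecsv.DictWriter(f, fieldnames=fieldnames,
                                   delimiter="\t", encoding="utf-8")
    writer.writeheader()
    writer.writerows(rows)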
Example 11: csvify

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def csvify(rows):
    '''Expects a list of dictionaries and returns a CSV response.'''
    # Excerpt from a web app: assumes a `Response` class (e.g. Flask's),
    # `BytesIO`, and `import unicodecsv as csv`.
    if not rows:
        csv_str = ''
    else:
        s = BytesIO()
        keys = rows[0].keys()
        dw = csv.DictWriter(s, keys)
        dw.writeheader()
        dw.writerows([dict(r) for r in rows])
        csv_str = s.getvalue()
    return Response(csv_str, mimetype='text/csv')
Example 12: csv_transactions

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def csv_transactions(self, year, month, file_name):
    transactions = self.transactions(year, month)
    if len(transactions) == 0:
        warnings.warn('No transactions for the period ({}-{})'.format(
            year, month))
        return

    with open(file_name, 'wb') as f:
        csv_writer = csv.DictWriter(f, fieldnames=self.fieldnames,
                                    encoding='utf-8-sig')  # add BOM to csv
        csv_writer.writeheader()
        csv_writer.writerows(transactions)
Example 13: write_ipa_all

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def write_ipa_all(ipa_bases, ipa_all, all_segments, sort_order):
    with open(ipa_bases, 'rb') as f:
        reader = csv.reader(f, encoding='utf-8')
        fieldnames = next(reader)
    with open(ipa_all, 'wb') as f:
        writer = csv.DictWriter(f, encoding='utf-8', fieldnames=fieldnames)
        # Writing {k: k} emits the header row by hand (equivalent to writeheader()).
        writer.writerow({k: k for k in fieldnames})
        all_segments_list = sort_all_segments(sort_order, all_segments)
        for segment in all_segments_list:
            fields = copy.copy(segment.features)
            fields['ipa'] = segment.form
            writer.writerow(fields)
Example 14: test_write_simple_dict

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def test_write_simple_dict(self):
    fd, name = tempfile.mkstemp()
    fileobj = open(name, 'w+b')
    try:
        writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
        writer.writeheader()
        fileobj.seek(0)
        self.assertEqual(fileobj.readline(), b"f1,f2,f3\r\n")
        writer.writerow({"f1": 10, "f3": "abc"})
        fileobj.seek(0)
        fileobj.readline()  # header
        self.assertEqual(fileobj.read(), b"10,,abc\r\n")
    finally:
        fileobj.close()
        os.unlink(name)
Example 15: test_write_unicode_header_dict

# Required module: import unicodecsv [as alias]
# Or: from unicodecsv import DictWriter [as alias]

def test_write_unicode_header_dict(self):
    fd, name = tempfile.mkstemp()
    fileobj = open(name, 'w+b')
    try:
        writer = csv.DictWriter(fileobj, fieldnames=[u"ñ", u"ö"])
        writer.writeheader()
        fileobj.seek(0)
        self.assertEqual(fileobj.readline().decode('utf-8'), u"ñ,ö\r\n")
    finally:
        fileobj.close()
        os.unlink(name)