This page collects typical usage examples of the writer function from the Python backports.csv module. If you are unsure what writer does, how to call it, or what real-world code that uses it looks like, the curated examples below may help.
The following shows 15 code examples of the writer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
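Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below, file name is hypothetical) showing the usual way to call backports.csv.writer: the backport mirrors the Python 3 csv API, so the target file should be opened in text mode with newline=''.

from __future__ import unicode_literals  # only needed on Python 2, so the literals are unicode
import io
from backports import csv

# Open in text mode with newline='' as the csv module expects.
with io.open('example.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['name', 'value'])
    writer.writerows([['alpha', 1], ['beta', 2]])
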
Example 1: fetch
def fetch(self):
    fd, tmp_file = tempfile.mkstemp()
    pip = PointInPolygon(self.polygon_id, 60)

    traffic_signs = []
    reader = json.loads(open(self.mapping, 'r').read())
    try:
        for row in reader:
            traffic_signs += row['object']
    except:
        self.logger.err(row)
        raise

    with open(tmp_file, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['accuracy', 'direction', 'image_key', 'first_seen_at', 'last_seen_at', 'value', 'X', 'Y'])

    slice = lambda A, n: [A[i:i+n] for i in range(0, len(A), n)]

    bboxes = pip.bboxes()
    start_time = (datetime.today() - timedelta(days=365*2)).isoformat()[0:10]
    b = 0
    for traffic_signs_ in slice(traffic_signs, 10):
        b = b + 1
        self.logger.log('Batch {0}/{1}: {2}'.format(b, round(len(traffic_signs) / 10 + 0.5), ','.join(traffic_signs_)))
        for bbox in bboxes:
            url = 'https://a.mapillary.com/v3/map_features?bbox={bbox}&client_id={client_id}&layers={layer}&per_page=1000&start_time={start_time}&values={values}'.format(bbox=','.join(map(str, bbox)), layer=self.layer, client_id='MEpmMTFQclBTUWlacjV6RTUxWWMtZzo5OTc2NjY2MmRiMDUwYmMw', start_time=start_time, values=','.join(traffic_signs_))
            print(url)
            with open(tmp_file, 'a') as csvfile:
                writer = csv.writer(csvfile)
                r = None
                page = 0
                while(url):
                    page = page + 1
                    self.logger.log("Page {0}".format(page))
                    r = downloader.get(url)
                    url = r.links['next']['url'] if 'next' in r.links else None
                    features = r.json()['features']
                    filtered = 0
                    self.logger.log('{0} features fetched'.format(len(features)))
                    for j in features:
                        p = j['properties']
                        image_key = p['detections'][0]['image_key']
                        gc = j['geometry']['coordinates']
                        row = [p['accuracy'], p['direction'] if 'direction' in p else None, image_key, p['first_seen_at'], p['last_seen_at'], p['value']] + gc
                        if row[0] > 0.01 and pip.point_inside_polygon(gc[0], gc[1]):
                            writer.writerow(row)
                            filtered = filtered + 1
                    self.logger.log('{0} keeped'.format(filtered))

    return tmp_file
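
The temporary file produced by fetch is a plain CSV whose first row is the header written above. A reader-side sketch (hypothetical, not part of the analyser), assuming tmp_file is the path returned by fetch:

# Hypothetical consumer of the temporary CSV written by fetch();
# DictReader keys come from the header row written above.
import io
from backports import csv

with io.open(tmp_file, 'r', newline='', encoding='utf-8') as f:
    for record in csv.DictReader(f):
        print(record['value'], record['X'], record['Y'])
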
Example 2: test_writerows
def test_writerows(self):
    class BrokenFile:
        def write(self, buf):
            raise OSError
    writer = csv.writer(BrokenFile())
    self.assertRaises(OSError, writer.writerows, [['a']])

    with TemporaryFile("w+", newline='') as fileobj:
        writer = csv.writer(fileobj)
        self.assertRaises(TypeError, writer.writerows, None)
        writer.writerows([['a','b'],['c','d']])
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), "a,b\r\nc,d\r\n")
Example 3: _write_test
def _write_test(self, fields, expect, **kwargs):
    with TemporaryFile("w+", newline='') as fileobj:
        writer = csv.writer(fileobj, **kwargs)
        writer.writerow(fields)
        fileobj.seek(0)
        self.assertEqual(fileobj.read(),
                         expect + writer.dialect.lineterminator)
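
_write_test is a test helper: it writes one row with the given writer arguments and compares the file contents against the expected string plus the dialect's line terminator. A hedged sketch of how such a helper is typically invoked (these calls are assumptions, not copied from the surrounding test suite):

# Illustrative calls: QUOTE_ALL quotes every field, and a custom
# delimiter changes the separator in the expected output.
self._write_test(['a', 'b'], '"a","b"', quoting=csv.QUOTE_ALL)
self._write_test(['a', 'b'], 'a;b', delimiter=';')
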
Example 4: export_to_csv
def export_to_csv(self, result_list, export_filename="ACRCloud_ScanFile_Results.csv", export_dir="./"):
    try:
        results = []
        for item in result_list:
            filename = item["file"]
            timestamp = item["timestamp"]
            jsoninfo = item["result"]
            if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                row = self.parse_data(jsoninfo)
                row = [filename, timestamp] + list(row)
                results.append(row)

        results = sorted(results, key=lambda x: x[1])

        export_filepath = os.path.join(export_dir, export_filename)
        with codecs.open(export_filepath, 'w', 'utf-8-sig') as f:
            head_row = ['filename', 'timestamp', 'custom_files_title', 'custom_acrid', 'title', 'artists', 'album',
                        'acrid', 'played_duration', 'label', 'isrc', 'upc', 'dezzer', 'spotify', 'itunes', 'youtube']
            dw = csv.writer(f)
            dw.writerow(head_row)
            dw.writerows(results)
        if self.debug:
            self.log.info("export_to_csv.Save Data to csv: {0}".format(export_filename))
    except Exception as e:
        self.log.error("error@export_to_csv", exc_info=True)
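
Because the file above is opened through codecs.open with the 'utf-8-sig' codec, a BOM is prepended so that spreadsheet applications detect UTF-8. A minimal read-back sketch (assumed, using the example's default file name) that keeps the BOM out of the first header cell:

# Reading the exported CSV back; 'utf-8-sig' strips the BOM on read.
import io
from backports import csv

with io.open('ACRCloud_ScanFile_Results.csv', 'r', newline='', encoding='utf-8-sig') as f:
    rows = list(csv.reader(f))
    header, data = rows[0], rows[1:]
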
Example 5: write_csv
def write_csv(filename, rows):
    with io.open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["latitude", "longitude", "datetime"])
        for row in rows:
            writer.writerow([row["latitude"], row["longitude"], row["datetime"]])
    f.close()  # redundant: the with statement already closed the file
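
A short usage sketch for write_csv, with hypothetical data (each row is a mapping with the three expected keys):

# Hypothetical call; file name and coordinates are made up for illustration.
write_csv('locations.csv', [
    {'latitude': 52.52, 'longitude': 13.405, 'datetime': '2019-01-01T12:00:00Z'},
    {'latitude': 48.86, 'longitude': 2.35, 'datetime': '2019-01-02T08:30:00Z'},
])
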
Example 6: writeUniqueResults
def writeUniqueResults(clustered_dupes, input_file, output_file):

    # Write our original data back out to a CSV with a new column called
    # 'Cluster ID' which indicates which records refer to each other.

    logging.info('saving unique results to: %s' % output_file)

    cluster_membership = {}
    for cluster_id, (cluster, score) in enumerate(clustered_dupes):
        for record_id in cluster:
            cluster_membership[record_id] = cluster_id

    unique_record_id = cluster_id + 1

    writer = csv.writer(output_file)

    reader = csv.reader(StringIO(input_file))

    heading_row = next(reader)
    heading_row.insert(0, u'Cluster ID')
    writer.writerow(heading_row)

    seen_clusters = set()
    for row_id, row in enumerate(reader):
        if row_id in cluster_membership:
            cluster_id = cluster_membership[row_id]
            if cluster_id not in seen_clusters:
                row.insert(0, cluster_id)
                writer.writerow(row)
                seen_clusters.add(cluster_id)
        else:
            cluster_id = unique_record_id
            unique_record_id += 1
            row.insert(0, cluster_id)
            writer.writerow(row)
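
writeUniqueResults takes the raw input CSV as a string (it wraps it in StringIO itself) and any writable file object for the output, so it can be exercised entirely in memory. A hedged sketch with made-up data (real clusters would come from the deduplication step):

# Hypothetical in-memory call; the cluster tuple mimics (record_ids, scores).
import io

input_csv = 'name,phone\r\nAlice,555-0100\r\nAlice,555-0100\r\nBob,555-0199\r\n'
clusters = [((0, 1), (0.95, 0.95))]   # rows 0 and 1 form one cluster
out = io.StringIO()
writeUniqueResults(clusters, input_csv, out)
print(out.getvalue())  # one row per cluster, plus the unmatched row, each with a Cluster ID
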
Example 7: get
def get(self, request, *args, **kwargs):
    object_list = self.get_queryset()[:2000]

    # Do reasonable ACL check for global
    acl_obj = self.translation or self.component or self.project
    if not acl_obj:
        for change in object_list:
            if change.component:
                acl_obj = change.component
                break

    if not request.user.has_perm('change.download', acl_obj):
        raise PermissionDenied()

    # Always output in english
    activate('en')

    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=changes.csv'

    writer = csv.writer(response)

    # Add header
    writer.writerow(('timestamp', 'action', 'user', 'url', 'target'))

    for change in object_list:
        writer.writerow((
            change.timestamp.isoformat(),
            change.get_action_display(),
            change.user.username if change.user else '',
            get_site_url(change.get_absolute_url()),
            change.target,
        ))

    return response
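
csv.writer only needs an object with a write() method, which is why it can write straight into the Django HttpResponse above. The same pattern works with any minimal file-like sink; a small sketch (not from Weblate, class name is made up):

# Minimal file-like sink; csv.writer only calls .write() on it.
from backports import csv

class LineCollector(object):
    def __init__(self):
        self.lines = []
    def write(self, data):
        self.lines.append(data)

sink = LineCollector()
writer = csv.writer(sink)
writer.writerow(['timestamp', 'action', 'user'])
# ''.join(sink.lines) == 'timestamp,action,user\r\n'
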
Example 8: test_roundtrip_escaped_unquoted_newlines
def test_roundtrip_escaped_unquoted_newlines(self):
    with TemporaryFile("w+", newline="") as fileobj:
        writer = csv.writer(fileobj, quoting=csv.QUOTE_NONE, escapechar="\\")
        rows = [["a\nb", "b"], ["c", "x\r\nd"]]
        writer.writerows(rows)
        fileobj.seek(0)
        for i, row in enumerate(csv.reader(fileobj, quoting=csv.QUOTE_NONE, escapechar="\\")):
            self.assertEqual(row, rows[i])
Example 9: compare_dialect_123
def compare_dialect_123(self, expected, *writeargs, **kwwriteargs):
    with TemporaryFile("w+", newline="", encoding="utf-8") as fileobj:
        writer = csv.writer(fileobj, *writeargs, **kwwriteargs)
        writer.writerow([1, 2, 3])
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), expected)
Example 10: test_unicode_write
def test_unicode_write(self):
    import io
    with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
        writer = csv.writer(fileobj)
        writer.writerow(self.names)
        expected = ",".join(self.names)+"\r\n"
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), expected)
Example 11: test_roundtrip_quoteed_newlines
def test_roundtrip_quoteed_newlines(self):
    with TemporaryFile("w+", newline="") as fileobj:
        writer = csv.writer(fileobj)
        self.assertRaises(TypeError, writer.writerows, None)
        rows = [["a\nb", "b"], ["c", "x\r\nd"]]
        writer.writerows(rows)
        fileobj.seek(0)
        for i, row in enumerate(csv.reader(fileobj)):
            self.assertEqual(row, rows[i])
Example 12: test_quote_nonnumeric_decimal
def test_quote_nonnumeric_decimal(self):
    """Decimals should not be quoted with non-numeric quoting."""
    import decimal
    with TemporaryFile("w+", newline="", encoding="utf-8") as fileobj:
        writer = csv.writer(fileobj, quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow([10, 10.0, decimal.Decimal("10.0"), "10.0"])
        expected = '10,10.0,10.0,"10.0"\r\n'
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), expected)
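
With QUOTE_NONNUMERIC the writer quotes only strings, so on the way back a reader configured with the same quoting option converts every unquoted field to float. A small round-trip sketch (assumed, not part of the test suite):

# Round-trip sketch: unquoted fields come back as float, quoted ones as text.
import io
from backports import csv

buf = io.StringIO('10,10.0,10.0,"10.0"\r\n')
row = next(csv.reader(buf, quoting=csv.QUOTE_NONNUMERIC))
# row == [10.0, 10.0, 10.0, '10.0']
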
Example 13: test_char_write
def test_char_write(self):
    import array, string
    a = array.array(str('u'), text_type(string.ascii_letters))
    with TemporaryFile("w+", newline='') as fileobj:
        writer = csv.writer(fileobj, dialect="excel")
        writer.writerow(a)
        expected = ",".join(a)+"\r\n"
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), expected)
Example 14: test_float_write
def test_float_write(self):
    import array
    contents = [(20-i)*0.1 for i in range(20)]
    a = array.array(str('f'), contents)
    with TemporaryFile("w+", newline='') as fileobj:
        writer = csv.writer(fileobj, dialect="excel")
        writer.writerow(a)
        expected = ",".join([str(i) for i in a])+"\r\n"
        fileobj.seek(0)
        self.assertEqual(fileobj.read(), expected)
Example 15: get
def get(self):
    f = io.StringIO()
    writer = csv.writer(f)

    headers = [
        'User ID',
        'Username',
        'First Name',
        'Last Name',
        'Email',
        'Telephone',
        'Enabled',
        'Admin',
        'Last Login',
        'Last Active',
        'Cohorts',
        'Hospitals',
        'Roles',
    ]

    writer.writerow(headers)

    def get_groups(user, group_type):
        """Comma-separated list of groups."""
        groups = [x.name for x in user.groups if x.type == group_type]
        groups = sorted(groups)
        groups = uniq(groups)
        return ', '.join(groups)

    def get_roles(user):
        """Comma-separated list of roles."""
        roles = [gu.role.name for gu in user.group_users]
        return ', '.join(sorted(set(roles)))

    users = list_users()

    for user in users:
        output = []
        output.append(user.id)
        output.append(user.username)
        output.append(user.first_name)
        output.append(user.last_name)
        output.append(user.email)
        output.append(user.telephone_number)
        output.append(user.is_enabled)
        output.append(user.is_admin)
        output.append(user.last_login_date)
        output.append(user.last_active_date)
        output.append(get_groups(user, GROUP_TYPE.COHORT))
        output.append(get_groups(user, GROUP_TYPE.HOSPITAL))
        output.append(get_roles(user))

        writer.writerow(output)

    return Response(f.getvalue(), content_type='text/csv')