本文整理汇总了Python中unicodecsv.DictReader方法的典型用法代码示例。如果您正苦于以下问题:Python unicodecsv.DictReader方法的具体用法?Python unicodecsv.DictReader怎么用?Python unicodecsv.DictReader使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块unicodecsv
的用法示例。
在下文中一共展示了unicodecsv.DictReader方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cleanup_rows_from_grade_persistent
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def cleanup_rows_from_grade_persistent(csvfn, tempfn, field_to_fix="passed_timestamp"):
    """
    Strip null values from grades_persistentcoursegrade.csv.gz and rewrite
    course ids from their edX URL format to their usual format, e.g.
    course-v1:MITx+STL.162x+2T2017 becomes MITx/STL.162x/2T2017.
    This operation permanently modifies the CSV.
    :param csvfn: The path of the csv.gz to be modified
    :param tempfn: The path of the temporary csv.gz
    :type csvfn: str
    :type tempfn: str
    """
    with gzip.open(csvfn, "r") as source_file:
        reader = csv.DictReader(source_file)
        with gzip.open(tempfn, "w+") as cleaned_file:
            writer = csv.DictWriter(cleaned_file, fieldnames=reader.fieldnames)
            writer.writeheader()
            for raw_row in reader:
                # Clean nulls first, then normalize the course id format.
                cleaned = remove_nulls_from_row(raw_row, field_to_fix)
                writer.writerow(fix_course_ids(cleaned))
    # Atomically replace the original file with the cleaned copy.
    os.rename(tempfn, csvfn)
示例2: test_read_dict_no_fieldnames
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def test_read_dict_no_fieldnames(self):
    """DictReader infers fieldnames from the first CSV row when none are given."""
    fd, tmp_path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w+b")
    try:
        handle.write(b"f1,f2,f3\r\n1,2,abc\r\n")
        handle.seek(0)
        parsed = csv.DictReader(handle)
        # Header row becomes the fieldnames; next() yields the first data row.
        self.assertEqual(parsed.fieldnames, ["f1", "f2", "f3"])
        self.assertEqual(next(parsed), {"f1": "1", "f2": "2", "f3": "abc"})
    finally:
        handle.close()
        os.unlink(tmp_path)
# Two test cases to make sure existing ways of implicitly setting
# fieldnames continue to work. Both arise from discussion in issue3436.
示例3: test_read_short
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def test_read_short(self):
    """Rows shorter than the fieldnames are padded with restval."""
    fd, tmp_path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w+b")
    try:
        handle.write(b"1,2,abc,4,5,6\r\n1,2,abc\r\n")
        handle.seek(0)
        names = ["1", "2", "3", "4", "5", "6"]
        parsed = csv.DictReader(handle, fieldnames=names, restval="DEFAULT")
        # Full row maps each field to its value.
        self.assertEqual(next(parsed),
                         dict(zip(names, ["1", "2", "abc", "4", "5", "6"])))
        # Short row fills the missing trailing fields with the restval.
        self.assertEqual(next(parsed),
                         dict(zip(names, ["1", "2", "abc",
                                          "DEFAULT", "DEFAULT", "DEFAULT"])))
    finally:
        handle.close()
        os.unlink(tmp_path)
示例4: test_node_csv_download
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def test_node_csv_download(self, node, testapp):
    """The nodes CSV export carries the right headers and per-node columns."""
    import unicodecsv as csv
    node.enrolled_on = dt.datetime.utcnow()
    node.last_checkin = dt.datetime.utcnow()
    node.last_ip = '1.1.1.1'
    node.node_info = {'hardware_vendor': "Honest Achmed's Computer Supply"}
    node.save()
    response = testapp.get(url_for('manage.nodes_csv'))
    assert response.headers['Content-Type'] == 'text/csv; charset=utf-8'
    assert response.headers['Content-Disposition'] == 'attachment; filename=nodes.csv'
    # Only the first data row matters: one node was saved.
    first_row = next(csv.DictReader(io.BytesIO(response.body)))
    expected_columns = {
        'Display Name': node.display_name,
        'Host Identifier': node.host_identifier,
        'Enrolled On': str(node.enrolled_on),
        'Last Check-In': str(node.last_checkin),
        'Last Ip Address': node.last_ip,
        'Is Active': 'True',
        'Make': node.node_info['hardware_vendor'],
    }
    for column, value in expected_columns.items():
        assert first_row[column] == value
示例5: dump_grammar
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def dump_grammar():
    """Print worker responses for each accepted grammar-check MTurk HIT.

    Reads the CSV at OPTS.filename, skipping rejected assignments, and
    optionally filtering to a single worker via OPTS.worker.
    NOTE(review): Python 2 print-statement syntax throughout.
    """
    with open(OPTS.filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # Optional filter down to one worker id.
            if OPTS.worker and row['WorkerId'] != OPTS.worker: continue
            # Skip rejected assignments entirely.
            if row['AssignmentStatus'] == 'Rejected': continue
            print 'HIT %s' % row['HITId']
            print 'WorkerId: %s' % row['WorkerId']
            print 'Time: %s s' % row['WorkTimeInSeconds']
            # Parallel tab-separated columns: one entry per example in the HIT.
            input_qids = row['Input.qids'].split('\t')
            input_sents = row['Input.sents'].split('\t')
            ans_is_good = row['Answer.is-good'].split('\t')
            ans_responses = row['Answer.responses'].split('\t')
            for qid, s, is_good, response in zip(input_qids, input_sents,
                                                 ans_is_good, ans_responses):
                print (' Example %s' % qid)
                # .encode('utf-8') so non-ASCII sentences print under Python 2.
                print (' Sentence: %s' % s).encode('utf-8')
                print (' Is good? %s' % is_good)
                print (' Response: %s' % colored(response, 'cyan')).encode('utf-8')
示例6: dump_verify
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def dump_verify():
    """Print worker responses for each accepted verification MTurk HIT.

    Reads the CSV at OPTS.filename, skipping rejected assignments, and
    optionally filtering to a single worker via OPTS.worker.
    NOTE(review): Python 2 print-statement syntax throughout.
    """
    with open(OPTS.filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # Optional filter down to one worker id.
            if OPTS.worker and row['WorkerId'] != OPTS.worker: continue
            # Skip rejected assignments entirely.
            if row['AssignmentStatus'] == 'Rejected': continue
            print 'HIT %s' % row['HITId']
            print 'WorkerId: %s' % row['WorkerId']
            print 'Time: %s s' % row['WorkTimeInSeconds']
            # Parallel tab-separated columns: one entry per example in the HIT.
            qids = row['Input.qids'].split('\t')
            questions = row['Input.questions'].split('\t')
            sents = row['Answer.sents'].split('\t')
            responses = row['Answer.responses'].split('\t')
            for qid, q, s_str, response_str in zip(
                    qids, questions, sents, responses):
                print (' Example %s' % qid)
                print (' Question %s' % q)
                # Each example packs '|'-separated sentences with a
                # parallel '|'-separated list of per-sentence answers.
                s_list = s_str.split('|')
                a_list = response_str.split('|')
                for s, a in zip(s_list, a_list):
                    # .encode('utf-8') so non-ASCII text prints under Python 2.
                    print (' Sentence: %s' % sent_format(s)).encode('utf-8')
                    print (' Is good? %s' % colored(a, 'cyan'))
示例7: pred_human_eval
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def pred_human_eval():
    """Aggregate MTurk answers into one prediction per question id and print
    the result as JSON.

    With OPTS.ensemble set, picks an answer given identically by at least
    two workers; if no answer repeats (or ensemble is off), samples one of
    the answers uniformly at random.
    NOTE(review): Python 2 print-statement syntax.
    """
    # qid -> list of every worker response for that question
    all_preds = collections.defaultdict(list)
    with open(OPTS.filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            all_preds[row['Input.qid']].append(row['Answer.response'])
    preds = {}
    for qid in all_preds:
        if OPTS.ensemble:
            for a in all_preds[qid]:
                # count includes a itself, so count > 1 means this exact
                # answer was given by at least two workers.
                count = sum(1 for pred in all_preds[qid] if a == pred)
                if count > 1:
                    preds[qid] = a
                    break
            else:
                # for-else: loop never broke, i.e. no answer repeated;
                # fall back to a random choice among the responses.
                preds[qid] = random.sample(all_preds[qid], 1)[0]
        else:
            preds[qid] = random.sample(all_preds[qid], 1)[0]
    print json.dumps(preds)
示例8: featureset
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def featureset(self):
    """
    Opens the corpus path, reads the data and constructs features to
    pass to the classifier, caching the result after the first call.
    Returns a list of (feats, label) pairs, one per corpus row -- the
    format expected by the MaxentClassifier.
    """
    if self._featureset is None:
        # Time how long feature extraction takes.
        extraction_start = time.time()
        self._featureset = []
        with open(self.corpus, 'r') as handle:
            for record in csv.DictReader(handle):
                # The 'category' column is the label; everything else
                # feeds the featurizer as keyword arguments.
                label = record.pop('category')
                feats = self.featurizer.featurize(**record)
                self._featureset.append((feats, label))
        # Record feature extraction time.
        self.feattime = time.time() - extraction_start
    return self._featureset
示例9: rephrase_studentmodule_opaque_keys
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def rephrase_studentmodule_opaque_keys(fn_sm):
    '''
    Generate rephrased studentmodule, with opaque key entries for module_id and course_id translated
    into traditional format.

    The input file is first copied aside to studentmodule_orig.csv.gz in the
    same directory, then fn_sm is rewritten in place from that copy.

    :param fn_sm: path to the studentmodule csv.gz file to rewrite
    NOTE(review): Python 2 print-statement syntax.
    '''
    fn_sm = path(fn_sm)
    # Keep a pristine copy of the input alongside it before overwriting.
    orig_sm_fn = '%s/studentmodule_orig.csv.gz' % (fn_sm.dirname())
    # NOTE(review): shelling out to `cp` breaks on paths with spaces or shell
    # metacharacters; shutil.copy would be safer.
    cmd = 'cp %s %s' % (fn_sm, orig_sm_fn)
    print " Running %s" % cmd
    sys.stdout.flush()
    os.system(cmd)
    ofp = openfile(fn_sm, 'w')
    smfp = openfile(orig_sm_fn)
    cdr = csv.DictReader(smfp)
    # The DictWriter is created lazily on the first row so that
    # cdr.fieldnames is populated; with no data rows, nothing is written
    # (not even a header).
    first = True
    for entry in cdr:
        if first:
            odw = csv.DictWriter(ofp, fieldnames=cdr.fieldnames)
            odw.writeheader()
            first = False
        # Rewrite opaque-key formats in place on the row dict.
        fix_opaque_keys(entry, 'module_id')
        fix_opaque_keys(entry, 'course_id')
        odw.writerow(entry)
    ofp.close()
    print "Rephrased %s -> %s to convert opaque keys syntax to standard module_id and course_id format" % (orig_sm_fn, fn_sm)
    sys.stdout.flush()
#-----------------------------------------------------------------------------
示例10: load_csv
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def load_csv(self, fn, key, schema=None, multi=False, fields=None, keymap=None, useCourseDir=True ):
    '''
    Load a csv file into memory, storing rows in a dict keyed by the
    specified field (key).
    If multi, each dict value is a list, with one or more rows per key.
    If fields, keep only those specified fields from each row.
    '''
    data = OrderedDict()
    if keymap is None:
        keymap = lambda x: x
    for row in csv.DictReader(self.openfile(fn, useCourseDir=useCourseDir)):
        try:
            row_key = keymap(row[key])
        except Exception as err:
            # Log the offending row before re-raising so it can be found.
            self.log("oops, failed to do keymap, key=%s, line=%s" % (row[key], row))
            raise
        if fields:
            # Project the row down to just the requested fields.
            row = {name: row[name] for name in fields}
        if not multi:
            data[row_key] = row
        elif row_key in data:
            data[row_key].append(row)
        else:
            data[row_key] = [row]
    return data
示例11: gxp_file_reader
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def gxp_file_reader(fn):
    """Yield one dict per data row of the tab-separated file ``fn``.

    Rows whose ``id`` column starts with ``#`` are treated as comments
    and skipped.

    :param fn: path to a tab-delimited file with a header row
    """
    # Use a context manager so the file handle is closed when the generator
    # is exhausted or discarded; the original opened the file and leaked it.
    with open(fn, "r") as fh:
        for rec in csv.DictReader(fh, delimiter=str("\t")):
            if rec["id"].startswith("#"):
                continue
            yield rec
示例12: test_parser_test_completeness
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def test_parser_test_completeness(self):
    """ensure that all rules in grammar have tests"""
    rule_pattern = re.compile(r"^(\w+)")
    grammar_fn = pkg_resources.resource_filename("hgvs", "_data/hgvs.pymeta")
    # Rule definitions start at column 0 with a bare identifier.
    with open(grammar_fn, "r") as f:
        matches = (rule_pattern.match(line) for line in f)
        grammar_rules = set(m.group(1) for m in matches if m)
    # The test table names the rule it exercises in its Func column.
    with open(self._test_fn, "r") as f:
        test_rules = set(row["Func"] for row in csv.DictReader(f, delimiter=str("\t")))
    untested_rules = grammar_rules - test_rules
    self.assertTrue(len(untested_rules) == 0, "untested rules: {}".format(untested_rules))
示例13: test_parser_grammar
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def test_parser_grammar(self):
    """Run every row of the grammar test table through the parser and
    assert that no case fails.

    Each row names a grammar rule (Func), tab-separated test inputs,
    expected results, and whether the inputs are valid.
    """
    with open(self._test_fn, "r") as f:
        reader = csv.DictReader(f, delimiter=str("\t"))
        fail_cases = []
        for row in reader:
            # Rows whose Func starts with '#' are comments; skip them.
            if row["Func"].startswith("#"):
                continue
            # setup input
            inputs = self._split_inputs(row["Test"], row["InType"])
            # An empty Expected column means the parse should round-trip
            # each input unchanged.
            expected_results = self._split_inputs(row["Expected"],
                                                  row["InType"]) if row["Expected"] else inputs
            expected_map = dict(zip(inputs, expected_results))
            # step through each item and check
            is_valid = True if row["Valid"].lower() == "true" else False
            for key in expected_map:
                # Normalize u'' repr prefixes so py2/py3 results compare equal.
                expected_result = six.text_type(expected_map[key]).replace("u'", "'")
                function_to_test = getattr(self.p._grammar(key), row["Func"])
                row_str = u"{}\t{}\t{}\t{}\t{}".format(row["Func"], key, row["Valid"], "one",
                                                       expected_result)
                try:
                    actual_result = six.text_type(function_to_test()).replace("u'", "'")
                    # A successful parse is a failure if the row was marked
                    # invalid, or if the result differs from the expectation.
                    if not is_valid or (expected_result != actual_result):
                        print("expected: {} actual:{}".format(expected_result, actual_result))
                        fail_cases.append(row_str)
                except Exception as e:
                    # An exception only counts as a failure for valid inputs;
                    # invalid inputs are expected to raise.
                    if is_valid:
                        print("expected: {} Exception: {}".format(expected_result, e))
                        fail_cases.append(row_str)
    # everything should have passed - report whatever failed
    self.assertTrue(len(fail_cases) == 0, pprint.pprint(fail_cases))
示例14: gcp_file_reader
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def gcp_file_reader(fn):
    """Yield one dict per data row of the tab-separated file ``fn``.

    Rows whose ``id`` column starts with ``#`` are treated as comments
    and skipped.

    :param fn: path to a tab-delimited file with a header row
    """
    # Use a context manager so the file handle is closed when the generator
    # is exhausted or discarded; the original opened the file and leaked it.
    with open(fn, "r") as fh:
        for rec in csv.DictReader(fh, delimiter=str("\t")):
            if rec["id"].startswith("#"):
                continue
            yield rec
示例15: load_csv
# 需要导入模块: import unicodecsv [as 别名]
# 或者: from unicodecsv import DictReader [as 别名]
def load_csv(sqla_engine, file_name, table_name=None):
    """Load a fixture CSV into a new database table and return the table.

    The table name defaults to the file name without its extension; each
    inserted row gets a sequential '_id' column.
    """
    table_name = table_name or os.path.basename(file_name).split('.')[0]
    csv_path = os.path.join(FIXTURE_PATH, file_name)
    table = None
    with open(csv_path, 'rb') as handle:
        for index, row in enumerate(unicodecsv.DictReader(handle)):
            # Create the table lazily from the first row's column names.
            if table is None:
                table = _create_table(sqla_engine, table_name, row.keys())
            row['_id'] = str(index)
            sqla_engine.execute(table.insert(_convert_row(row)))
    return table