本文整理匯總了Python中unicodecsv.reader方法的典型用法代碼示例。如果您正苦於以下問題:Python unicodecsv.reader方法的具體用法?Python unicodecsv.reader怎麽用?Python unicodecsv.reader使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類unicodecsv
的用法示例。
在下文中一共展示了unicodecsv.reader方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _assert_no_duplicates
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def _assert_no_duplicates(self, input_path, encoding, sep, quotechar):
    """Check that the header row of a CSV file has no repeated column names.

    Args:
        input_path: path to the input file; only ``.csv`` files are checked.
        encoding: text encoding passed through to the unicodecsv reader.
        sep: field delimiter.
        quotechar: quoting character.

    Raises:
        DuplicatedField: for the first column name (in header order) that
            appears more than once.
    """
    from collections import Counter

    if input_path.endswith('.csv'):
        with open(input_path, 'rb') as csvfile:
            reader = unicodecsv.reader(csvfile,
                                       encoding=encoding,
                                       delimiter=sep,
                                       quotechar=quotechar)
            # Empty file -> empty header -> nothing to check.
            fields = next(reader, [])
            # Count every name once (O(n)) instead of calling
            # fields.count() per column (O(n^2)).
            counts = Counter(fields)
            for col in fields:
                if counts[col] > 1:
                    raise DuplicatedField(col)
    # TODO: Implementar chequeo de que no hay duplicados para XLSX
    elif input_path.endswith('.xlsx'):
        pass
示例2: _load_space
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def _load_space(self, space_names):
    """Build a mapping from segment/punctuation symbol to a dense index.

    Collects punctuation marks from the per-script ``punc-<script>.csv``
    files and IPA segments from each ``<name>.csv`` space file, then
    numbers the union in sorted order.
    """
    symbols = set()
    # Each space name looks like "<lang>-<script>"; one punctuation file
    # per distinct script.
    scripts = {nm.split('-')[1] for nm in space_names}
    for script in scripts:
        rel = os.path.join('data', 'space', 'punc-{}.csv'.format(script))
        path = pkg_resources.resource_filename(__name__, rel)
        with open(path, 'rb') as fh:
            for (mark,) in csv.reader(fh, encoding='utf-8'):
                symbols.add(mark)
    for name in space_names:
        rel = os.path.join('data', 'space', name + '.csv')
        path = pkg_resources.resource_filename(__name__, rel)
        with open(path, 'rb') as fh:
            for _, target in csv.reader(fh, encoding='utf-8'):
                symbols.update(self.epi.ft.ipa_segs(target))
    return {sym: idx for idx, sym in enumerate(sorted(symbols))}
示例3: _read_bases
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def _read_bases(self, fn, weights):
    """Read a feature-base CSV into (ipa, Segment) pairs.

    Returns a 3-tuple: the ordered list of (ipa, Segment) pairs, the same
    data as a dict, and the list of feature names from the header.
    """
    value_map = {'-': -1, '0': 0, '+': 1}
    fn = pkg_resources.resource_filename(__name__, fn)
    pairs = []
    with open(fn, 'rb') as fh:
        rows = csv.reader(fh, encoding='utf-8')
        # First header cell is the IPA column; the rest are feature names.
        names = next(rows)[1:]
        for record in rows:
            # NFD-normalize so lookups are insensitive to composed forms.
            ipa = unicodedata.normalize('NFD', record[0])
            values = [value_map[cell] for cell in record[1:]]
            segment = Segment(names, dict(zip(names, values)), weights=weights)
            pairs.append((ipa, segment))
    return pairs, dict(pairs), names
示例4: _read_table
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def _read_table(self, filename):
    """Read the data from data/ipa_all.csv into self.segments, a
    list of 2-tuples of unicode strings and sets of feature tuples and
    self.seg_dict, a dictionary mapping from unicode segments and sets of
    feature tuples.
    """
    filename = pkg_resources.resource_filename(__name__, filename)
    pairs = []
    with open(filename, 'rb') as fh:
        rows = csv.reader(fh, encoding='utf-8')
        # Header: first cell is the segment column, the rest feature names.
        names = next(rows)[1:]
        for record in rows:
            feature_set = set(zip(record[1:], names))
            pairs.append((record[0], feature_set))
    return pairs, dict(pairs), names
示例5: get_kanji
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def get_kanji(level, current_pos=1):
    """Return a single kanji record from the CSV for the given JLPT level.

    Args:
        level: 1 - 4 (N1 to N4); key into KANJI_FILENAMES.
        current_pos: 1-based line position of the record to fetch,
            up to the number of records in the file.

    Returns:
        A dict mapping KANJI_FIELDS to the values on that line, or an
        empty dict when current_pos is past the end of the file.
    """
    kanji = {}
    with open(KANJI_FILENAMES[level], 'rb') as fobj:
        reader = csv.reader(fobj, delimiter=',', encoding='utf-8')
        # enumerate(start=1) replaces the manual line counter and keeps
        # positions 1-based to match current_pos.
        for line_no, line in enumerate(reader, start=1):
            if line_no == current_pos:
                # dict() consumes the zip directly; the intermediate
                # list() was redundant.
                kanji = dict(zip(KANJI_FIELDS, line))
                break
    return kanji
示例6: get_vocabulary
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def get_vocabulary(current_pos=1):
    """Return a single vocabulary record from the vocabulary CSV.

    Args:
        current_pos: 1-based line position of the record to fetch,
            up to the number of records in the file.

    Returns:
        A dict mapping VOCABULARY_FIELDS to the values on that line, or
        an empty dict when current_pos is past the end of the file.
    """
    vocabulary = {}
    with open(VOCABULARY_FILENAME, 'rb') as fobj:
        reader = csv.reader(fobj, delimiter=',', encoding='utf-8')
        # enumerate(start=1) replaces the manual line counter and keeps
        # positions 1-based to match current_pos.
        for line_no, line in enumerate(reader, start=1):
            if line_no == current_pos:
                # dict() consumes the zip directly; the intermediate
                # list() was redundant.
                vocabulary = dict(zip(VOCABULARY_FIELDS, line))
                break
    return vocabulary
示例7: test_space_dialect
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def test_space_dialect(self):
    """A custom space-delimited dialect parses whitespace-separated rows."""
    class space(csv.excel):
        delimiter = " "
        quoting = csv.QUOTE_NONE
        escapechar = "\\"
    fd, path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w+b")
    try:
        handle.write(b"abc def\nc1ccccc1 benzene\n")
        handle.seek(0)
        rows = csv.reader(handle, dialect=space())
        self.assertEqual(next(rows), ["abc", "def"])
        self.assertEqual(next(rows), ["c1ccccc1", "benzene"])
    finally:
        handle.close()
        os.unlink(path)
示例8: test_read_dict_no_fieldnames
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def test_read_dict_no_fieldnames(self):
    """DictReader takes its fieldnames from the first row when none are given."""
    fd, path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w+b")
    try:
        handle.write(b"f1,f2,f3\r\n1,2,abc\r\n")
        handle.seek(0)
        dict_reader = csv.DictReader(handle)
        self.assertEqual(dict_reader.fieldnames, ["f1", "f2", "f3"])
        self.assertEqual(next(dict_reader),
                         {"f1": '1', "f2": '2', "f3": 'abc'})
    finally:
        handle.close()
        os.unlink(path)
# Two test cases to make sure existing ways of implicitly setting
# fieldnames continue to work. Both arise from discussion in issue3436.
示例9: test_read_short
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def test_read_short(self):
    """Short rows are padded with restval for the missing trailing fields."""
    fd, path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w+b")
    try:
        handle.write(b"1,2,abc,4,5,6\r\n1,2,abc\r\n")
        handle.seek(0)
        dict_reader = csv.DictReader(handle,
                                     fieldnames=["1", "2", "3", "4", "5", "6"],
                                     restval="DEFAULT")
        self.assertEqual(next(dict_reader),
                         {"1": '1', "2": '2', "3": 'abc',
                          "4": '4', "5": '5', "6": '6'})
        self.assertEqual(next(dict_reader),
                         {"1": '1', "2": '2', "3": 'abc',
                          "4": 'DEFAULT', "5": 'DEFAULT',
                          "6": 'DEFAULT'})
    finally:
        handle.close()
        os.unlink(path)
示例10: __init__
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def __init__(self, s3_config, s3_path, unicode_csv, **kwargs):
    """Open a gzipped CSV object on S3 and wrap it in a CSV reader.

    The raw S3 stream is decompressed by GzipFile, decoded as UTF-8 text
    (newline='' so the csv module handles line endings itself), and then
    handed to get_csv_reader; extra kwargs go to the reader.
    """
    self.s3file = s3_config.fs_open(_strip_schema(s3_path))
    decompressed = GzipFile(fileobj=self.s3file, mode='rb')
    self.gzfile = TextIOWrapper(decompressed, encoding='utf-8', newline='')
    self.reader = get_csv_reader(self.gzfile, unicode_csv, **kwargs)
示例11: __iter__
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def __iter__(self):
    """Delegate iteration to the wrapped CSV reader."""
    return iter(self.reader)
示例12: next
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def next(self):
    """Return the next parsed row from the wrapped CSV reader.

    Uses the next() builtin instead of the Python-2-only ``.next()``
    method, so the delegation works whether the wrapped reader exposes
    ``next()`` (Py2 unicodecsv) or ``__next__()`` (Py3 csv).
    """
    return next(self.reader)
示例13: get_csv_reader
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def get_csv_reader(iterable, unicode_csv, **kwargs):
    """Return a CSV reader over *iterable*.

    On Python 2, when unicode_csv is requested, rows are encoded to
    UTF-8 and handed to the third-party ``unicodecsv`` reader; on
    Python 3 the stdlib csv module handles unicode natively, so it is
    used regardless.
    """
    needs_unicodecsv = unicode_csv and sys.version_info.major == 2
    if needs_unicodecsv:
        return unicodecsv.reader(_encode_rows_to_utf8(iterable), **kwargs)
    return csv.reader(iterable, **kwargs)
示例14: import_csv_to_list
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def import_csv_to_list(filename, headers = False, astuple = False):
    """Load a CSV file into a list of rows.

    Args:
        filename: path to the CSV file (opened in binary mode, as
            unicodecsv decodes the bytes itself).
        headers: when truthy, skip the first row as a header.
        astuple: when truthy, return each row as a tuple instead of a list.

    Returns:
        A list of rows (lists by default, tuples when astuple is set).
    """
    # `fobj` instead of `file`: don't shadow the builtin.
    with open(filename, 'rb') as fobj:
        reader = unicodecsv.reader(fobj, delimiter = ',', quotechar = '"')
        if headers:  # plain truthiness instead of `== True`
            next(reader, None)  # Skip header row.
        if astuple:
            return [tuple(row) for row in reader]
        return list(reader)
示例15: import_csv_to_dict
# 需要導入模塊: import unicodecsv [as 別名]
# 或者: from unicodecsv import reader [as 別名]
def import_csv_to_dict(filename, params = None, headers = False):
    """Load a CSV file into a list of dicts keyed by column names.

    Args:
        filename: path to the CSV file.
        params: sequence of keys zipped with each row; ignored (replaced
            by the header row) when headers is truthy.
        headers: when truthy, read the key names from the first row.

    Returns:
        A list of dicts, one per data row.
    """
    data = []
    # 'rb' instead of the deprecated 'rU': unicodecsv expects bytes and
    # text mode breaks it under Python 3.
    with open(filename, 'rb') as fobj:
        reader = unicodecsv.reader(fobj, delimiter = ',', quotechar = '"')
        if(headers):
            # next() builtin instead of the Python-2-only reader.next().
            params = next(reader)
        for row in reader:
            data.append(dict(zip(params, row)))
    return data