This article collects typical usage examples of pandas.errors.ParserWarning in Python. If you have been wondering what exactly errors.ParserWarning is for, how to use it, or what real-world code that relies on it looks like, the curated examples below should help. You can also explore the rest of the pandas.errors module, where this warning class is defined.
The following shows 9 code examples involving errors.ParserWarning, ordered roughly by popularity.
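Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the column names and CSV snippet are made up for illustration) of the most common way ParserWarning shows up in practice: pandas.read_csv warns when both dtype and converters target the same column, because the converter takes precedence.

import warnings
from io import StringIO

import pandas as pd
from pandas.errors import ParserWarning

data = "a,b\n1.1,2.2\n1.2,2.3"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # dtype for column "a" is ignored because a converter is also given;
    # pandas reports this via a ParserWarning.
    df = pd.read_csv(StringIO(data), dtype={"a": "i8"}, converters={"a": str})

assert any(issubclass(w.category, ParserWarning) for w in caught)
print(df.dtypes)  # "a" stays object because the converter returned strings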
Example 1: test_dialect
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dialect(self):
    data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
    dia = csv.excel()
    dia.quoting = csv.QUOTE_NONE
    with tm.assert_produces_warning(ParserWarning):
        df = self.read_csv(StringIO(data), dialect=dia)

    data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
    exp = self.read_csv(StringIO(data))
    exp.replace('a', '"a', inplace=True)
    tm.assert_frame_equal(df, exp)
Example 2: test_dialect_str
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dialect_str(self):
    data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
    exp = DataFrame({
        'fruit': ['apple', 'pear'],
        'vegetable': ['brocolli', 'tomato']
    })
    csv.register_dialect('mydialect', delimiter=':')
    with tm.assert_produces_warning(ParserWarning):
        df = self.read_csv(StringIO(data), dialect='mydialect')
    tm.assert_frame_equal(df, exp)

    csv.unregister_dialect('mydialect')
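Examples 1 and 2 call the test class's self.read_csv helper. A rough standalone equivalent using the public pandas.read_csv is sketched below; whether the dialect by itself triggers a ParserWarning depends on the pandas version (newer releases only warn when an explicitly passed argument conflicts with the dialect), so this snippet only demonstrates the register/unregister pattern and leaves the warning assertion out.

import csv
from io import StringIO

import pandas as pd

csv.register_dialect("mydialect", delimiter=":")
try:
    df = pd.read_csv(StringIO("fruit:vegetable\napple:brocolli\npear:tomato"),
                     dialect="mydialect")
finally:
    # Always unregister, otherwise the dialect leaks into later code.
    csv.unregister_dialect("mydialect")

print(df)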
Example 3: test_dtype_with_converters
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dtype_with_converters(all_parsers):
    parser = all_parsers
    data = """a,b
1.1,2.2
1.2,2.3"""

    # The dtype spec is ignored if a converter is also specified.
    with tm.assert_produces_warning(ParserWarning):
        result = parser.read_csv(StringIO(data), dtype={"a": "i8"},
                                 converters={"a": lambda x: str(x)})
    expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
    tm.assert_frame_equal(result, expected)
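The same behaviour can be asserted against the public API with pytest.warns, matching part of the warning message that the parser code in Examples 8 and 9 below actually emits ("only the converter will be used"). A small sketch, assuming pytest is available:

import pytest
from io import StringIO

import pandas as pd
from pandas.errors import ParserWarning

def test_converter_beats_dtype():
    data = "a,b\n1.1,2.2\n1.2,2.3"
    with pytest.warns(ParserWarning, match="only the converter will be used"):
        result = pd.read_csv(StringIO(data), dtype={"a": "i8"},
                             converters={"a": str})
    # The converter wins, so column "a" holds strings, not int64.
    assert list(result["a"]) == ["1.1", "1.2"]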
Example 4: test_dialect_conflict_except_delimiter
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect,
                                           arg, value):
    # see gh-23761.
    dialect_name, dialect_kwargs = custom_dialect
    parser = all_parsers

    expected = DataFrame({"a": [1], "b": [2]})
    data = "a:b\n1:2"

    warning_klass = None
    kwds = dict()

    # arg=None tests when we pass in the dialect without any other arguments.
    if arg is not None:
        if value == "dialect":  # No conflict --> no warning.
            kwds[arg] = dialect_kwargs[arg]
        elif value == "default":  # Default --> no warning.
            from pandas.io.parsers import _parser_defaults
            kwds[arg] = _parser_defaults[arg]
        else:  # Non-default + conflict with dialect --> warning.
            warning_klass = ParserWarning
            kwds[arg] = "blah"

    with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
        with tm.assert_produces_warning(warning_klass):
            result = parser.read_csv(StringIO(data),
                                     dialect=dialect_name, **kwds)
            tm.assert_frame_equal(result, expected)
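Example 4 depends on pieces the excerpt does not show: the all_parsers and custom_dialect fixtures and two @pytest.mark.parametrize decorators live in pandas' own test conftest. Purely to illustrate their shape (these are plausible stand-ins, not the actual pandas definitions), a hypothetical custom_dialect fixture could look like this:

import pytest

@pytest.fixture
def custom_dialect():
    # Hypothetical stand-in: a dialect name plus the keyword arguments
    # it is registered with inside tm.with_csv_dialect(...).
    dialect_name = "weird"
    dialect_kwargs = {
        "doublequote": False,
        "escapechar": "~",
        "skipinitialspace": False,
        "quotechar": "~",
        "delimiter": ":",
    }
    return dialect_name, dialect_kwargs

# The test itself would then be parametrized roughly like:
# @pytest.mark.parametrize("arg", [None, "doublequote", "escapechar",
#                                  "skipinitialspace", "quotechar"])
# @pytest.mark.parametrize("value", ["dialect", "default", "other"])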
Example 5: test_dialect_conflict
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dialect_conflict(self):
    data = 'a,b\n1,2'
    dialect = 'excel'
    exp = DataFrame({'a': [1], 'b': [2]})

    with tm.assert_produces_warning(None):
        df = self.read_csv(StringIO(data), delimiter=',', dialect=dialect)
        tm.assert_frame_equal(df, exp)

    with tm.assert_produces_warning(ParserWarning):
        df = self.read_csv(StringIO(data), delimiter='.', dialect=dialect)
        tm.assert_frame_equal(df, exp)
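The conflict in Example 5 is easy to reproduce outside the test class: an explicitly passed delimiter that disagrees with the chosen dialect makes read_csv warn and then honour the dialect. A small sketch against the public API (the behaviour exercised by the test above; the exact warning text may vary between pandas versions):

import warnings
from io import StringIO

import pandas as pd
from pandas.errors import ParserWarning

data = "a,b\n1,2"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # delimiter='.' conflicts with the 'excel' dialect, which uses ','.
    df = pd.read_csv(StringIO(data), delimiter=".", dialect="excel")

assert any(issubclass(w.category, ParserWarning) for w in caught)
print(df)  # parsed with ',' taken from the dialect, so two columns: a and b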
Example 6: test_dtype_with_converter
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_dtype_with_converter(self):
    data = """a,b
1.1,2.2
1.2,2.3"""
    # The dtype spec is ignored if a converter is also specified.
    with tm.assert_produces_warning(ParserWarning):
        result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
                               converters={'a': lambda x: str(x)})
    expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
    tm.assert_frame_equal(result, expected)
Example 7: test_from_csv_sep_none
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def test_from_csv_sep_none(make_csv_file):
    make_csv_file()

    with pytest.warns(ParserWarning):
        pandas_df = pandas.read_csv(TEST_CSV_FILENAME, sep=None)
    with pytest.warns(ParserWarning):
        modin_df = pd.read_csv(TEST_CSV_FILENAME, sep=None)

    df_equals(modin_df, pandas_df)
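The warning in Example 7 comes from engine fallback: sep=None asks pandas to sniff the delimiter, which only the python engine supports, so when no engine is requested pandas switches automatically and says so via ParserWarning. Requesting the python engine explicitly should avoid the warning, which is worth knowing when you sniff delimiters on purpose. A short sketch (not from the test above; the data is made up):

from io import StringIO

import pandas as pd

data = "a;b\n1;2\n3;4"

# Passing engine="python" together with sep=None avoids the fallback warning,
# because no automatic engine switch is needed.
df = pd.read_csv(StringIO(data), sep=None, engine="python")
print(df)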
Example 8: _convert_to_ndarrays
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
                         converters=None, dtypes=None):
    result = {}
    for c, values in compat.iteritems(dct):
        conv_f = None if converters is None else converters.get(c, None)
        if isinstance(dtypes, dict):
            cast_type = dtypes.get(c, None)
        else:
            # single dtype or None
            cast_type = dtypes

        if self.na_filter:
            col_na_values, col_na_fvalues = _get_na_values(
                c, na_values, na_fvalues, self.keep_default_na)
        else:
            col_na_values, col_na_fvalues = set(), set()

        if conv_f is not None:
            # conv_f applied to data before inference
            if cast_type is not None:
                warnings.warn(("Both a converter and dtype were specified "
                               "for column {0} - only the converter will "
                               "be used").format(c), ParserWarning,
                              stacklevel=7)

            try:
                values = lib.map_infer(values, conv_f)
            except ValueError:
                mask = algorithms.isin(
                    values, list(na_values)).view(np.uint8)
                values = lib.map_infer_mask(values, conv_f, mask)

            cvals, na_count = self._infer_types(
                values, set(col_na_values) | col_na_fvalues,
                try_num_bool=False)
        else:
            # skip inference if specified dtype is object
            try_num_bool = not (cast_type and is_string_dtype(cast_type))

            # general type inference and conversion
            cvals, na_count = self._infer_types(
                values, set(col_na_values) | col_na_fvalues,
                try_num_bool)

            # type specified in dtype param
            if cast_type and not is_dtype_equal(cvals, cast_type):
                cvals = self._cast_types(cvals, cast_type, c)

        result[c] = cvals
        if verbose and na_count:
            print('Filled %d NA values in column %s' % (na_count, str(c)))
    return result
Example 9: _convert_to_ndarrays
# Required import: from pandas import errors
# Or: from pandas.errors import ParserWarning
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
                         converters=None, dtypes=None):
    result = {}
    for c, values in compat.iteritems(dct):
        conv_f = None if converters is None else converters.get(c, None)
        if isinstance(dtypes, dict):
            cast_type = dtypes.get(c, None)
        else:
            # single dtype or None
            cast_type = dtypes

        if self.na_filter:
            col_na_values, col_na_fvalues = _get_na_values(
                c, na_values, na_fvalues)
        else:
            col_na_values, col_na_fvalues = set(), set()

        if conv_f is not None:
            # conv_f applied to data before inference
            if cast_type is not None:
                warnings.warn(("Both a converter and dtype were specified "
                               "for column {0} - only the converter will "
                               "be used").format(c), ParserWarning,
                              stacklevel=7)

            try:
                values = lib.map_infer(values, conv_f)
            except ValueError:
                mask = algorithms.isin(
                    values, list(na_values)).view(np.uint8)
                values = lib.map_infer_mask(values, conv_f, mask)

            cvals, na_count = self._infer_types(
                values, set(col_na_values) | col_na_fvalues,
                try_num_bool=False)
        else:
            # skip inference if specified dtype is object
            try_num_bool = not (cast_type and is_string_dtype(cast_type))

            # general type inference and conversion
            cvals, na_count = self._infer_types(
                values, set(col_na_values) | col_na_fvalues,
                try_num_bool)

            # type specified in dtype param
            if cast_type and not is_dtype_equal(cvals, cast_type):
                cvals = self._cast_types(cvals, cast_type, c)

        if issubclass(cvals.dtype.type, np.integer) and self.compact_ints:
            cvals = lib.downcast_int64(
                cvals, parsers.na_values,
                self.use_unsigned)

        result[c] = cvals
        if verbose and na_count:
            print('Filled %d NA values in column %s' % (na_count, str(c)))
    return result
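Finally, because ParserWarning is issued through the standard warnings machinery (as the warnings.warn calls in Examples 8 and 9 show), the usual filters apply: you can silence it in noisy pipelines or escalate it to an error in strict environments. A short sketch:

import warnings
from io import StringIO

import pandas as pd
from pandas.errors import ParserWarning

data = "a;b\n1;2"

# Option 1: silence ParserWarning for a pipeline that sniffs delimiters on purpose.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", ParserWarning)
    df = pd.read_csv(StringIO(data), sep=None)  # engine fallback, warning suppressed

# Option 2: escalate the warning to an error so misconfigured calls fail fast.
with warnings.catch_warnings():
    warnings.simplefilter("error", ParserWarning)
    try:
        pd.read_csv(StringIO(data), sep=None)
    except ParserWarning as exc:
        print("read_csv configuration problem:", exc)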