當前位置: 首頁>>代碼示例>>Python>>正文


Python compat.BytesIO方法代碼示例

本文整理匯總了Python中pandas.compat.BytesIO方法的典型用法代碼示例。如果您正苦於以下問題:Python compat.BytesIO方法的具體用法?Python compat.BytesIO怎麽用?Python compat.BytesIO使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在pandas.compat的用法示例。


在下文中一共展示了compat.BytesIO方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_variable_width_unicode

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_variable_width_unicode():
    # Inferred column specs must agree with explicit colspecs when reading
    # fixed-width unicode data from a bytes buffer.
    if not compat.PY3:
        pytest.skip("Bytes-related test - only needs to work on Python 3")

    raw = """
שלום שלום
ום   שלל
של   ום
""".strip("\r\n")
    enc = "utf8"
    common_kwargs = {"header": None, "encoding": enc}

    explicit = read_fwf(BytesIO(raw.encode(enc)),
                        colspecs=[(0, 4), (5, 9)], **common_kwargs)
    inferred = read_fwf(BytesIO(raw.encode(enc)), **common_kwargs)
    tm.assert_frame_equal(inferred, explicit)
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:18,代碼來源:test_read_fwf.py

示例2: test_read_csv_chunked_download

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_read_csv_chunked_download(self, s3_resource, caplog):
        # Upload an ~8 MB CSV; s3fs uses 5 MB chunks, so nrows=5 should
        # trigger only the first ranged fetch.
        # (Removed a dead `buf = BytesIO()` assignment that was immediately
        # overwritten below.)
        df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
        str_buf = StringIO()

        df.to_csv(str_buf)

        buf = BytesIO(str_buf.getvalue().encode('utf-8'))

        s3_resource.Bucket("pandas-test").put_object(
            Key="large-file.csv",
            Body=buf)

        with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
            read_csv("s3://pandas-test/large-file.csv", nrows=5)
            # log of fetch_range (start, stop)
            assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:20,代碼來源:test_network.py

示例3: __init__

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
                 chunksize=None):
        """Open a SAS XPORT source for reading.

        Parameters
        ----------
        filepath_or_buffer : str, path, or file-like
            Path to the XPORT file, or an open buffer to read from.
        index : optional
            Stored for later use by the reader (presumably the index
            column identifier -- confirm against callers).
        encoding : str, default 'ISO-8859-1'
            Encoding used when a text buffer must be converted to bytes.
        chunksize : int, optional
            Stored for chunked iteration.
        """
        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            (filepath_or_buffer, encoding,
             compression, should_close) = get_filepath_or_buffer(
                filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding.
            contents = filepath_or_buffer.read()
            # Only text needs encoding; bytes payloads are used as-is.
            # The previous EAFP `contents.encode(...)` guarded by
            # `except UnicodeEncodeError` raised AttributeError for binary
            # buffers on Python 3 and UnicodeDecodeError for non-ascii
            # py2 str -- neither was caught.
            if isinstance(contents, compat.text_type):
                contents = contents.encode(self._encoding)
            self.filepath_or_buffer = compat.BytesIO(contents)

        self._read_header()
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:27,代碼來源:sas_xport.py

示例4: test_read_csv_chunked_download

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_read_csv_chunked_download(self, s3_resource, caplog):
        # 8 MB file; s3fs uses 5 MB chunks, so nrows=5 should need only
        # the first ranged fetch.
        # (Removed a dead `buf = BytesIO()` assignment that was immediately
        # overwritten below; replaced set(genexp) with a set comprehension.)
        df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
        str_buf = StringIO()

        df.to_csv(str_buf)

        buf = BytesIO(str_buf.getvalue().encode('utf-8'))

        s3_resource.Bucket("pandas-test").put_object(
            Key="large-file.csv",
            Body=buf)

        with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
            read_csv("s3://pandas-test/large-file.csv", nrows=5)
            # log of fetch_range (start, stop)
            assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
開發者ID:birforce,項目名稱:vnpy_crypto,代碼行數:20,代碼來源:test_network.py

示例5: test_multithread_stringio_read_csv

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_multithread_stringio_read_csv(self):
        # see gh-11786
        max_row_range = 10000
        num_files = 100

        # Identical CSV payloads, one bytes blob per simulated "file".
        bytes_to_df = [
            '\n'.join(
                ['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
            ).encode() for _ in range(num_files)]
        files = [BytesIO(b) for b in bytes_to_df]

        # Read all files in many threads; always release the pool
        # (the original leaked it by never calling close/join).
        pool = ThreadPool(8)
        try:
            results = pool.map(self.read_csv, files)
        finally:
            pool.close()
            pool.join()

        first_result = results[0]

        for result in results:
            tm.assert_frame_equal(first_result, result)
開發者ID:birforce,項目名稱:vnpy_crypto,代碼行數:20,代碼來源:multithread.py

示例6: __init__

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
                 chunksize=None):
        """Open a SAS XPORT source for reading.

        Parameters
        ----------
        filepath_or_buffer : str, path, or file-like
            Path to the XPORT file, or an open buffer to read from.
        index : optional
            Stored for later use by the reader (presumably the index
            column identifier -- confirm against callers).
        encoding : str, default 'ISO-8859-1'
            Encoding used when a text buffer must be converted to bytes.
        chunksize : int, optional
            Stored for chunked iteration.
        """
        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            (filepath_or_buffer, encoding,
             compression, should_close) = get_filepath_or_buffer(
                filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding.
            contents = filepath_or_buffer.read()
            # Only text needs encoding; bytes payloads are used as-is.
            # This replaces a bare `except:` that silently swallowed every
            # exception (including KeyboardInterrupt/SystemExit) around
            # `contents.encode(...)`.
            if isinstance(contents, compat.text_type):
                contents = contents.encode(self._encoding)
            self.filepath_or_buffer = compat.BytesIO(contents)

        self._read_header()
開發者ID:birforce,項目名稱:vnpy_crypto,代碼行數:27,代碼來源:sas_xport.py

示例7: __init__

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
                 chunksize=None):
        """Open a SAS XPORT source for reading.

        Parameters
        ----------
        filepath_or_buffer : str, path, or file-like
            Path to the XPORT file, or an open buffer to read from.
        index : optional
            Stored for later use by the reader (presumably the index
            column identifier -- confirm against callers).
        encoding : str, default 'ISO-8859-1'
            Encoding used when a text buffer must be converted to bytes.
        chunksize : int, optional
            Stored for chunked iteration.
        """
        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            filepath_or_buffer, encoding, compression = get_filepath_or_buffer(
                filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding.
            contents = filepath_or_buffer.read()
            # Only text needs encoding; bytes payloads are used as-is.
            # This replaces a bare `except:` that silently swallowed every
            # exception (including KeyboardInterrupt/SystemExit) around
            # `contents.encode(...)`.
            if isinstance(contents, compat.text_type):
                contents = contents.encode(self._encoding)
            self.filepath_or_buffer = compat.BytesIO(contents)

        self._read_header()
開發者ID:nccgroup,項目名稱:Splunking-Crime,代碼行數:26,代碼來源:sas_xport.py

示例8: test_bytes_io_input

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_bytes_io_input():
    # Fixed-width unicode data supplied through an in-memory bytes buffer.
    if not compat.PY3:
        pytest.skip("Bytes-related test - only needs to work on Python 3")

    buffer = BytesIO("שלום\nשלום".encode('utf8'))
    frame = read_fwf(buffer, widths=[2, 2], encoding="utf8")

    expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
    tm.assert_frame_equal(frame, expected)
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:10,代碼來源:test_read_fwf.py

示例9: test_buffer_rd_bytes_bad_unicode

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_buffer_rd_bytes_bad_unicode(c_parser_only):
    # see gh-22748
    parser = c_parser_only
    stream = BytesIO(b"\xB0")

    if not PY3:
        expected_msg = "'utf8' codec can't decode byte"
    else:
        expected_msg = "'utf-8' codec can't encode character"
        stream = TextIOWrapper(stream, encoding="ascii", errors="surrogateescape")

    with pytest.raises(UnicodeError, match=expected_msg):
        parser.read_csv(stream, encoding="UTF-8")
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:15,代碼來源:test_c_parser_only.py

示例10: test_read_csv_handles_boto_s3_object

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_read_csv_handles_boto_s3_object(self,
                                             s3_resource,
                                             tips_file):
        # see gh-16135
        obj = s3_resource.meta.client.get_object(
            Bucket='pandas-test',
            Key='tips.csv')
        body = obj["Body"].read()

        result = read_csv(BytesIO(body), encoding='utf8')
        assert isinstance(result, DataFrame)
        assert not result.empty

        tm.assert_frame_equal(result, read_csv(tips_file))
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:17,代碼來源:test_network.py

示例11: test_bytes_io_input

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_bytes_io_input(all_parsers):
    # cp1255-encoded bytes should round-trip through read_csv.
    if compat.PY2:
        pytest.skip("Bytes-related test does not need to work on Python 2.x")

    codec = "cp1255"
    parser = all_parsers

    buffer = BytesIO("שלום:1234\n562:123".encode(codec))
    frame = parser.read_csv(buffer, sep=":", encoding=codec)

    expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
    tm.assert_frame_equal(frame, expected)
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:14,代碼來源:test_common.py

示例12: test_read_csv_unicode

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_read_csv_unicode(all_parsers):
    # Unicode text encoded to UTF-8 bytes should parse correctly.
    parser = all_parsers
    payload = u("\u0141aski, Jan;1").encode("utf-8")

    frame = parser.read_csv(BytesIO(payload), sep=";", encoding="utf-8",
                            header=None)
    tm.assert_frame_equal(frame, DataFrame([[u("\u0141aski, Jan"), 1]]))
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:9,代碼來源:test_common.py

示例13: test_utf16_bom_skiprows

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_utf16_bom_skiprows(all_parsers, sep, encoding):
    # see gh-2298
    parser = all_parsers
    raw = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""").replace(",", sep)
    path = "__%s__.csv" % tm.rands(10)
    read_kwargs = dict(sep=sep, skiprows=2)
    utf8 = "utf-8"

    with tm.ensure_clean(path) as path:
        # On-disk copy in the parametrized encoding (possibly with a BOM).
        with open(path, "wb") as f:
            f.write(raw.encode(encoding))

        # In-memory reference copy in plain UTF-8.
        reference_buffer = BytesIO(raw.encode(utf8))
        if compat.PY3:
            from io import TextIOWrapper
            reference_buffer = TextIOWrapper(reference_buffer, encoding=utf8)

        result = parser.read_csv(path, encoding=encoding, **read_kwargs)
        expected = parser.read_csv(reference_buffer, encoding=utf8,
                                   **read_kwargs)
        reference_buffer.close()

        tm.assert_frame_equal(result, expected)
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:31,代碼來源:test_common.py

示例14: test_utf16_example

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_utf16_example(all_parsers, csv_dir_path, buffer):
    path = os.path.join(csv_dir_path, "utf16_ex.txt")
    parser = all_parsers

    src = BytesIO(open(path, "rb").read()) if buffer else path
    result = parser.read_csv(src, encoding="utf-16", sep="\t")
    assert len(result) == 50 
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:9,代碼來源:test_common.py

示例15: test_utf8_bom

# 需要導入模塊: from pandas import compat [as 別名]
# 或者: from pandas.compat import BytesIO [as 別名]
def test_utf8_bom(all_parsers, data, kwargs, expected):
    # see gh-4793
    parser = all_parsers
    bom = u("\ufeff")
    utf8 = "utf-8"

    def _with_bom(raw):
        # Prepend the BOM and hand back an encoded bytes buffer.
        return BytesIO((bom + raw).encode(utf8))

    result = parser.read_csv(_with_bom(data), encoding=utf8, **kwargs)
    tm.assert_frame_equal(result, expected)
開發者ID:Frank-qlu,項目名稱:recruit,代碼行數:15,代碼來源:test_common.py


注:本文中的pandas.compat.BytesIO方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。