

Python csv.field_size_limit Method Code Examples

This article collects and summarizes typical usage examples of the csv.field_size_limit method in Python. If you are wondering what csv.field_size_limit does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the csv module it belongs to.


The following presents 15 code examples of the csv.field_size_limit method, sorted by popularity by default.
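
Before the examples, a quick note on the API itself: csv.field_size_limit is a module-level getter/setter. Called with no argument it returns the current per-field size limit (the default is 131072 characters); called with a new value it installs that limit and returns the one that was in effect before the call. The limit is global to the process, so setting it affects every reader. When a field exceeds the limit, the reader raises csv.Error with a "field larger than field limit" message, which is exactly what several examples below guard against. A minimal sketch:

import csv

old_limit = csv.field_size_limit()    # read the current limit; the default is 131072
csv.field_size_limit(10_000_000)      # install a larger limit; returns the previous one
assert csv.field_size_limit() == 10_000_000
csv.field_size_limit(old_limit)       # restore the original limit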

Example 1: Records

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def Records(self):
    """Reads the CSV data file and generates row records.

    Yields:
      Lists of strings

    Raises:
      ResumeError: If the progress database and data file indicate a different
        number of rows.
    """
    csv_file = self.openfile(self.csv_filename, 'rb')
    reader = self.create_csv_reader(csv_file, skipinitialspace=True)
    try:
      for record in reader:
        yield record
    except csv.Error as e:
      if e.args and e.args[0].startswith('field larger than field limit'):
        raise FieldSizeLimitError(csv.field_size_limit())
      else:
        raise
Developer: elsigh, Project: browserscope, Lines: 23, Source: bulkloader.py

Example 2: test_read_bigfield

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def test_read_bigfield(self):
        # This exercises the buffer realloc functionality and field size
        # limits.
        limit = csv.field_size_limit()
        try:
            size = 50000
            bigstring = 'X' * size
            bigline = '%s,%s' % (bigstring, bigstring)
            self._read_test([bigline], [[bigstring, bigstring]])
            csv.field_size_limit(size)
            self._read_test([bigline], [[bigstring, bigstring]])
            self.assertEqual(csv.field_size_limit(), size)
            csv.field_size_limit(size-1)
            self.assertRaises(csv.Error, self._read_test, [bigline], [])
            self.assertRaises(TypeError, csv.field_size_limit, None)
            self.assertRaises(TypeError, csv.field_size_limit, 1, None)
        finally:
            csv.field_size_limit(limit) 
Developer: IronLanguages, Project: ironpython2, Lines: 20, Source: test_csv.py

Example 3: index

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def index(sqlite_filename, tsv_filename):
    logger.info('Reading tsv file %s', tsv_filename)
    # https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072
    # https://github.com/balta2ar/brotab/issues/25
    # It should work on Python 3 and Python 2, on any CPU / OS.
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    with open(tsv_filename, encoding='utf-8') as tsv_file:
        lines = [tuple(line) for line in csv.reader(tsv_file, delimiter='\t',
                                                    quoting=csv.QUOTE_NONE)]

    logger.info(
        'Creating sqlite DB filename %s from tsv %s (%s lines)',
        sqlite_filename, tsv_filename, len(lines))
    conn = sqlite3.connect(sqlite_filename)
    cursor = conn.cursor()
    with suppress(sqlite3.OperationalError):
        cursor.execute('drop table tabs;')
    cursor.execute(
        'create virtual table tabs using fts5('
        '    tab_id, title, url, body, tokenize="porter unicode61");')
    cursor.executemany('insert into tabs values (?, ?, ?, ?)', lines)
    conn.commit()
    conn.close() 
Developer: balta2ar, Project: brotab, Lines: 26, Source: index.py
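
A note on the ctypes expression above: csv.field_size_limit stores its value in a C long, so csv.field_size_limit(sys.maxsize) raises OverflowError wherever long is 32 bits while sys.maxsize is 2**63 - 1 (notably 64-bit Windows). ctypes.c_ulong(-1) wraps around to the platform's maximum unsigned long, and halving it yields the largest signed long, which the csv module always accepts. A small illustration (printed values are platform-dependent):

import ctypes
import sys

max_c_long = int(ctypes.c_ulong(-1).value // 2)
print(max_c_long)   # 2147483647 on Windows, 9223372036854775807 on most 64-bit Unix builds
print(sys.maxsize)  # 9223372036854775807 on any 64-bit build, even where C long is 32 bits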

Example 4: load_lookup_file_from_disk

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def load_lookup_file_from_disk(file_path):
    """
    parse the lookup file from the given path and return the result

    Args:
        file_path (string): the path to the lookup file

    Returns:
        lookup_data (dict): result from the csv parser
    """
    if not file_exists(file_path):
        raise RuntimeError('Not valid filepath: {}'.format(file_path))

    try:
        with open(file_path, mode='r') as f:
            reader = csv.DictReader(f)
            csv.field_size_limit(CSV_FILESIZE_LIMIT)
            lookup_data = next(reader)
    except Exception as e:
        raise RuntimeError('Error reading model file: %s, %s' % (file_path, str(e)))

    return lookup_data 
Developer: nccgroup, Project: Splunking-Crime, Lines: 24, Source: lookups_util.py

Example 5: _load_sentence_file

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def _load_sentence_file(self, filename):
    csv.field_size_limit(sys.maxsize)
    opener = gzip.open if filename.endswith('.gz') else open
    entities = dict()
    with opener(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE)
        for row in reader:
            qid = row[0].strip()
            sentence_json = row[1].strip()
            if sentence_json:
                payload = json.loads(sentence_json)
                annotations = payload['annotations']
                sentence_entities = [x['id'] for x in annotations]
                sentence_entities = [str(x) for x in sentence_entities]
                entities[qid] = sentence_entities
            else:
                entities[qid] = []
    return entities
Developer: rmit-ir, Project: SummaryRank, Lines: 20, Source: resources.py

Example 6: init

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def init(args):
    """
        Load data, build model, create optimizer, create vars to hold metrics, etc.
    """
    #need to handle really large text fields
    csv.field_size_limit(sys.maxsize)

    #load vocab and other lookups
    desc_embed = args.lmbda > 0
    print("loading lookups...")
    dicts = datasets.load_lookups(args, desc_embed=desc_embed)

    model = tools.pick_model(args, dicts)
    print(model)

    if not args.test_model:
        optimizer = optim.Adam(model.parameters(), weight_decay=args.weight_decay, lr=args.lr)
    else:
        optimizer = None

    params = tools.make_param_dict(args)
    
    return args, model, optimizer, params, dicts 
Developer: jamesmullenbach, Project: caml-mimic, Lines: 25, Source: training.py

Example 7: read_file

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def read_file(data_dir, with_evaluation):
    data = []
    target = []
    with open(join(data_dir, 'dataset.csv'), 'rt', encoding='utf-8') as csvfile:
        csv.field_size_limit(500 * 1024 * 1024)
        reader = csv.reader(csvfile)
        for row in reader:
            if data_dir == './agnews':
                doc = row[1] + '. ' + row[2]
                data.append(doc)
                target.append(int(row[0]) - 1)
            elif data_dir == './yelp':
                data.append(row[1])
                target.append(int(row[0]) - 1)
    if with_evaluation:
        y = np.asarray(target)
        assert len(data) == len(y)
        assert set(range(len(np.unique(y)))) == set(np.unique(y))
    else:
        y = None
    return data, y 
Developer: yumeng5, Project: WeSTClass, Lines: 23, Source: load_data.py

Example 8: _increase_csv_field_max_size

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def _increase_csv_field_max_size():
    """Makes document entry in dataset as big as possible

    References
    ----------
    https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072

    """
    max_int = sys.maxsize

    while True:
        try:
            csv.field_size_limit(max_int)

            break

        except OverflowError:
            max_int = int(max_int / 10) 
Developer: machine-intelligence-laboratory, Project: TopicNet, Lines: 20, Source: dataset.py
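
A hypothetical caller would run this helper once before constructing any readers; each OverflowError means the candidate is still too large for the underlying C long, so the loop retries with a tenth of the value until one is accepted. A usage sketch (dataset.csv is a placeholder filename):

import csv

_increase_csv_field_max_size()  # raise the limit as far as this platform allows

with open('dataset.csv', newline='', encoding='utf-8') as f:
    for row in csv.reader(f):
        pass  # rows with very large fields now parse without csv.Error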

Example 9: unicode_csv_reader

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def unicode_csv_reader(unicode_csv_data: TextIOWrapper, **kwargs: Any) -> Any:
    r"""Since the standard csv library does not handle unicode in Python 2, we need a wrapper.
    Borrowed and slightly modified from the Python docs:
    https://docs.python.org/2/library/csv.html#csv-examples
    Args:
        unicode_csv_data (TextIOWrapper): unicode csv data (see example below)

    Examples:
        >>> from torchaudio.datasets.utils import unicode_csv_reader
        >>> import io
        >>> with io.open(data_path, encoding="utf8") as f:
        >>>     reader = unicode_csv_reader(f)
    """

    # Fix field larger than field limit error
    maxInt = sys.maxsize
    while True:
        # decrease the maxInt value by factor 10
        # as long as the OverflowError occurs.
        try:
            csv.field_size_limit(maxInt)
            break
        except OverflowError:
            maxInt = int(maxInt / 10)
    csv.field_size_limit(maxInt)

    for line in csv.reader(unicode_csv_data, **kwargs):
        yield line 
Developer: pytorch, Project: audio, Lines: 30, Source: utils.py

Example 10: __init__

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def __init__(self, sf):
        # Set csv max reading size to the platform's max size available.
        csv.field_size_limit(sys.maxsize)
        self.sf = sf 
Developer: singer-io, Project: tap-salesforce, Lines: 6, Source: bulk.py

Example 11: ContentGenerator

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def ContentGenerator(csv_file,
                     batch_size,
                     create_csv_reader=csv.reader,
                     create_csv_writer=csv.writer):
  """Retrieves CSV data up to a batch size at a time.

  Args:
    csv_file: A file-like object for reading CSV data.
    batch_size: Maximum number of CSV rows to yield on each iteration.
    create_csv_reader, create_csv_writer: Used for dependency injection.

  Yields:
    Tuple (entity_count, csv_content) where:
      entity_count: Number of entities contained in the csv_content. Will be
        less than or equal to the batch_size and greater than 0.
      csv_content: String containing the CSV content containing the next
        entity_count entities.
  """
  try:
    csv.field_size_limit(800000)
  except AttributeError:
    # Very old versions of the csv module lack field_size_limit; keep the
    # default limit in that case.
    pass

  reader = create_csv_reader(csv_file, skipinitialspace=True)
  exhausted = False

  while not exhausted:
    rows_written = 0
    content = StringIO.StringIO()
    writer = create_csv_writer(content)
    try:
      for i in xrange(batch_size):
        row = reader.next()
        writer.writerow(row)
        rows_written += 1
    except StopIteration:
      exhausted = True

    if rows_written > 0:
      yield rows_written, content.getvalue() 
Developer: elsigh, Project: browserscope, Lines: 43, Source: bulkload_client.py

Example 12: __init__

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def __init__(self, limit):
    self.message = """
A field in your CSV input file has exceeded the current limit of %d.

You can raise this limit by adding the following lines to your config file:

import csv
csv.field_size_limit(new_limit)

where new_limit is a number larger than the size in bytes of the largest
field in your CSV.
""" % limit
    Error.__init__(self, self.message) 
Developer: elsigh, Project: browserscope, Lines: 15, Source: bulkloader.py

Example 13: __init__

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def __init__(self, istream: TextIO, sep: str = ',') -> None:
        # bump the built-in limits on field sizes
        csv.field_size_limit(2**24)

        self.reader = csv.DictReader(istream, delimiter=sep) 
Developer: larsyencken, Project: csvdiff, Lines: 7, Source: records.py

Example 14: start

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def start(self):
        csv.field_size_limit(sys.maxsize)
        self.parser = create_parser(name=self.lang, lemmas=True) 
Developer: graphbrain, Project: graphbrain, Lines: 5, Source: csv_parser.py

Example 15: generate_dictionary_BA

# Required module: import csv [as alias]
# Or: from csv import field_size_limit [as alias]
def generate_dictionary_BA(path, files, attributes_list):
    # path = '../Minnemudac/'
    # files = ['Coborn_history_order.csv','Coborn_future_order.csv']
    # files = ['BA_history_order.csv', 'BA_future_order.csv']
    # attributes_list = ['MATERIAL_NUMBER']
    dictionary_table = {}
    counter_table = {}
    for attr in attributes_list:
        dictionary = {}
        dictionary_table[attr] = dictionary
        counter_table[attr] = 0

    csv.field_size_limit(sys.maxsize)
    for filename in files:
        count = 0
        with open(path + filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for row in reader:
                if count == 0:
                    count += 1
                    continue
                key = attributes_list[0]
                if row[2] not in dictionary_table[key]:
                    dictionary_table[key][row[2]] = counter_table[key]
                    counter_table[key] = counter_table[key] + 1
                    count += 1

    print(counter_table)

    total = 0
    for key in counter_table.keys():
        total = total + counter_table[key]

    print('# dimensions of final vector: ' + str(total) + ' | ' + str(count - 1))

    return dictionary_table, total, counter_table 
Developer: HaojiHu, Project: Sets2Sets, Lines: 38, Source: Sets2Sets.py


Note: The csv.field_size_limit method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and redistribution or reuse should follow the corresponding project's license. Please do not reproduce this compilation without permission.