

Python csv.DictReader Class Code Examples

This article collects typical code examples of Python's csv.DictReader class. If you are unsure what the DictReader class does, how to use it, or what real-world DictReader code looks like, the curated class examples below should help.


The following sections present 15 code examples of the DictReader class, sorted by popularity by default.
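
Most of the snippets below are Python 2 code (reader.next(), print statements, urllib2, and so on); under Python 3 the equivalents are next(reader), print(), and urllib.request. As a baseline before the project-specific examples, here is a minimal, self-contained sketch of everyday DictReader use in Python 3; the file name and column names are invented for illustration.

import csv

# Minimal DictReader sketch (Python 3). "people.csv" and its columns are hypothetical.
with open("people.csv", newline="", encoding="utf-8") as f:
    reader = csv.DictReader(f)      # the first row supplies the field names
    print(reader.fieldnames)        # e.g. ['name', 'age', 'city']
    for row in reader:              # each row is a dict keyed by field name
        print(row["name"], int(row["age"]))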

Example 1: __init__

 def __init__(self, fid, commentchar='#', *args, **kwds):
     if issubclass(DictReader, object):
         super(DictReader, self).__init__(fid, *args, **kwds)
     else:
         DictReader.__init__(self, fid, *args, **kwds)
     self.commentchar = commentchar
     self.leadingfield = self.commentchar + 'label'
Developer: BRAINSia, Project: rs-fMRI-pilot, Lines of code: 7, Source: seedWorkflow.py
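
Example 1 subclasses DictReader so that the reader remembers a comment character. As a related illustration (not the author's code), the self-contained Python 3 sketch below skips comment lines entirely by filtering the underlying file before handing it to DictReader; the helper name and the default comment character are assumptions.

import csv

def iter_rows_skipping_comments(path, commentchar="#"):
    """Yield csv.DictReader rows, ignoring lines that start with the comment character."""
    with open(path, newline="", encoding="utf-8") as f:
        # DictReader accepts any iterable of lines, so a generator filter works here.
        data_lines = (line for line in f if not line.lstrip().startswith(commentchar))
        for row in csv.DictReader(data_lines):
            yield row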

Example 2: __init__

 def __init__(self,refGene_fn) :
     refGene_f = open(refGene_fn)
     # check for header
     first_line = refGene_f.next()
     if not first_line.strip().startswith('#') :
         refGene_f.seek(0) # first line not header, reset the file pointer
     DictReader.__init__(self,refGene_f,delimiter='\t',fieldnames=RefGeneOutput.FIELD_NAMES)
Developer: dvanderk, Project: chipsequtil, Lines of code: 7, Source: chipsequtil.py

Example 3: load_data

def load_data(uri, dateFormat):
    logging.info('loading data; uri: {0}'.format(uri))
    
    from urllib2 import urlopen
    from csv import DictReader
    
    reader = DictReader(urlopen(uri).readlines())
    
    encodedFieldNames = []
    for fieldname in reader.fieldnames:
        encodedFieldNames.append(fieldname.decode("utf-8-sig").encode("utf-8"))
    reader.fieldnames = encodedFieldNames
    
    data = []
    
    from time import strptime
    
    for row in reader:
        data.append({
            'date': strptime(row['Date'], dateFormat),
            'open': float(row['Open']),
            'close': float(row['Close']),
            'high': float(row['High']),
            'low': float(row['Low']),
            'volume': float(row['Volume'])
        })
    
    return data
Developer: vyacheslav-bezborodov, Project: python, Lines of code: 28, Source: data_processor.py
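
Example 3 is Python 2 code (urllib2, byte-string field names, time.strptime). A rough Python 3 sketch of the same idea, feeding a remote CSV straight into DictReader, is shown below; the column names (Date, Open, Close, High, Low, Volume) come from the example, while everything else is an assumption.

import csv
import io
from datetime import datetime
from urllib.request import urlopen

def load_data(uri, date_format):
    # Rough Python 3 counterpart of Example 3: read OHLCV rows from a remote CSV.
    with urlopen(uri) as response:
        # utf-8-sig drops a possible BOM, which the original handled by re-encoding field names.
        text = io.TextIOWrapper(response, encoding="utf-8-sig")
        reader = csv.DictReader(text)
        return [
            {
                "date": datetime.strptime(row["Date"], date_format),
                "open": float(row["Open"]),
                "close": float(row["Close"]),
                "high": float(row["High"]),
                "low": float(row["Low"]),
                "volume": float(row["Volume"]),
            }
            for row in reader
        ]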

Example 4: __init__

 def __init__(self, f, fieldnames=None, restkey=None, restval=None,
              dialect="excel", *args, **kw):
     DictReader.__init__(self, f, fieldnames=fieldnames,
                         restkey=restkey, restval=restval,
                         dialect=dialect, *args, **kw)
     # Replace the reader with our unicode-enabled reader.
     self.reader = UnicodeReader(f, dialect=dialect, *args, **kw)
Developer: b8va, Project: everest, Lines of code: 7, Source: compat.py

Example 5: test_subset_with_shapefile_no_ugid

    def test_subset_with_shapefile_no_ugid(self):
        """Test a subset operation using a shapefile without a UGID attribute."""

        output_format = [constants.OUTPUT_FORMAT_NUMPY, constants.OUTPUT_FORMAT_CSV_SHAPEFILE]

        geom = self.get_shapefile_path_with_no_ugid()
        geom_select_uid = [8, 11]
        geom_uid = 'ID'
        rd = self.test_data.get_rd('cancm4_tas')

        for of in output_format:
            ops = OcgOperations(dataset=rd, geom=geom, geom_select_uid=geom_select_uid, geom_uid=geom_uid, snippet=True,
                                output_format=of)
            self.assertEqual(len(ops.geom), 2)
            ret = ops.execute()
            if of == constants.OUTPUT_FORMAT_NUMPY:
                for element in geom_select_uid:
                    self.assertIn(element, ret)
                self.assertEqual(ret.properties[8].dtype.names, ('STATE_FIPS', 'ID', 'STATE_NAME', 'STATE_ABBR'))
            else:
                with open(ret) as f:
                    reader = DictReader(f)
                    row = reader.next()
                    self.assertIn(geom_uid, row.keys())
                    self.assertNotIn(env.DEFAULT_GEOM_UID, row.keys())

                shp_path = os.path.split(ret)[0]
                shp_path = os.path.join(shp_path, 'shp', '{0}_gid.shp'.format(ops.prefix))
                with fiona.open(shp_path) as source:
                    record = source.next()
                    self.assertIn(geom_uid, record['properties'])
                    self.assertNotIn(env.DEFAULT_GEOM_UID, record['properties'])
Developer: HydroLogic, Project: ocgis, Lines of code: 32, Source: test_work.py

Example 6: action

def action(args):
    def newname(leaf, newname):
        leaf.name = newname
        return leaf

    tree = Phylo.parse(args.tree, args.tree_type).next()
    leafs = (leaf for leaf in tree.get_terminals())

    if args.info:
        info = DictReader(args.info, fieldnames = ['seqname','newname'])
        info = {i['seqname']:i['newname'] for i in info}

        # for newick trees :s will be replaced by |s
        if args.tree_type == 'newick':
            info = {s.replace(':', '|'):n for s,n in info.items()}

        leafs = (l for l in leafs if l.name in info)
        leafs = (newname(l, info[l.name]) for l in leafs)

    if args.remove_word:
        leafs = (newname(l, re.sub(args.remove_word, '', l.name)) for l in leafs)
        leafs = (newname(l, l.name.strip()) for l in leafs)

    leafs = (newname(l, args.add_prefix + l.name) for l in leafs)
    leafs = (newname(l, l.name + args.add_suffix) for l in leafs)

    # do this last
    if args.tree_type == 'newick':
        leafs = (newname(l, l.name.replace(' ', '_')) for l in leafs)

    # execute changes and write tree
    list(leafs)
    Phylo.write(tree, args.out, args.tree_type)
Developer: crosenth, Project: bioy, Lines of code: 33, Source: tree_edit.py

Example 7: upload_resources

def upload_resources(filename, skip=0, limit=None):
    """Upload  from a CSV file."""
    # Use sys.stdout.write so resources can be printed nicely and succinctly
    import sys

    date_converter = lambda s: datetime.strptime(s, '%Y-%m-%d')
    bool_converter = lambda s: s == "true"
    resource_schema = facility_schema['fields']
    
    convert_map = {
        'integer': int,
        'float': float,
        'datetime': date_converter,
        'boolean': bool_converter
    }

    convert = {}

    for k, v in resource_schema.items():
        field_type = v.get('type')
        if convert_map.has_key(field_type):
            convert[k] = convert_map[field_type]

    def print_flush(msg):
        sys.stdout.write(msg)
        sys.stdout.flush()

    facility_code = facility_schema['facility_code']
    print_every = 1000
    print_flush("Adding resources. Please be patient.")

    with open(filename) as f:
        reader = DictReader(f)
        for i in range(skip):
            reader.next()
        for i, d in enumerate(reader):
            actual_index = i + skip + 2
            do_print = actual_index % print_every == 0
            try:
                d = dict((k, convert.get(k, str)(v)) for k, v in d.items() if v)
                coords = [d.pop('longitude', None), d.pop('latitude', None)]
                if coords[0] and coords[1]:
                    d['location'] = {'type': 'Point', 'coordinates': coords}
                d['facility_code'] = facility_code
                if not check(add_document(facility_schema['endpoint'], d), 201, False):
                    raise Exception()
                if do_print:
                    print_flush(".")

            except Exception as e:
                print "Error adding resource", e
                pprint(d)
                exit()

            if limit and i >= limit:
                break
    # Create a 2dsphere index on the location field for geospatial queries
    app.data.driver.db['resources'].create_index([('location', '2dsphere')])
    print "Resources uploaded!"
Developer: machakux, Project: taarifa_schools, Lines of code: 59, Source: manage.py

Example 8: upload_waterpoints

def upload_waterpoints(filename, skip=0, limit=None):
    """Upload waterpoints from a CSV file."""
    date_converter = lambda s: datetime.strptime(s, '%Y-%m-%d')
    bool_converter = lambda s: s == "true"

    status_map = {
        "non functional": "not functional",
        "functional needs repair": "needs repair"
    }

    status_converter = lambda s: status_map.get(s.lower(), s.lower())

    convert = {
        'gid': int,
        'object_id': int,
        'valid_from': date_converter,
        'valid_to': date_converter,
        'amount_tsh': float,
        'breakdown_year': int,
        'date_recorded': date_converter,
        'gps_height': float,
        'latitude': float,
        'longitude': float,
        'num_private': int,
        'region_code': int,
        'district_code': int,
        'population': int,
        'public_meeting': bool_converter,
        'construction_year': int,
        'status_group': status_converter
    }

    facility_code = "wpf001"

    with open(filename) as f:
        reader = DictReader(f)
        for i in range(skip):
            reader.next()
        for i, d in enumerate(reader):
            print "Adding line", i + skip + 2

            try:
                d = dict((k, convert.get(k, str)(v)) for k, v in d.items() if v)
                coords = [d.pop('longitude'), d.pop('latitude')]
                d['location'] = {'type': 'Point', 'coordinates': coords}
                d['facility_code'] = facility_code
                if not check(add_document('waterpoints', d)):
                    raise Exception()

            except Exception as e:
                print "Error adding waterpoint", e
                pprint(d)
                exit()

            if limit and i >= limit:
                break
    # Create a 2dsphere index on the location field for geospatial queries
    app.data.driver.db['facilities'].create_index([('location', '2dsphere')])
Developer: thinkgandhi, Project: TaarifaWaterpoints, Lines of code: 58, Source: manage.py
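
Examples 7 and 8 share one pattern worth isolating: a dict mapping column names to converter functions, applied to every DictReader row while empty cells are dropped. A stripped-down, generic Python 3 sketch of that pattern (without the database and indexing specifics above) follows; the converter mapping is purely illustrative.

import csv
from datetime import datetime

def typed_rows(path, converters):
    """Yield DictReader rows with per-column converters applied; empty cells are dropped."""
    with open(path, newline="", encoding="utf-8") as f:
        for row in csv.DictReader(f):
            yield {k: converters.get(k, str)(v) for k, v in row.items() if v}

# Illustrative converter map in the spirit of Examples 7 and 8.
converters = {
    "population": int,
    "latitude": float,
    "longitude": float,
    "date_recorded": lambda s: datetime.strptime(s, "%Y-%m-%d"),
    "public_meeting": lambda s: s == "true",
}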

Example 9: extractThresholdValues

def extractThresholdValues(fname):
    # parse csv file and add threshold values as dict
    # this method might be called multiple times for one item

    # There are various formats:
    #   combined.modelEvaluation: Threshold Name, Testing.data, Cutoff,
    #                             Sensitivity, Specificity
    #   biomod2.modelEvaluation: Threshold Name, Testing.data, Cutoff.*,
    #                            Sensitivity.*, Specificity.*
    #   maxentResults.csv: Species,<various columns with interesting values>
    #                <threshold name><space><cumulative threshold,
    #                              logistic threshold,area,training omission>
    # FIXME: this is really ugly and csv format detection should be done
    #        differently
    thresholds = {}
    if fname.endswith("maxentResults.csv"):
        csvfile = open(fname, "r")
        dictreader = DictReader(csvfile)
        row = dictreader.next()
        # There is only one row in maxentResults
        namelist = (
            "Fixed cumulative value 1",
            "Fixed cumulative value 5",
            "Fixed cumulative value 10",
            "Minimum training presence",
            "10 percentile training presence",
            "10 percentile training presence",
            "Equal training sensitivity and specificity",
            "Maximum training sensitivity plus specificity",
            "Balance training omission, predicted area and threshold value",
            "Equate entropy of thresholded and original distributions",
        )
        for name in namelist:
            # We extract only 'cumulative threshold' values
            threshold = "{} cumulative threshold".format(name)
            thresholds[threshold] = Decimal(row[threshold])
    else:
        # assume it's one of our biomod/dismo results
        csvfile = open(fname, "r")
        dictreader = DictReader(csvfile)
        # search the field with Cutoff
        name = "Cutoff"
        for fieldname in dictreader.fieldnames:
            if fieldname.startswith("Cutoff."):
                name = fieldname
                break
        try:
            for row in dictreader:
                try:
                    thresholds[row[""]] = Decimal(row[name])
                except (TypeError, InvalidOperation) as e:
                    LOG.warn(
                        "Couldn't parse threshold value '%s' (%s) from" "file '%s': %s", name, row[name], fname, repr(e)
                    )
        except KeyError:
            LOG.warn("Couldn't extract Threshold '%s' from file '%s'", name, fname)
    return thresholds
Developer: chuijbers, Project: org.bccvl.compute, Lines of code: 57, Source: transmogrify.py

Example 10: __init__

 def __init__(self, f, fieldnames=None, restkey=None, restval=None,
              dialect="excel", encoding='utf-8', *args, **kw):
     DictReader.__init__(self, f, fieldnames=fieldnames,
                         restkey=restkey, restval=restval,
                         dialect=dialect, *args, **kw)
     if not encoding is None:
         f = Utf8Recoder(f, encoding=encoding)
     # Replace the reader with our unicode-enabled reader.
     self.reader = reader(f, dialect=dialect, *args, **kw)
Developer: helixyte, Project: everest, Lines of code: 9, Source: compat.py

Example 11: locations

def locations(rack_locations_path=RACKS_LOCATION_CSV):
    with open(rack_locations_path, 'r') as file:
        csv_file = DictReader(file,
                              ["latitude", "longitude", "icon", "desc", "racks_count", "parking_places"])
        acc = []
        csv_file.__next__()  # Skip the header
        for attributes in csv_file:
            acc.append(RacksLocation(attributes))

        return acc
Developer: maciej, Project: waw-bike-racks, Lines of code: 10, Source: rack_locations.py
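
Example 11 supplies its own field names and then discards the file's header row by calling __next__() directly. A slightly more idiomatic Python 3 sketch of the same pattern uses the built-in next(); the helper name is hypothetical and the column list is the one from the example.

import csv

def read_rack_locations(path):
    # Supply our own field names, then skip the header row that is already in the file.
    fieldnames = ["latitude", "longitude", "icon", "desc", "racks_count", "parking_places"]
    with open(path, newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f, fieldnames=fieldnames)
        next(reader)                      # skip the header line
        return list(reader)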

Example 12: __init__

 def __init__(self, f, fieldnames=None, restkey=None, restval=None,
              dialect="excel",
              encoding=None,
              *args, **kwds):
     BaseDictReader.__init__(self, f=f, fieldnames=fieldnames,
                             restkey=restkey, restval=restval,
                             dialect=dialect,
                             *args, **kwds)
     from .csv import reader
     self.reader = reader(f, dialect=dialect,
                          encoding=encoding,
                          **kwds)
Developer: caktus, Project: cordwainer, Lines of code: 12, Source: dicts.py

Example 13: __init__

  def __init__(self, csv):
    self.bag = Counter()
    reader = DictReader(open(csv, 'r'), fieldnames=[
      "TileFile", "Borders", "Quantity", "Features", "Notes"])
    reader.next()  # skip header, we've defined our own

    for tile_dict in reader:
      tile = Tile.from_csv(tile_dict)
      quantity = int(tile_dict["Quantity"].strip())
      self.bag[tile] = quantity
      if "B" in tile_dict["Features"]:
        self.first_tile = tile
Developer: AlexeyMK, Project: carcassonne-tutorial, Lines of code: 12, Source: game.py

Example 14: number1

def number1():
    filename = '/home/apt9online/src/bslcks/jtest.csv'
    cong = DictReader(open(filename))

    while True:
        p = cong.next()
        print cong.line_num
        if p['Include on directory'] == 'Yes':
          if p['Family relation'] <> 'Duplicate':
            try:
                Person.objects.get(bslc_individual=p['Indiv #'])
                print "%s %s already exists in the DB" % (p['First name'],p['Last name'])
            except:
                record_person(p)
Developer: aawsolutions, Project: bslcks, Lines of code: 14, Source: loadem.py

Example 15: csvInput

def csvInput(file,options,dialect='excel'):
    header=options['header']
    from csv import DictReader
    with open(file,'r') as f:
        if not header:
            reader = DictReader(f,dialect=dialect)
        else:
            reader = DictReader(f,dialect=dialect,fieldnames=header.split(','))
        reader.fieldnames = map(options['alias'],reader.fieldnames)
        entries =[line for line in reader]
        map(lambda(dict):
                dict.update({"file":file,
                             "format":fileType(file)}),
            entries)
        return entries
Developer: CCOM-4027, Project: Genomics, Lines of code: 15, Source: parsingfasta.py
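
Example 15 rewrites reader.fieldnames through an alias function. Under Python 3 the map(...) call would have to be materialized into a list, since DictReader treats fieldnames as a concrete sequence rather than a lazy iterator. A hedged Python 3 sketch of the same idea, with a hypothetical alias callable, might look like this.

import csv

def csv_input(path, alias, dialect="excel"):
    # Read all rows, renaming each column through the caller-supplied alias function.
    with open(path, newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f, dialect=dialect)
        # Accessing reader.fieldnames reads the header row; assign back a concrete list.
        reader.fieldnames = [alias(name) for name in reader.fieldnames]
        return list(reader)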


Note: The csv.DictReader class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.