

Python shapefile.Reader Class Code Examples

This article collects typical usage examples of the shapefile.Reader class in Python. If you are wondering what the Reader class does, how to call it, or what real-world uses look like, the curated examples below should help.


The following presents 15 code examples of the Reader class, sorted by popularity by default.
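Before the examples, a minimal sketch of typical Reader usage may help orient the reader. The path below is a placeholder; note also that the excerpts that follow omit their module-level imports and generally assume at least "from shapefile import Reader" plus, where used, numpy as np, os, and shutil.

from shapefile import Reader

# placeholder path; substitute your own shapefile (without the extension)
sf = Reader('GSHHS_shp/c/GSHHS_c_L1')

# pyshp prepends a DeletionFlag entry to fields, so the attribute names come
# from fields[1:] -- the same reason several examples below zip records
# against fields[1:]
field_names = [field[0] for field in sf.fields[1:]]

for shaperec in sf.shapeRecords():
    attributes = dict(zip(field_names, shaperec.record))
    points = shaperec.shape.points  # list of (x, y) vertex tuples
    print(attributes, len(points))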

Example 1: get_wdb_boundaries

def get_wdb_boundaries(resolution, level, rivers=False):
    polymeta = []
    polybounds = []
    if rivers:
        filename = "WDBII_shp/%s/WDBII_river_%s_L%02i" % (resolution, resolution, level)
    else:
        filename = "WDBII_shp/%s/WDBII_border_%s_L%s" % (resolution, resolution, level)
    print(filename)
    shf = Reader(filename)
    fields = shf.fields
    for shprec in shf.shapeRecords():
        shp = shprec.shape
        rec = shprec.record
        parts = shp.parts.tolist()
        if parts != [0]:
            print "multipart polygon"
            raise SystemExit
        verts = shp.points
        lons, lats = list(zip(*verts))
        north = max(lats)
        south = min(lats)
        attdict = {}
        for r, key in zip(rec, fields[1:]):
            attdict[key[0]] = r
        area = -1
        id = attdict["id"]
        polymeta.append([-1, -1, south, north, len(lons), id])
        b = np.empty((len(lons), 2), np.float32)
        b[:, 0] = lons
        b[:, 1] = lats
        if lsd is not None:
            b = quantize(b, lsd)
        polybounds.append(b)
    return polybounds, polymeta
Developer: jdkloe, Project: basemap, Lines: 34, Source: readboundaries_shp.py
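Note that lsd and quantize are not defined in this excerpt; they are module-level definitions in basemap's readboundaries_shp.py that optionally round coordinates to a fixed number of decimal places. A rough, hypothetical stand-in (not the actual basemap code) might look like:

import numpy as np

lsd = 4  # decimal places to keep; None disables quantization

def quantize(data, least_significant_digit):
    # round each coordinate to the given number of decimal places
    scale = 10.0 ** least_significant_digit
    return (np.around(data * scale) / scale).astype(data.dtype)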

Example 2: get_coast_polygons

def get_coast_polygons(resolution):
    polymeta = []; polybounds = []
    for level in [1,2,3,5]:
        filename = os.path.join(GSHHS_DIR, 'GSHHS_shp/', resolution,
                                'GSHHS_{}_L{}'.format(resolution, level))
        print(filename)
        shf = Reader(filename)
        fields = shf.fields
        try:
            # some resolutions do not include every level; skip missing files
            shaperecords = shf.shapeRecords()
        except:
            continue
        for shprec in shaperecords:
            shp = shprec.shape; rec = shprec.record
            parts = shp.parts.tolist()
            if parts != [0]:
                print('multipart polygon')
                raise SystemExit
            verts = shp.points
            lons, lats = list(zip(*verts))
            north = max(lats); south = min(lats)
            attdict={}
            for r,key in zip(rec,fields[1:]):
                attdict[key[0]]=r
            area = attdict['area']
            id = attdict['id']
            polymeta.append([level,area,south,north,len(lons),id])
            b = np.empty((len(lons),2),np.float32)
            b[:,0] = lons; b[:,1] = lats
            if lsd is not None:
                b = quantize(b,lsd)
            polybounds.append(b)

        # Manual fix for incorrect Antarctica polygons at full resolution
        # This issue is only present in the shapefile version and may be fixed
        # in future versions of GSHHS!
        if resolution == 'f' and level == 5:
            i = [item[-1] for item in polymeta].index('4-E')
            coords = polybounds[i][2:-1, :]
            coords = np.vstack([coords,
                                [180.0, -90.0],
                                [0.0, -90.0]]).astype(np.float32)
            polybounds[i] = coords
            polymeta[i][-2] = len(coords)

            j = [item[-1] for item in polymeta].index('4-W')
            coords = polybounds[j][3:, :]
            np.savetxt('coordinates.txt', coords)
            coords = np.vstack([coords,
                                [0.0, coords[-1][1]],
                                [0.0, -90.0],
                                [-180.0, -90.0],
                                coords[0]]).astype(np.float32)

            polybounds[j] = coords
            polymeta[j][-2] = len(coords)

    return polybounds, polymeta
Developer: jenshnielsen, Project: basemap, Lines: 58, Source: readboundaries_shp.py

Example 3: extract_flowlines

    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
    # go through the reach indices and add them to the list of flowlines
        # if in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []
       
        i = 0
        for record in records:
            if record[reach_index][:8] == HUC8: indices.append(i)
            i+=1

        if len(indices) == 0:
            if verbose: print('error: query returned no values')
            raise RuntimeError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Developer: geclark330, Project: PyHSPF, Lines: 57, Source: nhdplusextractor.py

Example 4: extract_catchments

    def extract_catchments(self, 
                           source, 
                           destination, 
                           flowlinefile, 
                           verbose = True,
                           ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile
    
        if verbose: print('reading the catchment shapefile\n')
    
        shapefile = Reader(source)
    
        # get the index of the feature id, which links to the flowline comid
    
        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    
        # go through the comids from the flowlines and add the corresponding 
        # catchment to the catchment list
    
        if verbose: print('searching the catchments in the watershed\n')
    
        records = shapefile.records()
        indices = []
    
        i = 0
        for record in records:
            if record[featureid_index] in comids: indices.append(i)
            i+=1
    
        if len(indices) == 0:
            print('query returned no values, returning\n')
            raise RuntimeError('query returned no values')

        # create the new shapefile
    
        if verbose: print('writing the new catchment shapefile\n')
        
        w = Writer()
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 5, parts = [shape.points])
            w.record(*records[i])
    
        w.save(destination)
Developer: djibi2, Project: PyHSPF, Lines: 56, Source: nhdplusextractor.py

Example 5: set_metadata

    def set_metadata(self, 
                     gagefile,
                     ):
        """
        Opens the gage file with the station metadata.
        """

        # metadata for stations

        self.gages  = []
        self.day1s  = []
        self.dayns  = []
        self.drains = []
        self.states = []
        self.sites  = []
        self.nwiss  = []
        self.aves   = []
        self.names  = []

        gagereader = Reader(gagefile, shapeType = 1)

        # get the fields with pertinent info

        day1_index  = gagereader.fields.index(['DAY1',       'N', 19, 0]) - 1
        dayn_index  = gagereader.fields.index(['DAYN',       'N', 19, 0]) - 1
        drain_index = gagereader.fields.index(['DA_SQ_MILE', 'N', 19, 2]) - 1
        HUC8_index  = gagereader.fields.index(['HUC',        'C',  8, 0]) - 1
        state_index = gagereader.fields.index(['STATE',      'C',  2, 0]) - 1
        site_index  = gagereader.fields.index(['SITE_NO',    'C', 15, 0]) - 1
        nwis_index  = gagereader.fields.index(['NWISWEB',    'C', 75, 0]) - 1
        ave_index   = gagereader.fields.index(['AVE',        'N', 19, 3]) - 1
        name_index  = gagereader.fields.index(['STATION_NM', 'C', 60, 0]) - 1

        # iterate through the records

        for r in gagereader.records():
            
            gage  = r[site_index] 
            day1  = r[day1_index] 
            dayn  = r[dayn_index] 
            drain = r[drain_index]
            state = r[state_index]
            nwis  = r[nwis_index]
            ave   = r[ave_index]  
            name  = r[name_index]
            site  = r[site_index]

            self.gages.append(gage)
            self.day1s.append(day1)
            self.dayns.append(dayn)
            self.drains.append(drain)
            self.states.append(state)
            self.sites.append(site)
            self.nwiss.append(nwis)
            self.aves.append(ave)
            self.names.append(name)
Developer: djibi2, Project: PyHSPF, Lines: 56, Source: nwisextractor.py

Example 6: merge_shapes

def merge_shapes(inputfile, 
                 outputfile = None, 
                 overwrite = False, 
                 verbose = True, 
                 vverbose = False,
                 ):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: 
            print('combined watershed shapefile {} exists'.format(outputfile))
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)

    try: 

        combined = combine_shapes(r.shapes(), verbose = vverbose)

    except:

        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 

        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
Developer: djibi2, Project: PyHSPF, Lines: 55, Source: vectorutils.py

Example 7: extract_raw

def extract_raw(source, destination, HUC8, plot = True, save = True, 
                verbose = True):
    """Extracts the grid data for the HUC8."""

    # make a new directory for the HUC8

    d = '{}/{}/NRCM'.format(destination, HUC8)

    if not os.path.isdir(d): os.mkdir(d)

    # make a "raw directory" for the unaltered info

    raw = '{}/raw'.format(d)

    if not os.path.isdir(raw): 
        os.mkdir(raw)
        if verbose: print('extracting NRCM predictions...\n')

    # use the boundary file to find the bounding box for the grid points

    boundaryfile = '{0}/{1}/{1}boundaries'.format(destination, HUC8)
    subbasinfile = '{0}/{1}/{1}subbasins'.format(destination, HUC8)
    space        = 0.1

    sf = Reader(boundaryfile)

    bbox = get_boundaries(sf.shapes(), space = space)

    xmin, ymin, xmax, ymax = bbox

    if verbose and not os.path.isdir(raw): 
        print('bounding box =', xmin, ymin, xmax, ymax, '\n')

    lats, lons = [], []
    for f in os.listdir(source):
        i = f.index('_')
        lon = float(f[:i])
        lat = float(f[i+1:])

        if inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
            lats.append(lat)
            lons.append(lon)

            if not os.path.isfile('{}/{}'.format(raw, f)):
                shutil.copy('{}/{}'.format(source, f), '{}/{}'.format(raw, f))
            
    if plot: 
        if save: output = '{}/gridpoints'.format(d)
        else:    output = None
        # guard against output being None when save is False
        if output is None or not os.path.isfile(output):
            plot_NRCM(lons, lats, bfile = boundaryfile, sfile = subbasinfile,
                      output = output, show = False)
Developer: djibi2, Project: PyHSPF, Lines: 52, Source: extract_timeseries.py

Example 8: get_comids

    def get_comids(self, flowlinefile):
        """Finds the comids from the flowline file."""

        # open the file

        shapefile = Reader(flowlinefile)

        # find the index of the comids

        comid_index = shapefile.fields.index(['COMID', 'N', 9,  0]) - 1

        # make a list of the comids

        comids = [r[comid_index] for r in shapefile.records()]

        return comids
Developer: geclark330, Project: PyHSPF, Lines: 16, Source: nhdplusextractor.py

Example 9: get_wdb_boundaries

def get_wdb_boundaries(resolution,level,rivers=False):
    polymeta = []; polybounds = []
    if rivers:
        filename = os.path.join(GSHHS_DIR, 'WDBII_shp', resolution,
                            'WDBII_river_{}_L{:02}'.format(resolution, level))
    else:
        filename = os.path.join(GSHHS_DIR, 'WDBII_shp', resolution,
                            'WDBII_border_{}_L{}'.format(resolution, level))
    print(filename)
    shf = Reader(filename)
    fields = shf.fields
    for shprec in shf.shapeRecords():
        shp = shprec.shape; rec = shprec.record
        parts = shp.parts.tolist()
        if parts != [0]:
            print('multipart polygon')
            raise SystemExit

        verts = shp.points
        # Detect degenerate lines that are actually points...
        if len(verts) == 2 and np.allclose(verts[0], verts[1]):
            print('Skipping degenerate line...')
            continue

        lons, lats = list(zip(*verts))
        north = max(lats); south = min(lats)
        attdict={}
        for r,key in zip(rec,fields[1:]):
            attdict[key[0]]=r
        area = -1
        poly_id = attdict['id']
        b = np.empty((len(lons),2),np.float32)
        b[:,0] = lons; b[:,1] = lats

        if not rivers:
            b = interpolate_long_segments(b, resolution)

        if lsd is not None:
            b = quantize(b,lsd)

        polymeta.append([-1,-1,south,north,len(b),poly_id])
        polybounds.append(b)

    return polybounds, polymeta
Developer: jenshnielsen, Project: basemap, Lines: 44, Source: readboundaries_shp.py

Example 10: _convert_csv_to_dbf

def _convert_csv_to_dbf(input_file, output_file, mapping_file=None,
                        mapping_from=None, mapping_to=None, print_data_dict=False):
    if output_file is None:
        name = new_file_ending(input_file.name, '.dbf')
        output_file = open(name, 'w')

    if mapping_file:
        #Read this file and map it
        try:
            from shapefile import Reader
        except ImportError:
            print "pyshp required for mapping feature"
            raise
        dbfr = Reader(dbf=mapping_file)
        # find the field that has the mapping_from name
        #use -1 because pyshp adds a column for flagging deleted fields
        name_i = _find_field_index_dbf(dbfr.fields, mapping_from) - 1
        map_values = [rec[name_i] for rec in dbfr.iterRecords()]

    # Parse the csv.
    parser = csv_parser(handle=input_file)
    header, fieldspecs, records = parser.parse()
    if mapping_file:
        csv_name_i = header.index(mapping_to)
        #be conservative and make sure they match
        if len(records) != len(map_values):
            raise Exception('mapping records lengths must match')
        #reorder the records so they match the original
        # This will raise an error if something does not map
        mapped_records = [None]*len(map_values)
        for i in range(len(map_values)):
            mv = map_values[i]
            try:
                old_i = collect(records, csv_name_i).index(mv)
            except ValueError:
                raise ValueError('Could not find record name %s in csv' % mv)
            mapped_records[i] = records[old_i]
        records = mapped_records

    # Write to dbf.
    dbfwriter(output_file, header, fieldspecs, records)

    if print_data_dict:
        parser.write_dd(input_file.name, output_file)
Developer: charleslaw, Project: census2dbf, Lines: 44, Source: census2dbf.py

Example 11: get_coast_polygons

def get_coast_polygons(resolution):
    polymeta = []
    polybounds = []
    for level in [1, 2, 3, 4]:
        filename = "GSHHS_shp/%s/GSHHS_%s_L%s" % (resolution, resolution, level)
        # filename = 'WDBII_shp/%s/WDBII_border_%s_L%s' % (resolution, resolution, level)
        print(filename)
        shf = Reader(filename)
        fields = shf.fields
        try:
            # some resolutions do not include every level; skip missing files
            shaperecords = shf.shapeRecords()
        except:
            continue
        for shprec in shaperecords:
            shp = shprec.shape
            rec = shprec.record
            parts = shp.parts.tolist()
            if parts != [0]:
                print "multipart polygon"
                raise SystemExit
            verts = shp.points
            lons, lats = list(zip(*verts))
            north = max(lats)
            south = min(lats)
            attdict = {}
            for r, key in zip(rec, fields[1:]):
                attdict[key[0]] = r
            area = attdict["area"]
            id = attdict["id"]
            polymeta.append([level, area, south, north, len(lons), id])
            b = np.empty((len(lons), 2), np.float32)
            b[:, 0] = lons
            b[:, 1] = lats
            if lsd is not None:
                b = quantize(b, lsd)
            polybounds.append(b)
    return polybounds, polymeta
Developer: jdkloe, Project: basemap, Lines: 37, Source: readboundaries_shp.py

Example 12: plot_NRCM

def plot_NRCM(lons, lats, bfile = None, sfile = None, space = 0.05,
              show = False, output = None):

    fig = pyplot.figure()

    sub = fig.add_subplot(111, aspect = 'equal')
    sub.set_title('Nested Regional Climate Model Grid Points')
    sub.scatter(lons, lats, marker = '+', c = 'r', s = 40)

    if bfile is not None:
        
        sf = Reader(bfile)
        boundary = sf.shape(0).points
        sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 1.2))

    if sfile is not None:
        
        sf = Reader(sfile)

        for s in sf.shapes():
            boundary = s.points
            sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 0.2))

    sub.set_xlabel('Longitude, Decimal Degrees', size = 13)
    sub.set_ylabel('Latitude, Decimal Degrees', size = 13)

    # note: assumes at least one of bfile/sfile was given so sf is defined
    xmin, ymin, xmax, ymax = get_boundaries(sf.shapes(), space = space)

    pyplot.xlim([xmin, xmax])
    pyplot.ylim([ymin, ymax])

    if output is not None: pyplot.savefig(output)

    if show: pyplot.show()

    pyplot.clf()
    pyplot.close()
Developer: djibi2, Project: PyHSPF, Lines: 37, Source: extract_timeseries.py

Example 13: extract_HUC8

    def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', 
                     verbose = True):
        """Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader  = Reader(self.NWIS, shapeType = 1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index  = gagereader.fields.index(['HUC',  'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            i = 0
            for record in gagerecords:
                if record[HUC8_index] == HUC8: gage_indices.append(i)
                i+=1

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType = 1)

            for field in gagereader.fields:  w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose: 
                print('successfully extracted NWIS gage stations\n')

        elif verbose: 

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Developer: kbrannan, Project: PyHSPF, Lines: 64, Source: nwisextractor.py

Example 14: average

# few more stations

space = 0.5

# download/set the location of the data using the "download_shapefile" method

processor.download_shapefile(filename, start, end, output,
                             datasets = ['precip3240'], space = 0.5)

# the ClimateProcessor's aggregate method can be used with inverse-distance
# weighted average (IDWA) to interpolate between the stations at a given point
# using the "method," "latitude," and "longitude" keyword arguments. the
# result is the same as in the previous example. As before, the
# subbasin_catchments shapefile, which contains the centroid of each
# aggregation area, is used.

sf = Reader(filename)

# index of the comid, latitude, and longitude records

comid_index = [f[0] for f in sf.fields].index('ComID') - 1
lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]
Developer: djlampert, Project: PyHSPF, Lines: 31, Source: climateprocessor08.py
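The excerpt above ends mid-loop. Going by its own comments, the loop body presumably hands each centroid to the ClimateProcessor's aggregate method with the IDWA keyword arguments; a hypothetical continuation (the positional arguments are an assumption, not shown in the excerpt) would be:

    # hypothetical continuation: interpolate the precipitation data to the
    # subbasin centroid using an inverse-distance weighted average (IDWA)
    precipitation = processor.aggregate('precip3240', 'precip', start, end,
                                        method = 'IDWA',
                                        longitude = lon,
                                        latitude = lat)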

Example 15: extract_bbox

    def extract_bbox(self, bbox, output, verbose = True):
        """Extracts the NID dam locations for a watershed from the dam 
        shapefile and the 8-digit hydrologic unit code of interest. 
        """

        self.download_compressed()

        xmin, ymin, xmax, ymax = bbox

        # copy the projection files

        if verbose: print('copying the projections from the NID source\n')

        projection = self.source + '.prj'

        shutil.copy(projection, output + '.prj')

        # get the dams within the watershed

        if verbose: print('reading the dam file\n')

        sf = Reader(self.source, shapeType = 1)

        # work around for issues with pyshp

        damrecords   = []
        for i in range(len(sf.shapes())):
            try: damrecords.append(sf.record(i))
            except: damrecords.append([-100 for i in range(len(sf.fields))])

        name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
        nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
        long_index  = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
        lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
        river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
        owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
        type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
        purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
        year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
        high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
        mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
        nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
        area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

        # iterate through the fields and determine which points are in the box

        if verbose: print('extracting dams into new file\n')

        dam_indices = []

        i = 0
        for record in damrecords:

            lat = record[lat_index]
            lon = record[long_index]

            if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
                dam_indices.append(i)
            i+=1

        # write the data from the bbox to a new shapefile

        w = Writer(shapeType = 1)

        for field in sf.fields:  w.field(*field)

        for i in dam_indices:
            point = sf.shape(i).points[0]
            w.point(*point)

            values = damrecords[i]

            rs = []

            for value in values:

                if isinstance(value, bytes): value = value.decode('utf-8')
                rs.append(value)

            w.record(*rs)

        w.save(output)

        if verbose: 

            print('successfully extracted NID dam locations to new file\n')
Developer: eotp, Project: PyHSPF, Lines: 86, Source: nidextractor.py
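One portability note on the examples above: they target the pyshp 1.x API, where a Writer is created with only a shape type and saved with w.save(name). In pyshp 2.x the target file is passed to the Writer constructor and the writer is closed (or used as a context manager) instead. A minimal sketch of the 2.x pattern, with a placeholder output name and made-up field and point:

import shapefile

# pyshp 2.x equivalent of the Writer(shapeType = ...) / w.save(name) pattern
with shapefile.Writer('extracted', shapeType = shapefile.POINT) as w:
    w.field('DAM_NAME', 'C', 65, 0)   # define a character field
    w.point(-95.0, 40.0)              # hypothetical coordinates
    w.record('example dam')           # one record per shape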


Note: the shapefile.Reader class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.