

Python Reader.shape Method Code Examples

This article collects typical usage examples of the shapefile.Reader.shape method in Python. If you are wondering what Reader.shape does, how to call it, or where to find working examples of it, the curated code samples below may help. They can also serve as a starting point for exploring other usage examples of the shapefile.Reader class to which the method belongs.


The sections below present 13 code examples of the Reader.shape method, sorted by popularity by default.
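Before turning to the project examples, here is a minimal sketch of the basic call pattern of Reader.shape from the pyshp package; the file name 'flowlines' is only a placeholder for an existing shapefile.

from shapefile import Reader   # pyshp

# open an existing shapefile; 'flowlines' is a hypothetical path (extension optional)
reader = Reader('flowlines')

# shape(i) returns the geometry of the i-th record by itself,
# without reading every shape in the file
s = reader.shape(0)

print(s.shapeType)      # integer geometry type code, e.g. 3 = polyline, 5 = polygon
print(s.bbox)           # bounding box [xmin, ymin, xmax, ymax] of the shape
print(len(s.points))    # number of (x, y) vertices in the shape

As the examples below show, shape(i) is usually paired with Reader.records() and Reader.fields: the attribute records are filtered to find matching indices, and the geometries at those indices are then copied to a new file with shapefile.Writer.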

Example 1: extract_flowlines

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
        # go through the reach indices and add them to the list of flowlines
        # if in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []
       
        i = 0
        for record in records:
            if record[reach_index][:8] == HUC8: indices.append(i)
            i+=1

        if len(indices) == 0:
            if verbose: print('error: query returned no values')
            raise RuntimeError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Developer: geclark330, Project: PyHSPF, Lines of code: 59, Source file: nhdplusextractor.py

Example 2: extract_catchments

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def extract_catchments(self, 
                           source, 
                           destination, 
                           flowlinefile, 
                           verbose = True,
                           ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile
    
        if verbose: print('reading the catchment shapefile\n')
    
        shapefile = Reader(source)
    
        # get the index of the feature id, which links to the flowline comid
    
        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    
        # go through the comids from the flowlines and add the corresponding 
        # catchment to the catchment list
    
        if verbose: print('searching the catchments in the watershed\n')
    
        records = shapefile.records()
        indices = []
    
        i = 0
        for record in records:
            if record[featureid_index] in comids: indices.append(i)
            i+=1
    
        if len(indices) == 0:
            print('query returned no values, returning\n')
            raise RuntimeError('query returned no values')

        # create the new shapefile
    
        if verbose: print('writing the new catchment shapefile\n')
        
        w = Writer()
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 5, parts = [shape.points])
            w.record(*records[i])
    
        w.save(destination)
Developer: djibi2, Project: PyHSPF, Lines of code: 58, Source file: nhdplusextractor.py

Example 3: plot_NRCM

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
def plot_NRCM(lons, lats, bfile = None, sfile = None, space = 0.05,
              show = False, output = None):

    fig = pyplot.figure()

    sub = fig.add_subplot(111, aspect = 'equal')
    sub.set_title('Nested Regional Climate Model Grid Points')
    sub.scatter(lons, lats, marker = '+', c = 'r', s = 40)

    if bfile is not None:
        
        sf = Reader(bfile)
        boundary = sf.shape(0).points
        sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 1.2))

    if sfile is not None:
        
        sf = Reader(sfile)

        for s in sf.shapes():
            boundary = s.points
            sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 0.2))

    sub.set_xlabel('Longitude, Decimal Degrees', size = 13)
    sub.set_ylabel('Latitude, Decimal Degrees', size = 13)

    xmin, ymin, xmax, ymax = get_boundaries(sf.shapes(), space = space)

    pyplot.xlim([xmin, xmax])
    pyplot.ylim([ymin, ymax])

    if output is not None: pyplot.savefig(output)

    if show: pyplot.show()

    pyplot.clf()
    pyplot.close()
Developer: djibi2, Project: PyHSPF, Lines of code: 39, Source file: extract_timeseries.py

Example 4: plot_gage_subbasin

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def plot_gage_subbasin(self, hspfmodel, folder):
        """Makes a plot of the subbasin area."""

        subbasinfile  = '{}/subbasins'.format(folder)
        boundaryfile  = '{}/boundary'.format(folder)
        flowfile      = '{}/flowlines'.format(folder)
        combinedfile  = '{}/combined'.format(folder)
        watershedplot = '{}/watershed.png'.format(folder)

        # make a shapefile of the subbasins for the watershed

        f = '{0}/{1}/{1}subbasins'.format(self.directory, self.HUC8)
        for out in (subbasinfile, boundaryfile, flowfile, combinedfile):
            if not os.path.isfile(out + '.prj'):
                shutil.copy(f + '.prj', out + '.prj')

        if not os.path.isfile(subbasinfile + '.shp'):

            subshapes  = []
            subrecords = []
            for subbasin in hspfmodel.subbasins:

                f = '{0}/{1}/{2}/combined'.format(self.directory, self.HUC8, 
                                                  subbasin)
                s = Reader(f, shapeType = 5)

                subshapes.append(s.shape(0).points)
                subrecords.append(s.record(0))

            w = Writer(shapeType = 5)

            for field in s.fields:    w.field(*field)
            for record in subrecords: w.record(*record)
            for shape in subshapes:   w.poly(shapeType = 5, parts = [shape])

            w.save(subbasinfile)

        if not os.path.isfile(combinedfile + '.shp'):

            fshapes    = []
            frecords   = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/combined_flowline'.format(self.directory, 
                                                           self.HUC8, 
                                                           subbasin)
                r = Reader(f, shapeType = 3)

                fshapes.append(r.shape(0).points)
                frecords.append(r.record(0))

            w = Writer(shapeType = 3)

            for field in r.fields:  w.field(*field)
            for record in frecords: w.record(*record)
            for shape in fshapes:   w.poly(shapeType = 3, parts = [shape])

            w.save(combinedfile)

        # merge the shapes into a watershed

        if not os.path.exists(boundaryfile + '.shp'):

            merge_shapes(subbasinfile, outputfile = boundaryfile)

        # make a flowline file for the subbasins for the watershed

        if not os.path.isfile(flowfile + '.shp'):

            shapes  = []
            records = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/flowlines'.format(self.directory, 
                                                   self.HUC8, subbasin)
                r = Reader(f, shapeType = 3)
                for shape  in r.shapes():  shapes.append(shape.points)
                for record in r.records(): records.append(record)

            w = Writer(shapeType = 3)

            for field in r.fields: w.field(*field)
            for record in records: w.record(*record)
            for shape in shapes:   w.poly(shapeType = 3, parts = [shape])

            w.save(flowfile)

        if not os.path.isfile(watershedplot):

            plot_gage_subbasin(folder, self.HUC8, self.gageid, hspfmodel,
                               output = watershedplot)
Developer: djibi2, Project: PyHSPF, Lines of code: 91, Source file: forecaster.py

Example 5: plot_HUC8

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def plot_HUC8(self, 
                  flowfile, 
                  cfile,
                  bfile,
                  VAAfile, 
                  elevfile,
                  patchcolor = None,
                  resolution = 400, 
                  colormap = 'gist_earth',
                  grid = False,
                  title = None, 
                  verbose = True,
                  output = None,
                  show = False,
                  ):
        """Makes a plot of the raw NHDPlus data."""

        if verbose: print('generating plot of the watershed\n')

        fig = pyplot.figure()
        subplot = fig.add_subplot(111, aspect = 'equal')
        subplot.tick_params(axis = 'both', which = 'major', labelsize = 10)

        # add the title

        if title is not None: subplot.set_title(title, fontsize = 14)

        if patchcolor is None: facecolor = (1,0,0,0.)
        else:                  facecolor = patchcolor

        # open up and show the boundary

        b = Reader(bfile, shapeType = 5)

        boundary = b.shape(0)
        points = numpy.array(boundary.points)
        subplot.add_patch(self.make_patch(points, facecolor, width = 0.5))

        # open up and show the catchments

        c = Reader(cfile, shapeType = 5)

        extent = self.get_boundaries(c.shapes(), space = 0.02)

        xmin, ymin, xmax, ymax = extent

        # figure out how far one foot is on the map

        points_per_width = 72 * 8
        ft_per_km = 3280.84
        scale_factor = (points_per_width / 
                        self.get_distance([xmin, ymin], [xmax, ymin]) / 
                        ft_per_km)

        # make patches of the catchment area

        for i in range(len(c.records())):
            catchment = c.shape(i)
            points = numpy.array(catchment.points)
            subplot.add_patch(self.make_patch(points, facecolor, width = 0.1))

        # get the flowline attributes, make an "updown" dictionary to follow 
        # flow, and change the keys to comids

        with open(VAAfile, 'rb') as f: flowlineVAAs = pickle.load(f)

        updown = {}
        for f in flowlineVAAs:
            if flowlineVAAs[f].down in flowlineVAAs:
                updown[flowlineVAAs[f].comid] = \
                    flowlineVAAs[flowlineVAAs[f].down].comid

        flowlineVAAs = {flowlineVAAs[f].comid:flowlineVAAs[f] 
                        for f in flowlineVAAs}

        # open up and show the flowfiles

        f = Reader(flowfile, shapeType = 3)
        comid_index = f.fields.index(['COMID', 'N',  9, 0]) - 1

        all_comids = [r[comid_index] for r in f.records()]
        
        # get the flows and velocities from the dictionary
        
        widths = []
        comids = []
        for comid in all_comids:
            if comid in flowlineVAAs:
                flow = flowlineVAAs[comid].flow
                velocity = flowlineVAAs[comid].velocity

                # estimate flow width (ft) assuming a triangular 90-degree channel

                comids.append(comid)
                widths.append(numpy.sqrt(4 * flow / velocity))
        
        # convert widths in feet to points on the figure; exaggerated by 10

        widths = [w * scale_factor * 20 for w in widths]

#......... part of this example's code is omitted here .........
Developer: geclark330, Project: PyHSPF, Lines of code: 103, Source file: nhdplusextractor.py

Example 6: extract_HUC8

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', 
                     verbose = True):
        """Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader  = Reader(self.NWIS, shapeType = 1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index  = gagereader.fields.index(['HUC',  'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            i = 0
            for record in gagerecords:
                if record[HUC8_index] == HUC8: gage_indices.append(i)
                i+=1

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType = 1)

            for field in gagereader.fields:  w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose: 
                print('successfully extracted NWIS gage stations\n')

        elif verbose: 

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Developer: kbrannan, Project: PyHSPF, Lines of code: 66, Source file: nwisextractor.py

Example 7: extract_bbox

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def extract_bbox(self, bbox, output, verbose = True):
        """Extracts the NID dam locations for a watershed from the dam 
        shapefile and the 8-digit hydrologic unit code of interest. 
        """

        self.download_compressed()

        xmin, ymin, xmax, ymax = bbox

        # copy the projection files

        if verbose: print('copying the projections from the NID source\n')

        projection = self.source + '.prj'

        shutil.copy(projection, output + '.prj')

        # get the dams within the watershed

        if verbose: print('reading the dam file\n')

        sf = Reader(self.source, shapeType = 1)

        # work around for issues with pyshp

        damrecords   = []
        for i in range(len(sf.shapes())):
            try: damrecords.append(sf.record(i))
            except: damrecords.append([-100 for i in range(len(sf.fields))])

        name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
        nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
        long_index  = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
        lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
        river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
        owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
        type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
        purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
        year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
        high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
        mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
        nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
        area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

        # iterate through the fields and determine which points are in the box

        if verbose: print('extracting dams into new file\n')

        dam_indices = []

        i = 0
        for record in damrecords:

            lat = record[lat_index]
            lon = record[long_index]

            if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
                dam_indices.append(i)
            i+=1

        # write the data from the bbox to a new shapefile

        w = Writer(shapeType = 1)

        for field in sf.fields:  w.field(*field)

        for i in dam_indices:
            point = sf.shape(i).points[0]
            w.point(*point)

            values = damrecords[i]

            rs = []

            for value in values:

                if isinstance(value, bytes): value = value.decode('utf-8')
                rs.append(value)

            w.record(*rs)

        w.save(output)

        if verbose: 

            print('successfully extracted NID dam locations to new file\n')
Developer: eotp, Project: PyHSPF, Lines of code: 88, Source file: nidextractor.py

Example 8: calculate_landuse

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def calculate_landuse(self,
                          rasterfile,
                          shapefile,
                          aggregatefile,
                          attribute,
                          csvfile = None,
                          ):
        """
        Calculates the land use for the given year for the "attribute"
        feature attribute in the polygon shapefile using the aggregate
        mapping provided in the "aggregatefile."
        """

        # make sure the files exist

        for f in rasterfile, shapefile + '.shp', aggregatefile:
            if not os.path.isfile(f):
                print('error, {} does not exist\n'.format(f))
                raise FileNotFoundError('{} does not exist'.format(f))

        # read the aggregate file

        self.read_aggregatefile(aggregatefile)

        # open the shapefile

        sf = Reader(shapefile, shapeType = 5)

        attributes = [f[0] for f in sf.fields]

        try:    index = attributes.index(attribute) - 1
        except:
            print('error: attribute ' +
                  '{} is not in the shapefile fields'.format(attribute))
            raise

        # iterate through the shapes, get the fractions and save them

        for i in range(len(sf.records())):

            points = numpy.array(sf.shape(i).points)
            record = sf.record(i)

            k = record[index]

            # store the results

            self.landuse[k] = {r:0 for r in self.order}

            try:

                values, origin = get_raster_in_poly(rasterfile, points,
                                                    verbose = False)
                values = values.flatten()
                values = values[values.nonzero()]

                tot_pixels = len(values)

                # count the number of pixels of each land use type

                for v in numpy.unique(values):

                    # find all the indices for each pixel value

                    pixels = numpy.argwhere(values == v)

                    # normalize by the total # of pixels

                    f = len(values[pixels]) / tot_pixels

                    # add the landuse to the aggregated value

                    self.landuse[k][self.groups[v]] += f

            # work around for small shapes

            except: self.landuse[k][self.groups[0]] = 1

        if csvfile is not None:  self.make_csv(attribute, csvfile)

        return self.landuse
Developer: djlampert, Project: PyHSPF, Lines of code: 83, Source file: cdlextractor.py

Example 9: plot_landuse

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
    def plot_landuse(self,
                     landuse,
                     catchments,
                     attribute,
                     categoryfile,
                     output = None,
                     datatype = 'raw',
                     overwrite = False,
                     pixels = 1000,
                     border = 0.02,
                     lw = 0.5,
                     show = False,
                     verbose = True,
                     vverbose = False
                     ):
        """
        Makes a plot of the landuse of a catchment shapefile on top of a
        raster landuse file.
        """

        if self.order is None:
            print('error: no landuse aggregation file information provided\n')
            raise RuntimeError('no landuse aggregation file information provided')

        self.read_categoryfile(categoryfile)

        if verbose: print('generating a {} land use plot\n'.format(datatype))

        # make the figure

        fig = pyplot.figure()
        subplot = fig.add_subplot(111, aspect = 'equal')
        subplot.tick_params(axis = 'both', which = 'major', labelsize = 11)

        # add the title

        if datatype == 'results': title = 'Land Use Fractions'
        else:                     title = 'Raw Land Use Data'

        subplot.set_title(title, size = 14)

        # open the shapefile and get the bounding box

        s = Reader(catchments, shapeType = 5)

        xmin, ymin, xmax, ymax = s.bbox

        # get the index of the field for the attribute matching

        index = [f[0] for f in s.fields].index(attribute) - 1

        # set up a custom colormap using the rgbs supplied in the aggregate file

        color_table = [(self.reds[g] / 255, self.greens[g] / 255,
                        self.blues[g] / 255) for g in self.order]

        cmap = colors.ListedColormap(color_table)

        # provide the cutoff boundaries for the mapping of values to the table

        bounds = [i-0.5 for i in range(len(self.order)+1)]

        # create a norm to map the bounds to the colors

        norm = colors.BoundaryNorm(bounds, cmap.N)

        # get the pixel width and origin

        w = (xmax - xmin) / pixels

        # calculate the image array height and the height of a pixel

        height = int(numpy.ceil((ymax - ymin) / (xmax - xmin)) * pixels)
        h = (ymax - ymin) / height

        # set up the image array

        image_array = numpy.zeros((height, pixels), dtype = 'uint8')

        # get the land use fraction for each category

        if datatype == 'results':

            # iterate through the shapes and make patches

            for i in range(len(s.records())):
                comid = s.record(i)[index]
                points = numpy.array(s.shape(i).points)

                # convert the shape to pixel coordinates

                pixel_polygon = [(get_pixel(x, xmin, w), get_pixel(y, ymin, h))
                                 for x, y in points]

                # make a PIL image to use as a mask

                rasterpoly = Image.new('L', (pixels, height), 1)
                rasterize  = ImageDraw.Draw(rasterpoly)

                # rasterize the polygon
#......... part of this example's code is omitted here .........
Developer: djlampert, Project: PyHSPF, Lines of code: 103, Source file: cdlextractor.py

Example 10: Reader

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
# first use the NWIS metadata file to get the latitude and longitude of the gage

reader = Reader('{}/USGS_Streamgages-NHD_Locations.shp'.format(NWIS))

# find the record index for the NWIS gage ids

i = [f[0] for f in reader.fields].index('SITE_NO') - 1

# find the index of the gage

j = [r[i] for r in reader.records()].index(gageid)

# use the index to get the latitude and longitude of the station

x, y = reader.shape(j).points[0]

print('location of gage {}: {:.4f}, {:.4f}\n'.format(gageid, x, y))

# open the flowline shapefile to supply reach length (miles or kilometers 
# depending on the unit system)

reader = Reader(flowfile)

# find shapes with a bounding box encompassing the gage to narrow the search

print('searching for the closest flowline to the gage\n')

bboxes = [s.bbox for s in reader.shapes()]

contains = [i for i, b in enumerate(bboxes)
Developer: djibi2, Project: PyHSPF, Lines of code: 32, Source file: ftable01.py

Example 11: plot_climate

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
def plot_climate(HUC8, sfile, bfile, pfile = None, efile = None, tfile = None, 
                 snowfile = None, centroids = True, radius = None, 
                 patchcolor = None, solarfile = None, windfile = None,
                 output = None, show = False, verbose = True):
    """Makes a plot of all the hourly precipitation stations of a watershed
    defined by "bfile" with subbasin defined by "sfile" from the source 
    precipitation shapefile "pfile"."""


    if verbose: 
        print('generating plot of watershed %s NCDC stations\n' % HUC8)

    fig = pyplot.figure()
    subplot = fig.add_subplot(111, aspect = 'equal')
    subplot.tick_params(axis = 'both', which = 'major', labelsize = 10)

    # add the title

    description = 'Climate Data Stations'
    title = 'Cataloging Unit %s\n%s' % (HUC8, description)
    subplot.set_title(title, fontsize = 14)

    # open up and show the catchments

    if patchcolor is None: facecolor = (1,0,0,0.)
    else:                  facecolor = patchcolor

    b = Reader(bfile, shapeType = 5)

    points = np.array(b.shape(0).points)
    subplot.add_patch(make_patch(points, facecolor = facecolor, width = 1.))

    extent = get_boundaries(b.shapes(), space = 0.02)

    xmin, ymin, xmax, ymax = extent

    # add the subbasin file

    s = Reader(sfile, shapeType = 5)

    # make patches of the subbasins

    for i in range(len(s.records())):
        shape = s.shape(i)
        points = np.array(shape.points)
        subplot.add_patch(make_patch(points, facecolor, width = 0.15))

    plots = [] # keep track of the scatterplots
    names = [] # keep track of names for the legend
        
    # add the subbasin centroids

    if centroids:

        cx_index = s.fields.index(['CenX', 'N', 12, 6]) - 1
        cy_index = s.fields.index(['CenY', 'N', 12, 6]) - 1

        centroids = [[r[cx_index], r[cy_index]] for r in s.records()]
        xs, ys = zip(*centroids)
        cplot = subplot.scatter(xs, ys, marker = '+', c = 'pink', s = 15)
        plots.append(cplot)
        names.append('Centroids')

    # add a circle around the subbasin specified by "radius" showing the gages
    # within the radius of that subbasin

    if radius is not None:

        comid_index = s.fields.index(['ComID',    'N',  9, 0]) - 1
        cx_index    = s.fields.index(['CenX',     'N', 12, 6]) - 1
        cy_index    = s.fields.index(['CenY',     'N', 12, 6]) - 1
        area_index  = s.fields.index(['AreaSqKm', 'N', 10, 2]) - 1

        comids = ['{}'.format(r[comid_index]) for r in s.records()]
        cxs    = [r[cx_index]    for r in s.records()]
        cys    = [r[cy_index]    for r in s.records()]
        areas  = [r[area_index]  for r in s.records()]

        try: i = comids.index(radius)
        except: i = 0
        
        c = [cxs[i], cys[i]]

        radii = [math.sqrt(a / math.pi) for a in areas]

        # scale kms to degrees

        km  = get_distance([xmin, ymin], [xmax, ymax])
        deg = math.sqrt((xmin - xmax)**2 + (ymax - ymin)**2)

        r = sum(radii) / len(radii) * deg / km * 5

        circle = pyplot.Circle(c, radius = r, edgecolor = 'black', 
                               facecolor = 'yellow', alpha = 0.5) 
        subplot.add_patch(circle)
        subplot.scatter(c[0], c[1], marker = '+', c = 'black')

    # add the precipitation gage points

    if pfile is not None:
#......... part of this example's code is omitted here .........
Developer: djibi2, Project: PyHSPF, Lines of code: 103, Source file: gisplots.py

Example 12: merge_shapes

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
def merge_shapes(inputfile, outputfile = None, overwrite = False, 
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try: 
        shapes  = []
        records = [] 
        bboxes  = []

        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

                try: combined = combine_shapes(shapes, bboxes, 
                                               verbose = vverbose)
                except: 
                    if verbose: print('trying alternate trace method')
                    combined = combine_shapes(shapes, bboxes, skip = True, 
                                              verbose = vverbose)

    except:
        if verbose: print('trying alternate trace method')
        shapes  = []
        records = [] 
        bboxes  = []
        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points, omit = True)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:    combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except: 
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 
        print('successfully combined shapes from %s to %s\n' % 
              (inputfile, outputfile))
Developer: eotp, Project: PyHSPF, Lines of code: 86, Source file: vectorutils.py

Example 13: extract_aquifers

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import shape [as alias]
def extract_aquifers(directory, HUC8, aquifers, pad = 0.2, verbose = True):
    """Extracts aquifers from the source datafile to the destination using
    the HUC8 boundaries for the query."""

    start = time.time()

    # open up the HUC8 boundary shapefile and use it to get the bounding box

    shapefile = Reader(directory + '/%s/%scatchments' % (HUC8, HUC8))

    xmin, ymin, xmax, ymax = get_boundaries(shapefile.shapes())

    # convert to bounding corners for testing

    p1 = [xmin - pad * (xmax - xmin), ymin - pad * (ymax - ymin)]
    p2 = [xmax + pad * (xmax - xmin), ymax + pad * (ymax - ymin)]

    shapefile = None

    # start by copying the projection files

    if verbose: print('\ncopying the projections\n')

    shutil.copy(directory + '/%s/%scatchments.prj' % (HUC8, HUC8), 
                directory + '/%s/%saquifers.prj' % (HUC8, HUC8))

    # open the flowline file
    
    if verbose: print('reading the aquifer file\n')
    
    shapefile = Reader(aquifers, shapeType = 5)

    # work around for issues with pyshp

    records   = []
    for i in range(len(shapefile.shapes())):
        try: records.append(shapefile.record(i))
        except: records.append('')
     
    # use the bounding boxes to see if the shapes are within the watershed area

    if verbose: print('searching for aquifers in the watershed\n')

    bboxes = [shapefile.shape(i).bbox for i in range(len(records))]

    corners = [[[b[0], b[1]], [b[0], b[3]], [b[2], b[1]], [b[2], b[3]]]
               for b in bboxes]

    indices = [i for i, c in zip(range(len(corners)), corners) if 
               any([inside_box(p1, p2, p) for p in c]) or 
               all([inside_box(c[0], c[3], p1), inside_box(c[0], c[3], p2)])]

    # remove any non aquifers

    indices = [i for i in indices if shapefile.record(i)[4] != 999]

    # find a record for the non aquifer

    i = 0
    while shapefile.record(i)[4] != 999: i+=1

    nonrecord = shapefile.record(i)
    nonrecord[1] = nonrecord[1].decode('utf-8')
    nonrecord[5] = 0
    nonrecord[6] = 0

    if len(indices) == 0:
        if verbose: print('query returned no values, returning\n')
        return
    
    # write the data from the HUC8 to a new shapefile
    
    w = Writer(shapeType = 5)
    
    for field in shapefile.fields:  w.field(*field)
    
    for i in indices:
        shape = shapefile.shape(i)

        # check for multiple parts

        if len(shape.parts) > 1:
            parts = [shape.points[i:j] 
                     for i, j in zip(shape.parts[:-1], shape.parts[1:])]
        else: parts = [shape.points]

        record = records[i]
    
        # little work around for blank binary values
    
        if isinstance(record[1], bytes):
            record[1] = record[1].decode('utf-8')

        w.poly(shapeType = 5, parts = parts)
        w.record(*record)

    # add a shape for the bounding box showing no aquifer locations

    part = [p1, [p1[0], p2[1]], p2, [p2[0], p1[1]]]

#......... part of this example's code is omitted here .........
Developer: djibi2, Project: PyHSPF, Lines of code: 103, Source file: extract_aquifers.py


Note: The shapefile.Reader.shape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please refer to each project's license before distributing or reusing the code. Do not reproduce this article without permission.