

Python Reader.records Method Code Examples

This article collects typical usage examples of the shapefile.Reader.records method in Python. If you are wondering what Reader.records does, how to call it, or what a working example looks like, the curated code examples below may help. You can also explore further usage examples of the shapefile.Reader class that this method belongs to.


A total of 15 code examples of the Reader.records method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
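
Before the individual examples, here is a minimal sketch of the pattern nearly all of them follow: open the file with Reader, look up a field position in Reader.fields (the leading DeletionFlag entry is why the examples subtract 1), and iterate over the attribute records returned by Reader.records(). The base filename 'catchments' and the field name 'COMID' below are placeholders for illustration only.

from shapefile import Reader

# open the shapefile; 'catchments' is a placeholder base filename
sf = Reader('catchments')

# Reader.fields starts with a DeletionFlag entry, so subtract 1 to turn
# a field position into an index usable with the attribute records
field_names = [f[0] for f in sf.fields]
comid_index = field_names.index('COMID') - 1

# Reader.records() returns one list of attribute values per shape
for record in sf.records():
    print(record[comid_index])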

Example 1: extract_flowlines

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
        # go through the reach records and add them to the list of flowlines
        # if they are in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []
       
        i = 0
        for record in records:
            if record[reach_index][:8] == HUC8: indices.append(i)
            i+=1

        if len(indices) == 0:
            if verbose: print('error: query returned no values')
            raise RuntimeError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Developer: geclark330, Project: PyHSPF, Lines of code: 59, Source file: nhdplusextractor.py

Example 2: extract_catchments

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def extract_catchments(self, 
                           source, 
                           destination, 
                           flowlinefile, 
                           verbose = True,
                           ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile
    
        if verbose: print('reading the catchment shapefile\n')
    
        shapefile = Reader(source)
    
        # get the index of the feature id, which links to the flowline comid
    
        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    
        # go through the comids from the flowlines and add the corresponding 
        # catchment to the catchment list
    
        if verbose: print('searching the catchments in the watershed\n')
    
        records = shapefile.records()
        indices = []
    
        i = 0
        for record in records:
            if record[featureid_index] in comids: indices.append(i)
            i+=1
    
        if len(indices) == 0:
            print('query returned no values\n')
            raise RuntimeError('query returned no values')

        # create the new shapefile
    
        if verbose: print('writing the new catchment shapefile\n')
        
        w = Writer()
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 5, parts = [shape.points])
            w.record(*records[i])
    
        w.save(destination)
Developer: djibi2, Project: PyHSPF, Lines of code: 58, Source file: nhdplusextractor.py

Example 3: set_metadata

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def set_metadata(self, 
                     gagefile,
                     ):
        """
        Opens the gage file with the station metadata.
        """

        # metadata for stations

        self.gages  = []
        self.day1s  = []
        self.dayns  = []
        self.drains = []
        self.states = []
        self.sites  = []
        self.nwiss  = []
        self.aves   = []
        self.names  = []

        gagereader = Reader(gagefile, shapeType = 1)

        # get the fields with pertinent info

        day1_index  = gagereader.fields.index(['DAY1',       'N', 19, 0]) - 1
        dayn_index  = gagereader.fields.index(['DAYN',       'N', 19, 0]) - 1
        drain_index = gagereader.fields.index(['DA_SQ_MILE', 'N', 19, 2]) - 1
        HUC8_index  = gagereader.fields.index(['HUC',        'C',  8, 0]) - 1
        state_index = gagereader.fields.index(['STATE',      'C',  2, 0]) - 1
        site_index  = gagereader.fields.index(['SITE_NO',    'C', 15, 0]) - 1
        nwis_index  = gagereader.fields.index(['NWISWEB',    'C', 75, 0]) - 1
        ave_index   = gagereader.fields.index(['AVE',        'N', 19, 3]) - 1
        name_index  = gagereader.fields.index(['STATION_NM', 'C', 60, 0]) - 1

        # iterate through the records

        for r in gagereader.records():
            
            gage  = r[site_index] 
            day1  = r[day1_index] 
            dayn  = r[dayn_index] 
            drain = r[drain_index]
            state = r[state_index]
            nwis  = r[nwis_index]
            ave   = r[ave_index]  
            name  = r[name_index]
            site  = r[site_index]

            self.gages.append(gage)
            self.day1s.append(day1)
            self.dayns.append(dayn)
            self.drains.append(drain)
            self.states.append(state)
            self.sites.append(site)
            self.nwiss.append(nwis)
            self.aves.append(ave)
            self.names.append(name)
Developer: djibi2, Project: PyHSPF, Lines of code: 58, Source file: nwisextractor.py

Example 4: get_comids

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def get_comids(self, flowlinefile):
        """Finds the comids from the flowline file."""

        # open the file

        shapefile = Reader(flowlinefile)

        # find the index of the comids

        comid_index = shapefile.fields.index(['COMID', 'N', 9,  0]) - 1

        # make a list of the comids

        comids = [r[comid_index] for r in shapefile.records()]

        return comids
Developer: geclark330, Project: PyHSPF, Lines of code: 18, Source file: nhdplusextractor.py

Example 5: extract_HUC8

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', 
                     verbose = True):
        """Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader  = Reader(self.NWIS, shapeType = 1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index  = gagereader.fields.index(['HUC',  'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            i = 0
            for record in gagerecords:
                if record[HUC8_index] == HUC8: gage_indices.append(i)
                i+=1

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType = 1)

            for field in gagereader.fields:  w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose: 
                print('successfully extracted NWIS gage stations\n')

        elif verbose: 

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Developer: kbrannan, Project: PyHSPF, Lines of code: 66, Source file: nwisextractor.py

Example 6: average

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
# weighted average (IDWA) to interpolate between the stations at a given point
# using the "method," "latitude," and "longitude" keyword arguments. the
# result is the same as in the previous example. as before, the subbasin_catchments
# shapefile, which contains the centroid of each aggregation, will be used.

sf = Reader(filename)

# index of the comid, latitude, and longitude records

comid_index = [f[0] for f in sf.fields].index('ComID') - 1
lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]

    i = comid, lon, lat
    print('aggregating timeseries for comid {} at {}, {}\n'.format(*i))

    precipitation = processor.aggregate('precip3240', 'precip', start, end,
                                        method = 'IDWA', longitude = lon,
                                        latitude = lat)

    mean = sum(precipitation) / (end - start).days * 365.25
Developer: djlampert, Project: PyHSPF, Lines of code: 32, Source file: climateprocessor08.py

Example 7: climate

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]

#......... part of the code is omitted here .........
            ts = s, 60, climateprocessor.aggregate('NSRDB', 'metstat', s, e)
            with open(hsolar, 'wb') as f: pickle.dump(ts, f)
            
        # aggregate the hourly solar to daily

        dsolar = '{}/solar'.format(daily)
        if not os.path.isfile(dsolar):

            with open(hsolar, 'rb') as f: t, tstep, data = pickle.load(f)
            ts = s, 1440, [sum(data[i:i+24]) / 24 
                           for i in range(0, 24 * (e-s).days, 24)]

            with open(dsolar, 'wb') as f: pickle.dump(ts, f)

        # aggregate the hourly precipitation for each subbasin using IDWA

        precip = '{}/hourlyprecipitation'.format(climatedata)
        if not os.path.isdir(precip): os.mkdir(precip)

        # use the subbasin shapefile to get the location of the centroids

        sf = Reader(subbasinfile)

        # index of the comid, latitude, and longitude records

        comid_index = [f[0] for f in sf.fields].index('ComID') - 1
        lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
        lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1
        elev_index  = [f[0] for f in sf.fields].index('AvgElevM') - 1
        area_index  = [f[0] for f in sf.fields].index('AreaSqKm') - 1

        # iterate through the shapefile records and aggregate the timeseries

        for i in range(len(sf.records())):

            record = sf.record(i)
            comid  = record[comid_index]
            lon    = record[lon_index]
            lat    = record[lat_index]

            # check if the aggregated time series exists or calculate it

            subbasinprecip = '{}/{}'.format(precip, comid)
            if not os.path.isfile(subbasinprecip):

                if verbose:
                    i = comid, lon, lat
                    print('aggregating timeseries for comid ' +
                          '{} at {}, {}\n'.format(*i))

                p = climateprocessor.aggregate('precip3240', 'precip', s, e,
                                               method = 'IDWA', 
                                               longitude = lon,
                                               latitude = lat)

                ts = s, 60, p
                with open(subbasinprecip, 'wb') as f: pickle.dump(ts, f)

        # make a directory for the evapotranspiration time series

        evapotranspiration = '{}/evapotranspiration'.format(climatedata)
        if not os.path.isdir(evapotranspiration): 
            os.mkdir(evapotranspiration)

        # use the ETCalculator to calculate the ET time series
Developer: eotp, Project: PyHSPF, Lines of code: 69, Source file: preprocessor.py

Example 8: build_watershed

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def build_watershed(self,
                        subbasinfile, 
                        flowfile, 
                        outletfile, 
                        damfile, 
                        gagefile,
                        landfiles, 
                        VAAfile, 
                        years, 
                        HUC8, 
                        filename,
                        plotname = None,
                        ):

        # create a dictionary to store subbasin data

        subbasins = {}

        # create a dictionary to keep track of subbasin inlets

        inlets = {}

        # read in the flow plane data into an instance of the FlowPlane class

        sf = Reader(subbasinfile, shapeType = 5)

        comid_index = sf.fields.index(['ComID',      'N',  9, 0]) - 1
        len_index   = sf.fields.index(['PlaneLenM',  'N',  8, 2]) - 1
        slope_index = sf.fields.index(['PlaneSlope', 'N',  9, 6]) - 1
        area_index  = sf.fields.index(['AreaSqKm',   'N', 10, 2]) - 1
        cx_index    = sf.fields.index(['CenX',       'N', 12, 6]) - 1
        cy_index    = sf.fields.index(['CenY',       'N', 12, 6]) - 1
        elev_index  = sf.fields.index(['AvgElevM',   'N',  8, 2]) - 1

        for record in sf.records():
            comid     = '{}'.format(record[comid_index])
            length    = record[len_index]
            slope     = record[slope_index]
            tot_area  = record[area_index]
            centroid  = [record[cx_index], record[cy_index]]
            elevation = record[elev_index]

            subbasin  = Subbasin(comid)
            subbasin.add_flowplane(length, slope, centroid, elevation)

            subbasins[comid] = subbasin

        # read in the flowline data to an instance of the Reach class

        sf = Reader(flowfile)

        outcomid_index   = sf.fields.index(['OutComID',   'N',  9, 0]) - 1
        gnis_index       = sf.fields.index(['GNIS_NAME',  'C', 65, 0]) - 1
        reach_index      = sf.fields.index(['REACHCODE',  'C',  8, 0]) - 1
        incomid_index    = sf.fields.index(['InletComID', 'N',  9, 0]) - 1
        maxelev_index    = sf.fields.index(['MaxElev',    'N',  9, 2]) - 1
        minelev_index    = sf.fields.index(['MinElev',    'N',  9, 2]) - 1
        slopelen_index   = sf.fields.index(['SlopeLenKM', 'N',  6, 2]) - 1
        slope_index      = sf.fields.index(['Slope',      'N',  8, 5]) - 1
        inflow_index     = sf.fields.index(['InFlowCFS',  'N',  8, 3]) - 1
        outflow_index    = sf.fields.index(['OutFlowCFS', 'N',  8, 3]) - 1
        velocity_index   = sf.fields.index(['VelFPS',     'N',  7, 4]) - 1
        traveltime_index = sf.fields.index(['TravTimeHR', 'N',  8, 2]) - 1

        for record in sf.records():

            outcomid   = '{}'.format(record[outcomid_index])
            gnis       = record[gnis_index]
            reach      = record[reach_index]
            incomid    = '{}'.format(record[incomid_index])
            maxelev    = record[maxelev_index] / 100
            minelev    = record[minelev_index] / 100
            slopelen   = record[slopelen_index]
            slope      = record[slope_index]
            inflow     = record[inflow_index]
            outflow    = record[outflow_index]
            velocity   = record[velocity_index]
            traveltime = record[traveltime_index]

            if isinstance(gnis, bytes): gnis = ''

            subbasin = subbasins[outcomid]

            flow = (inflow + outflow) / 2
            subbasin.add_reach(gnis, maxelev, minelev, slopelen, flow = flow, 
                               velocity = velocity, traveltime = traveltime)
            inlets[outcomid] = incomid

        # open up the outlet file and see if the subbasin has a gage or dam

        sf = Reader(outletfile)

        records = sf.records()

        comid_index = sf.fields.index(['COMID',   'N',  9, 0]) - 1
        nid_index   = sf.fields.index(['NIDID',   'C',  7, 0]) - 1
        nwis_index  = sf.fields.index(['SITE_NO', 'C', 15, 0]) - 1

        nids = {'{}'.format(r[comid_index]):r[nid_index] for r in records 
                if isinstance(r[nid_index], str)}
#......... part of the code is omitted here .........
Developer: eotp, Project: PyHSPF, Lines of code: 103, Source file: preprocessor.py

Example 9: cos

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
        careas[c] = f.variables["area_" + c][:]

# find valid fpus
tarea = 100 * (111.2 / 2) ** 2 * cos(pi * lats / 180)
tarea = resize(tarea, (nlons, nlats)).T
validfpus = []
for i in range(nfpu):
    hareafpu = harea[fpumap == fpu[i]].sum()
    tareafpu = tarea[fpumap == fpu[i]].sum()
    if hareafpu / tareafpu > percent / 100.0:
        validfpus.append(fpu[i])

# load shape file
r = Reader(shapefile)
shapes = r.shapes()
records = r.records()

models = ["epic", "gepic", "lpj-guess", "lpjml", "pdssat", "pegasus"]  # exclude image
gcms = ["gfdl-esm2m", "hadgem2-es", "ipsl-cm5a-lr", "miroc-esm-chem", "noresm1-m"]
crops = ["maize", "wheat", "soy", "rice"] if crop == "all" else [crop]
co2s = ["co2", "noco2"]

hadgemidx = gcms.index("hadgem2-es")

nm, ng, ncr, nco2 = len(models), len(gcms), len(crops), len(co2s)

# variables
sh = (nm, ng, ncr, 3, nfpu, nco2)
dy26arr = masked_array(zeros(sh), mask=ones(sh))
dy85arr = masked_array(zeros(sh), mask=ones(sh))
with nc(infile) as f:
Developer: RDCEP, Project: ggcmi, Lines of code: 33, Source file: blmap.isi1.py

Example 10: merge_shapes

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
def merge_shapes(inputfile, outputfile = None, overwrite = False, 
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try: 
        shapes  = []
        records = [] 
        bboxes  = []

        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

                try: combined = combine_shapes(shapes, bboxes, 
                                               verbose = vverbose)
                except: 
                    if verbose: print('trying alternate trace method')
                    combined = combine_shapes(shapes, bboxes, skip = True, 
                                              verbose = vverbose)

    except:
        if verbose: print('trying alternate trace method')
        shapes  = []
        records = [] 
        bboxes  = []
        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points, omit = True)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:    combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except: 
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 
        print('successfully combined shapes from %s to %s\n' % 
              (inputfile, outputfile))
Developer: eotp, Project: PyHSPF, Lines of code: 86, Source file: vectorutils.py

Example 11: plot_HUC8

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
    def plot_HUC8(self, 
                  flowfile, 
                  cfile,
                  bfile,
                  VAAfile, 
                  elevfile,
                  patchcolor = None,
                  resolution = 400, 
                  colormap = 'gist_earth',
                  grid = False,
                  title = None, 
                  verbose = True,
                  output = None,
                  show = False,
                  ):
        """Makes a plot of the raw NHDPlus data."""

        if verbose: print('generating plot of the watershed\n')

        fig = pyplot.figure()
        subplot = fig.add_subplot(111, aspect = 'equal')
        subplot.tick_params(axis = 'both', which = 'major', labelsize = 10)

        # add the title

        if title is not None: subplot.set_title(title, fontsize = 14)

        if patchcolor is None: facecolor = (1,0,0,0.)
        else:                  facecolor = patchcolor

        # open up and show the boundary

        b = Reader(bfile, shapeType = 5)

        boundary = b.shape(0)
        points = numpy.array(boundary.points)
        subplot.add_patch(self.make_patch(points, facecolor, width = 0.5))

        # open up and show the catchments

        c = Reader(cfile, shapeType = 5)

        extent = self.get_boundaries(c.shapes(), space = 0.02)

        xmin, ymin, xmax, ymax = extent

        # figure out how far one foot is on the map

        points_per_width = 72 * 8
        ft_per_km = 3280.84
        scale_factor = (points_per_width / 
                        self.get_distance([xmin, ymin], [xmax, ymin]) / 
                        ft_per_km)

        # make patches of the catchment area

        for i in range(len(c.records())):
            catchment = c.shape(i)
            points = numpy.array(catchment.points)
            subplot.add_patch(self.make_patch(points, facecolor, width = 0.1))

        # get the flowline attributes, make an "updown" dictionary to follow 
        # flow, and change the keys to comids

        with open(VAAfile, 'rb') as f: flowlineVAAs = pickle.load(f)

        updown = {}
        for f in flowlineVAAs:
            if flowlineVAAs[f].down in flowlineVAAs:
                updown[flowlineVAAs[f].comid] = \
                    flowlineVAAs[flowlineVAAs[f].down].comid

        flowlineVAAs = {flowlineVAAs[f].comid:flowlineVAAs[f] 
                        for f in flowlineVAAs}

        # open up and show the flowfiles

        f = Reader(flowfile, shapeType = 3)
        comid_index = f.fields.index(['COMID', 'N',  9, 0]) - 1

        all_comids = [r[comid_index] for r in f.records()]
        
        # get the flows and velocities from the dictionary
        
        widths = []
        comids = []
        for comid in all_comids:
            if comid in flowlineVAAs:
                flow = flowlineVAAs[comid].flow
                velocity = flowlineVAAs[comid].velocity

                # estimate flow width (ft) assuming a triangular 90 degree channel

                comids.append(comid)
                widths.append(numpy.sqrt(4 * flow / velocity))
        
        # convert widths in feet to points on the figure; exaggerated by 10

        widths = [w * scale_factor * 20 for w in widths]

#......... part of the code is omitted here .........
Developer: geclark330, Project: PyHSPF, Lines of code: 103, Source file: nhdplusextractor.py

Example 12: Reader

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
extractor.download_gagedata(gageid, start, end, output = gageid)

# need to know the reach length; so find the location of the gage, then find 
# the flowline in the shapefile and use the record info to get the length

# first use the NWIS metadata file to get the latitude and longitude of the gage

reader = Reader('{}/USGS_Streamgages-NHD_Locations.shp'.format(NWIS))

# find the record index for the NWIS gage ids

i = [f[0] for f in reader.fields].index('SITE_NO') - 1

# find the index of the gage

j = [r[i] for r in reader.records()].index(gageid)

# use the index to get the latitude and longitude of the station

x, y = reader.shape(j).points[0]

print('location of gage {}: {:.4f}, {:.4f}\n'.format(gageid, x, y))

# open the flowline shapefile to supply reach length (miles or kilometers 
# depending on the unit system)

reader = Reader(flowfile)

# find shapes with a bounding box encompassing the gage to narrow the search

print('searching for the closest flowline to the gage\n')
Developer: djibi2, Project: PyHSPF, Lines of code: 33, Source file: ftable01.py

Example 13: Reader

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
# solar radiation in W/m2; these are the units supplied by the other classes
# in PyHSPF already so no manipulation is needed

# some of the parameters in the Penman-Monteith Equation depend on the 
# geographic location so let's use the information in the shapefile to 
# provide the average longitude, latitude, and elevation

sf = Reader(filename)

# make a list of the fields for each shape

fields = [f[0] for f in sf.fields]

# get the area, centroid and elevation of each shape

areas = [r[fields.index('AreaSqKm') - 1] for r in sf.records()]
xs    = [r[fields.index('CenX')     - 1] for r in sf.records()]
ys    = [r[fields.index('CenY')     - 1] for r in sf.records()]
zs    = [r[fields.index('AvgElevM') - 1] for r in sf.records()]

# get the areal-weighted averages

lon  = sum([a * x for a, x in zip(areas, xs)]) / sum(areas)
lat  = sum([a * y for a, y in zip(areas, ys)]) / sum(areas)
elev = sum([a * z for a, z in zip(areas, zs)]) / sum(areas)

# add the information to the calculator

calculator.add_location(lon, lat, elev)

# it is pretty trivial to get the corresponding reference evapotranspiration 
Developer: djibi2, Project: PyHSPF, Lines of code: 33, Source file: etexample01.py

Example 14: ETCalculator

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
# use the ETCalculator to estimate the evapotranspiration time series

calculator = ETCalculator()

# some of the parameters in the Penman-Monteith Equation depend on the
# geographic location so get the average longitude, latitude, and elevation

sf = Reader(filename)

# make a list of the fields for each shape

fields = [f[0] for f in sf.fields]

# get the area, centroid and elevation of each shape

areas = [r[fields.index("AreaSqKm") - 1] for r in sf.records()]
xs = [r[fields.index("CenX") - 1] for r in sf.records()]
ys = [r[fields.index("CenY") - 1] for r in sf.records()]
zs = [r[fields.index("AvgElevM") - 1] for r in sf.records()]

# get the areal-weighted averages

lon = sum([a * x for a, x in zip(areas, xs)]) / sum(areas)
lat = sum([a * y for a, y in zip(areas, ys)]) / sum(areas)
elev = sum([a * z for a, z in zip(areas, zs)]) / sum(areas)

# add the information to the calculator

calculator.add_location(lon, lat, elev)

# supply the daily tmin and tmax time series to the calculator to get hourly temps
Developer: kbrannan, Project: PyHSPF, Lines of code: 33, Source file: etexample05.py

Example 15: print

# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import records [as alias]
name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
lon_index   = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

# iterate through the records and get whatever information is needed

for r in sf.records():

    name  = r[name_index]
    nidid = r[nid_index]
    lon   = r[lon_index]
    lat   = r[lat_index]
    pur   = r[purp_index]

    print('Dam name:       ', name)
    print('NID ID:         ', nidid)
    print('Longitude:      ', lon)
    print('Latitude:       ', lat)
    print('Primary Purpose:', pur)
    print('')
Developer: djibi2, Project: PyHSPF, Lines of code: 32, Source file: nidexample.py


Note: The shapefile.Reader.records method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.