This article collects and summarizes typical usage examples of the shapefile.Reader.record method in Python. What exactly does Reader.record do, how is it called, and where can you find worked examples? The curated code samples below should help answer those questions. You can also explore the usage of the class the method belongs to, shapefile.Reader.
The following presents 11 code examples of the Reader.record method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
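Before the examples, here is a minimal, self-contained sketch of a typical Reader.record call (the 'watersheds' base name is hypothetical):

import shapefile  # pyshp

sf = shapefile.Reader('watersheds')   # hypothetical shapefile base name

# sf.fields begins with a DeletionFlag entry, so attribute positions in a
# record are offset by -1 relative to sf.fields
names = [f[0] for f in sf.fields[1:]]

record = sf.record(0)                 # attribute values of the first feature
print(dict(zip(names, record)))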
Example 1: merge_shapes
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def merge_shapes(inputfile,
                 outputfile = None,
                 overwrite = False,
                 verbose = True,
                 vverbose = False,
                 ):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose:
            print('combined watershed shapefile {} exists'.format(outputfile))
        return

    if verbose: print('combining shapes from {}\n'.format(inputfile) +
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)

    try:
        combined = combine_shapes(r.shapes(), verbose = vverbose)
    except:
        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)
    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))
    w.save(outputfile)

    if verbose:
        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
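A minimal usage sketch (the 'catchments' and 'boundary' base names are hypothetical):

# merge all the polygons in catchments.shp into a single boundary.shp
merge_shapes('catchments', outputfile = 'boundary', overwrite = True)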
Example 2: make_timeseries
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def make_timeseries(directory, HUC8, start, end, evapstations = None,
                    plot = True):
    """Makes an hourly timeseries of the reference evapotranspiration using
    the ASCE hourly Penman-Monteith Equation."""

    nrcm = '{}/{}/NRCM'.format(directory, HUC8)

    # start and end datetime instances

    s = datetime.datetime(start, 1, 1)
    e = datetime.datetime(end, 1, 1)

    # average the time series together from the NRCM simulation

    average_timeseries(nrcm)

    # open the watershed info to use to make subbasin precipitation

    watershedfile = '{}/{}/watershed'.format(directory, HUC8)

    with open(watershedfile, 'rb') as f: watershed = pickle.load(f)

    make_precipitation(watershed.subbasins, nrcm)

    # convert temperature and humidity to dewpoint

    make_dewpoint('{}/{}/NRCM/averages'.format(directory, HUC8))

    # open the 3-hr temperature, solar, and dewpoint, and daily wind files

    tempfile  = '{}/averages/average_temperature'.format(nrcm)
    solarfile = '{}/averages/average_solar'.format(nrcm)
    dewfile   = '{}/averages/average_dewpoint'.format(nrcm)
    windfile  = '{}/averages/average_wind'.format(nrcm)

    # watershed timeseries

    output = '{}/watershedtimeseries'.format(nrcm)

    if not os.path.isdir(output): os.mkdir(output)

    hourlytemp  = '{}/hourlytemperature'.format(output)
    hourlysolar = '{}/hourlysolar'.format(output)
    dailydew    = '{}/dewpoint'.format(output)
    dailywind   = '{}/wind'.format(output)
    hourlyRET   = '{}/hourlyRET'.format(output)
    hourlyPETs  = '{}/hourlyPETs'.format(output)

    if not os.path.isfile(hourlyRET):

        print('calculating an hourly time series for the reference ET...\n')

        # open the bounding box and get the mean lat, lon, and elevation

        f  = '{0}/{1}/{1}boundaries'.format(directory, HUC8)
        sh = Reader(f)

        record = sh.record(0)
        lon, lat, elev = record[-3:]

        with open(windfile,  'rb') as f: ts, Ws   = zip(*pickle.load(f))
        with open(tempfile,  'rb') as f: ts, Ts   = zip(*pickle.load(f))
        with open(solarfile, 'rb') as f: ts, Ss   = zip(*pickle.load(f))
        with open(dewfile,   'rb') as f: ts, dews = zip(*pickle.load(f))

        # dump the daily series

        with open(dailydew, 'wb') as f:
            pickle.dump((s, 1440, list(dews)), f)

        with open(dailywind, 'wb') as f:
            pickle.dump((s, 1440, list(Ws)), f)

        # dump all the hourly series and convert the solar radiation
        # from Watts/m2 to MJ/hour/m2

        temp  = [T for T in Ts for i in range(3)]
        solar = [S for S in Ss for i in range(3)]

        with open(hourlysolar, 'wb') as f: pickle.dump((s, 60, solar), f)
        with open(hourlytemp,  'wb') as f: pickle.dump((s, 60, temp),  f)

        # convert to hourly numpy arrays

        temp     = numpy.array(temp)
        solar    = numpy.array(solar) * 3600 / 10**6
        wind     = numpy.array([w for w in Ws   for i in range(24)])
        dewpoint = numpy.array([T for T in dews for i in range(24)])

        # dates

        dates = [s + i * datetime.timedelta(hours = 1)
                 for i in range(len(solar))]

        RET = penman_hourly(lat, lon, elev, dates, temp, dewpoint, solar,
                            wind, verbose = False)

        # dump the timeseries

        with open(hourlyRET, 'wb') as f: pickle.dump((s, 60, RET), f)
#.........the remainder of this code is omitted.........
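Each series above is pickled as a (start, timestep-in-minutes, data) tuple; a short sketch of reading one back (the path is hypothetical):

import datetime, pickle

with open('NRCM/watershedtimeseries/hourlyRET', 'rb') as f:
    start, tstep, data = pickle.load(f)

# reconstruct the timestamps from the start date and the timestep
times = [start + i * datetime.timedelta(minutes = tstep)
         for i in range(len(data))]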
Example 3: plot_gage_subbasin
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def plot_gage_subbasin(self, hspfmodel, folder):
    """Makes a plot of the subbasin area."""

    subbasinfile  = '{}/subbasins'.format(folder)
    boundaryfile  = '{}/boundary'.format(folder)
    flowfile      = '{}/flowlines'.format(folder)
    combinedfile  = '{}/combined'.format(folder)
    watershedplot = '{}/watershed.png'.format(folder)

    # make a shapefile of the subbasins for the watershed

    f = '{0}/{1}/{1}subbasins'.format(self.directory, self.HUC8)

    for out in (subbasinfile, boundaryfile, flowfile, combinedfile):
        if not os.path.isfile(out + '.prj'):
            shutil.copy(f + '.prj', out + '.prj')

    if not os.path.isfile(subbasinfile + '.shp'):

        subshapes  = []
        subrecords = []
        for subbasin in hspfmodel.subbasins:

            f = '{0}/{1}/{2}/combined'.format(self.directory, self.HUC8,
                                              subbasin)
            s = Reader(f, shapeType = 5)

            subshapes.append(s.shape(0).points)
            subrecords.append(s.record(0))

        w = Writer(shapeType = 5)

        for field in s.fields:    w.field(*field)
        for record in subrecords: w.record(*record)
        for shape in subshapes:   w.poly(shapeType = 5, parts = [shape])

        w.save(subbasinfile)

    if not os.path.isfile(combinedfile + '.shp'):

        fshapes  = []
        frecords = []
        for subbasin in hspfmodel.subbasins:

            f = '{0}/{1}/{2}/combined_flowline'.format(self.directory,
                                                       self.HUC8,
                                                       subbasin)
            r = Reader(f, shapeType = 3)

            fshapes.append(r.shape(0).points)
            frecords.append(r.record(0))

        w = Writer(shapeType = 3)

        for field in r.fields:  w.field(*field)
        for record in frecords: w.record(*record)
        for shape in fshapes:   w.poly(shapeType = 3, parts = [shape])

        w.save(combinedfile)

    # merge the shapes into a watershed

    if not os.path.exists(boundaryfile + '.shp'):
        merge_shapes(subbasinfile, outputfile = boundaryfile)

    # make a flowline file for the subbasins for the watershed

    if not os.path.isfile(flowfile + '.shp'):

        shapes  = []
        records = []
        for subbasin in hspfmodel.subbasins:

            f = '{0}/{1}/{2}/flowlines'.format(self.directory,
                                               self.HUC8, subbasin)
            r = Reader(f, shapeType = 3)

            for shape in r.shapes():   shapes.append(shape.points)
            for record in r.records(): records.append(record)

        w = Writer(shapeType = 3)

        for field in r.fields:  w.field(*field)
        for record in records:  w.record(*record)
        for shape in shapes:    w.poly(shapeType = 3, parts = [shape])

        w.save(flowfile)

    if not os.path.isfile(watershedplot):
        plot_gage_subbasin(folder, self.HUC8, self.gageid, hspfmodel,
                           output = watershedplot)
Example 4: Reader
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
# the result is the same as in the previous example; as before, the
# subbasin_catchments shapefile containing the centroid of each aggregation
# is used

sf = Reader(filename)

# index of the comid, latitude, and longitude records

comid_index = [f[0] for f in sf.fields].index('ComID') - 1
lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]

    its = comid, lon, lat
    print('aggregating timeseries for comid {} at {}, {}\n'.format(*its))

    precipitation = processor.aggregate('precip3240', 'precip', start, end,
                                        method = 'IDWA', longitude = lon,
                                        latitude = lat)

    mean = sum(precipitation) / (end - start).days * 365.25

    print('aggregated annual average precipitation: {:.1f} in\n'.format(mean))
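The 'IDWA' method presumably stands for inverse-distance-weighted averaging of nearby station values; a minimal illustrative sketch of the idea (not the actual aggregate implementation):

def idwa(values, distances, power = 2):
    # weight each station value by 1 / distance**power, then normalize
    weights = [1 / d**power for d in distances]
    return sum(w * v for w, v in zip(weights, values)) / sum(weights)

# three hypothetical gages 5, 10, and 20 km from the centroid
print(idwa([1.2, 0.8, 0.5], [5, 10, 20]))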
Example 5: climate
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
#.........part of this code is omitted.........
ts = s, 60, climateprocessor.aggregate('NSRDB', 'metstat', s, e)

with open(hsolar, 'wb') as f: pickle.dump(ts, f)

# aggregate the hourly solar to daily

dsolar = '{}/solar'.format(daily)
if not os.path.isfile(dsolar):

    with open(hsolar, 'rb') as f: t, tstep, data = pickle.load(f)

    ts = s, 1440, [sum(data[i:i+24]) / 24
                   for i in range(0, 24 * (e-s).days, 24)]

    with open(dsolar, 'wb') as f: pickle.dump(ts, f)

# aggregate the hourly precipitation for each subbasin using IDWA

precip = '{}/hourlyprecipitation'.format(climatedata)
if not os.path.isdir(precip): os.mkdir(precip)

# use the subbasin shapefile to get the location of the centroids

sf = Reader(subbasinfile)

# index of the comid, latitude, and longitude records

comid_index = [f[0] for f in sf.fields].index('ComID')    - 1
lon_index   = [f[0] for f in sf.fields].index('CenX')     - 1
lat_index   = [f[0] for f in sf.fields].index('CenY')     - 1
elev_index  = [f[0] for f in sf.fields].index('AvgElevM') - 1
area_index  = [f[0] for f in sf.fields].index('AreaSqKm') - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]

    # check if the aggregated time series exists or calculate it

    subbasinprecip = '{}/{}'.format(precip, comid)
    if not os.path.isfile(subbasinprecip):

        if verbose:
            its = comid, lon, lat
            print('aggregating timeseries for comid ' +
                  '{} at {}, {}\n'.format(*its))

        p = climateprocessor.aggregate('precip3240', 'precip', s, e,
                                       method = 'IDWA',
                                       longitude = lon,
                                       latitude = lat)
        ts = s, 60, p
        with open(subbasinprecip, 'wb') as f: pickle.dump(ts, f)

# make a directory for the evapotranspiration time series

evapotranspiration = '{}/evapotranspiration'.format(climatedata)
if not os.path.isdir(evapotranspiration):
    os.mkdir(evapotranspiration)

# use the ETCalculator to calculate the ET time series
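The slice-and-average conversion from hourly to daily values above has a compact numpy equivalent, assuming the record spans whole days:

import numpy

hourly = numpy.arange(48.)                    # two hypothetical days of data
daily  = hourly.reshape(-1, 24).mean(axis = 1)
print(daily)                                  # [11.5 35.5]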
Example 6: extract_bbox
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def extract_bbox(self, bbox, output, verbose = True):
    """Extracts the NID dam locations for a watershed from the dam
    shapefile and the 8-digit hydrologic unit code of interest.
    """

    self.download_compressed()

    xmin, ymin, xmax, ymax = bbox

    # copy the projection files

    if verbose: print('copying the projections from the NID source\n')

    projection = self.source + '.prj'

    shutil.copy(projection, output + '.prj')

    # get the dams within the watershed

    if verbose: print('reading the dam file\n')

    sf = Reader(self.source, shapeType = 1)

    # work around for issues with pyshp

    damrecords = []
    for i in range(len(sf.shapes())):
        try:    damrecords.append(sf.record(i))
        except: damrecords.append([-100 for i in range(len(sf.fields))])

    name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
    nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
    long_index  = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
    lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
    river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
    owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
    type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
    purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
    year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
    high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
    mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
    nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
    area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

    # iterate through the records and determine which points are in the box

    if verbose: print('extracting dams into new file\n')

    dam_indices = []
    for i, record in enumerate(damrecords):

        lat = record[lat_index]
        lon = record[long_index]

        if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
            dam_indices.append(i)

    # write the data from the bbox to a new shapefile

    w = Writer(shapeType = 1)

    for field in sf.fields: w.field(*field)

    for i in dam_indices:

        point = sf.shape(i).points[0]
        w.point(*point)

        values = damrecords[i]

        rs = []
        for value in values:
            if isinstance(value, bytes): value = value.decode('utf-8')
            rs.append(value)

        w.record(*rs)

    w.save(output)

    if verbose:
        print('successfully extracted NID dam locations to new file\n')
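The inside_box test used above is not shown in this example; presumably it is a simple rectangle containment check along these lines (an illustrative sketch, not the source implementation):

def inside_box(p1, p2, p):
    # is point p inside the rectangle with opposite corners p1 and p2?
    return (min(p1[0], p2[0]) <= p[0] <= max(p1[0], p2[0]) and
            min(p1[1], p2[1]) <= p[1] <= max(p1[1], p2[1]))

print(inside_box([-94, 41], [-93, 42], [-93.5, 41.5]))   # True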
Example 7: calculate_landuse
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def calculate_landuse(self,
                      rasterfile,
                      shapefile,
                      aggregatefile,
                      attribute,
                      csvfile = None,
                      ):
    """
    Calculates the land use for the given year for the "attribute"
    feature attribute in the polygon shapefile using the aggregate
    mapping provided in the "aggregatefile."
    """

    # make sure the files exist

    for f in rasterfile, shapefile + '.shp', aggregatefile:
        if not os.path.isfile(f):
            print('error, {} does not exist\n'.format(f))
            raise FileNotFoundError(f)

    # read the aggregate file

    self.read_aggregatefile(aggregatefile)

    # open the shapefile

    sf = Reader(shapefile, shapeType = 5)

    attributes = [f[0] for f in sf.fields]

    try: index = attributes.index(attribute) - 1
    except:
        print('error: attribute ' +
              '{} is not in the shapefile fields'.format(attribute))
        raise

    # iterate through the shapes, get the fractions and save them

    for i in range(len(sf.records())):

        points = numpy.array(sf.shape(i).points)
        record = sf.record(i)

        k = record[index]

        # store the results

        self.landuse[k] = {r:0 for r in self.order}

        try:

            values, origin = get_raster_in_poly(rasterfile, points,
                                                verbose = False)
            values = values.flatten()
            values = values[values.nonzero()]

            tot_pixels = len(values)

            # count the number of pixels of each land use type

            for v in numpy.unique(values):

                # find all the indices for each pixel value

                pixels = numpy.argwhere(values == v)

                # normalize by the total # of pixels

                f = len(values[pixels]) / tot_pixels

                # add the landuse to the aggregated value

                self.landuse[k][self.groups[v]] += f

        # work around for small shapes

        except: self.landuse[k][self.groups[0]] = 1

    if csvfile is not None: self.make_csv(attribute, csvfile)

    return self.landuse
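For reference, the per-value pixel fractions computed in the loop above can be obtained directly with numpy.unique; a self-contained sketch with hypothetical pixel codes:

import numpy

values = numpy.array([11, 21, 21, 82, 82, 82])   # hypothetical raster codes
vals, counts = numpy.unique(values, return_counts = True)
fractions = dict(zip(vals, counts / values.size))
print(fractions)                                 # ~{11: 0.17, 21: 0.33, 82: 0.5}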
Example 8: plot_landuse
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def plot_landuse(self,
                 landuse,
                 catchments,
                 attribute,
                 categoryfile,
                 output = None,
                 datatype = 'raw',
                 overwrite = False,
                 pixels = 1000,
                 border = 0.02,
                 lw = 0.5,
                 show = False,
                 verbose = True,
                 vverbose = False
                 ):
    """
    Makes a plot of the landuse of a catchment shapefile on top of a
    raster landuse file.
    """

    if self.order is None:
        print('error: no landuse aggregation file information provided\n')
        raise RuntimeError('no landuse aggregation data')

    self.read_categoryfile(categoryfile)

    if verbose: print('generating a {} land use plot\n'.format(datatype))

    # make the figure

    fig = pyplot.figure()
    subplot = fig.add_subplot(111, aspect = 'equal')
    subplot.tick_params(axis = 'both', which = 'major', labelsize = 11)

    # add the title

    if datatype == 'results': title = 'Land Use Fractions'
    else:                     title = 'Raw Land Use Data'

    subplot.set_title(title, size = 14)

    # open the shapefile and get the bounding box

    s = Reader(catchments, shapeType = 5)

    xmin, ymin, xmax, ymax = s.bbox

    # get the index of the field for the attribute matching

    index = [f[0] for f in s.fields].index(attribute) - 1

    # set up a custom colormap using the rgbs supplied in the aggregate file

    color_table = [(self.reds[g] / 255, self.greens[g] / 255,
                    self.blues[g] / 255) for g in self.order]

    cmap = colors.ListedColormap(color_table)

    # provide the cutoff boundaries for the mapping of values to the table

    bounds = [i - 0.5 for i in range(len(self.order) + 1)]

    # create a norm to map the bounds to the colors

    norm = colors.BoundaryNorm(bounds, cmap.N)

    # get the pixel width and origin

    w = (xmax - xmin) / pixels

    # calculate the image array height and the height of a pixel

    height = int(numpy.ceil((ymax - ymin) / (xmax - xmin) * pixels))
    h = (ymax - ymin) / height

    # set up the image array

    image_array = numpy.zeros((height, pixels), dtype = 'uint8')

    # get the land use fraction for each category

    if datatype == 'results':

        # iterate through the shapes and make patches

        for i in range(len(s.records())):

            comid  = s.record(i)[index]
            points = numpy.array(s.shape(i).points)

            # convert the shape to pixel coordinates

            pixel_polygon = [(get_pixel(x, xmin, w), get_pixel(y, ymin, h))
                             for x, y in points]

            # make a PIL image to use as a mask

            rasterpoly = Image.new('L', (pixels, height), 1)
            rasterize  = ImageDraw.Draw(rasterpoly)

            # rasterize the polygon
#.........the remainder of this code is omitted.........
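The rasterization step is elided above; presumably it draws the polygon into the PIL mask and converts the mask to a numpy array, along these lines (a hedged sketch, not the omitted source):

rasterize.polygon(pixel_polygon, 0)                  # fill polygon cells with 0
mask = 1 - numpy.array(rasterpoly, dtype = 'uint8')  # 1 inside, 0 outside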
Example 9: enumerate
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
contains = [i for i, b in enumerate(bboxes)
            if b[0] <= x and x <= b[2] and b[1] <= y and y <= b[3]]

# find the distances between the gage and the points of each overlapping
# shape (squared distances suffice for finding the minimum)

distances = [min([(x1 - x)**2 + (y1 - y)**2
                  for x1, y1 in reader.shape(i).points])
             for i in contains]

# find the shape with the point closest to the gage

closest = contains[distances.index(min(distances))]

# read the record for the flowline

record = reader.record(closest)

# find the record indices of the comid and reach length in km in the file

i = [f[0] for f in reader.fields].index('LENGTHKM') - 1
j = [f[0] for f in reader.fields].index('COMID')    - 1

# get the reach length and common identifier

length = record[i]
comid  = record[j]

it = comid, length
print('comid {} is closest to the gage and has a length of {} km\n'.format(*it))

# make an instance of the FtableCalculator to use for the data from the file
Example 10: merge_shapes
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def merge_shapes(inputfile, outputfile = None, overwrite = False,
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return

    if verbose: print('combining shapes from {}\n'.format(inputfile) +
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try:
        shapes  = []
        records = []
        bboxes  = []

        for i in range(n):
            shape  = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try: combined = combine_shapes(shapes, bboxes,
                                       verbose = vverbose)
        except:
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    except:
        if verbose: print('trying alternate trace method')

        shapes  = []
        records = []
        bboxes  = []

        for i in range(n):
            shape  = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points, omit = True)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try: combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except:
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)

    w.record(*r.record(0))

    w.save(outputfile)

    if verbose:
        print('successfully combined shapes from %s to %s\n' %
              (inputfile, outputfile))
Example 11: extract_aquifers
# Required import: from shapefile import Reader [as alias]
# Or: from shapefile.Reader import record [as alias]
def extract_aquifers(directory, HUC8, aquifers, pad = 0.2, verbose = True):
    """Extracts aquifers from the source datafile to the destination using
    the HUC8 boundaries for the query."""

    start = time.time()

    # open up the HUC8 boundary shapefile and use it to get the bounding box

    shapefile = Reader(directory + '/%s/%scatchments' % (HUC8, HUC8))

    xmin, ymin, xmax, ymax = get_boundaries(shapefile.shapes())

    # convert to bounding corners for testing

    p1 = [xmin - pad * (xmax - xmin), ymin - pad * (ymax - ymin)]
    p2 = [xmax + pad * (xmax - xmin), ymax + pad * (ymax - ymin)]

    shapefile = None

    # start by copying the projection files

    if verbose: print('\ncopying the projections\n')

    shutil.copy(directory + '/%s/%scatchments.prj' % (HUC8, HUC8),
                directory + '/%s/%saquifers.prj'   % (HUC8, HUC8))

    # open the aquifer file

    if verbose: print('reading the aquifer file\n')

    shapefile = Reader(aquifers, shapeType = 5)

    # work around for issues with pyshp

    records = []
    for i in range(len(shapefile.shapes())):
        try:    records.append(shapefile.record(i))
        except: records.append('')

    # use the bounding boxes to see if the shapes are within the watershed area

    if verbose: print('searching for aquifers in the watershed\n')

    bboxes = [shapefile.shape(i).bbox for i in range(len(records))]

    corners = [[[b[0], b[1]], [b[0], b[3]], [b[2], b[1]], [b[2], b[3]]]
               for b in bboxes]

    indices = [i for i, c in zip(range(len(corners)), corners) if
               any([inside_box(p1, p2, p) for p in c]) or
               all([inside_box(c[0], c[3], p1), inside_box(c[0], c[3], p2)])]

    # remove any non-aquifers

    indices = [i for i in indices if shapefile.record(i)[4] != 999]

    # find a record for the non-aquifer

    i = 0
    while shapefile.record(i)[4] != 999: i += 1

    nonrecord = shapefile.record(i)
    nonrecord[1] = nonrecord[1].decode('utf-8')
    nonrecord[5] = 0
    nonrecord[6] = 0

    if len(indices) == 0:
        if verbose: print('query returned no values, returning\n')
        return

    # write the data from the HUC8 to a new shapefile

    w = Writer(shapeType = 5)

    for field in shapefile.fields: w.field(*field)

    for i in indices:
        shape = shapefile.shape(i)

        # check for multiple parts; the part index list marks the start of
        # each ring, so append the point count to close the final slice

        if len(shape.parts) > 1:
            starts = list(shape.parts) + [len(shape.points)]
            parts = [shape.points[i:j]
                     for i, j in zip(starts[:-1], starts[1:])]
        else: parts = [shape.points]

        record = records[i]

        # little work around for blank binary values

        if isinstance(record[1], bytes):
            record[1] = record[1].decode('utf-8')

        w.poly(shapeType = 5, parts = parts)
        w.record(*record)

    # add a shape for the bounding box showing no aquifer locations

    part = [p1, [p1[0], p2[1]], p2, [p2[0], p1[1]]]
#.........the remainder of this code is omitted.........