This article collects typical usage examples of the Python function safe.impact_functions.core.get_hazard_layer. If you are wondering what get_hazard_layer does, how to call it, or want to see it used in real code, the curated examples below should help.
Fifteen code examples of the get_hazard_layer function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
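Before the examples, here is a minimal sketch of the calling pattern they all share: build a list of layers, then let get_hazard_layer and get_exposure_layer pick out the appropriately tagged layer. The file names below are hypothetical, and read_layer is assumed to be the loader from safe.storage.core that InaSAFE uses for this purpose:

from safe.storage.core import read_layer  # assumed loader for hazard/exposure files
from safe.impact_functions.core import get_hazard_layer, get_exposure_layer

# Hypothetical inputs: a hazard raster and an exposure vector layer.
layers = [read_layer('earthquake_mmi.tif'),  # layer whose keywords mark it as hazard
          read_layer('buildings.shp')]       # layer whose keywords mark it as exposure

hazard = get_hazard_layer(layers)        # returns the hazard layer from the list
exposure = get_exposure_layer(layers)    # returns the exposure layer from the list
print(hazard.get_name())
print(exposure.get_name())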
Example 1: run
def run(layers):
"""Risk plugin for earthquake school damage
"""
# Extract data
H = get_hazard_layer(layers) # Ground shaking
E = get_exposure_layer(layers) # Building locations
# Interpolate hazard level to building locations
H = assign_hazard_values_to_exposure_data(H, E,
attribute_name='MMI')
# Extract relevant numerical data
coordinates = E.get_geometry()
shaking = H.get_data()
# Calculate building damage
building_damage = []
for i in range(len(shaking)):
x = float(shaking[i]['MMI'])
if x < 6.0 or (x != x): # x != x -> check for nan pre python 2.6
value = 0.0
else:
value = (0.692 * (x ** 4) -
15.82 * (x ** 3) +
135.0 * (x ** 2) -
509.0 * x + 714.4)
building_damage.append({'DAMAGE': value, 'MMI': x})
# Create new layer and return
V = Vector(data=building_damage,
projection=E.get_projection(),
geometry=coordinates)
return V
Example 2: run
def run(layers):
"""Risk plugin for earthquake fatalities
Input
layers: List of layers expected to contain
H: Raster layer of flood depth
P: Raster layer of population data on the same grid as H
"""
threshold = 1 # Load above which people are regarded affected [kg/m2]
# Identify hazard and exposure layers
inundation = get_hazard_layer(layers) # Tephra load [kg/m2]
population = get_exposure_layer(layers) # Density [people/km^2]
# Extract data as numeric arrays
D = inundation.get_data(nan=0.0) # Depth
P = population.get_data(nan=0.0, scaling=True) # Population density
# Calculate impact as population exposed to depths > threshold
I = numpy.where(D > threshold, P, 0)
# Generate text with result for this study
number_of_people_affected = numpy.nansum(I.flat)
impact_summary = ('%i people affected by ash levels greater '
'than %i kg/m^2' % (number_of_people_affected,
threshold))
# Create raster object and return
R = Raster(I,
projection=inundation.get_projection(),
geotransform=inundation.get_geotransform(),
name='People affected',
keywords={'impact_summary': impact_summary})
return R
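The core idiom in this example, numpy.where to zero out population where the hazard is below the threshold followed by numpy.nansum for the total, works with plain numpy arrays; the tiny grids below are made-up illustration data:

import numpy

D = numpy.array([[0.0, 2.5], [0.5, 3.0]])      # hazard values, e.g. ash load [kg/m^2]
P = numpy.array([[10.0, 20.0], [30.0, 40.0]])  # population per cell
threshold = 1

I = numpy.where(D > threshold, P, 0)   # keep population only where D exceeds threshold
print(I)                               # [[ 0. 20.] [ 0. 40.]]
print(numpy.nansum(I.flat))            # 60.0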
Example 3: prepare
def prepare(self, layers):
"""Prepare this impact function for running the analysis.
This method should normally be called in your concrete class's
run method before it attempts to do any real processing. This
method will do any needed house keeping such as:
* checking that the exposure and hazard layers sufficiently
overlap (post 3.1)
* clipping or subselecting features from both layers such that
only features / coverage within the actual analysis extent
will be analysed (post 3.1)
* raising errors if any untenable condition exists e.g. extent has
no valid CRS. (post 3.1)
We suggest to overload this method in your concrete class
implementation so that it includes any impact function specific checks
too.
..note: For 3.1, we will still do those preprocessing in analysis
class. We will just need to check if the function_type is
'qgis2.0', it needs to have the extent set.
:param layers: List of layers (hazard and exposure). This is
necessary now, until we streamline the preprocess in the base class
and remove unnecessary routines in analysis, impact_calculator,
impact_calculator_thread, and calculate_safe_impact module.
:type layers: list
# """
if layers is not None:
self.hazard = get_hazard_layer(layers)
self.exposure = get_exposure_layer(layers)
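To show how prepare is intended to be used from a concrete class (the docstring above says to call it at the start of run), a minimal hypothetical subclass could look like this; the class name and run body are invented for illustration and assume get_hazard_layer / get_exposure_layer are imported from safe.impact_functions.core:

class ExampleImpactFunction(object):
    """Hypothetical impact function showing the prepare-then-run idiom."""

    def prepare(self, layers):
        # Simplified version of the base class method above.
        if layers is not None:
            self.hazard = get_hazard_layer(layers)
            self.exposure = get_exposure_layer(layers)

    def run(self, layers):
        # Call prepare first so self.hazard and self.exposure are populated.
        self.prepare(layers)
        return self.hazard.get_name(), self.exposure.get_name()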
Example 4: run
def run(layers):
"""Risk plugin for volcano population impact
Input
layers: List of layers expected to contain
H: Raster layer of volcanic hazard level
P: Raster layer of population data on the same grid as H
"""
# Identify hazard and exposure layers
# Volcanic hazard level [0-1]
volcanic_hazard_level = get_hazard_layer(layers)
population = get_exposure_layer(layers) # Density [people/area]
# Extract data as numeric arrays
V = volcanic_hazard_level.get_data(nan=0.0)
# Population density
P = population.get_data(nan=0.0, scaling=True)
# Calculate impact as population exposed to depths > threshold
I = numpy.where(V > 2.0 / 3, P, 0)
# Generate text with result for this study
number_of_people_affected = numpy.nansum(I.flat)
impact_summary = ('%i people affected by volcanic hazard level greater'
' than 0.667' % number_of_people_affected)
# Create raster object and return
R = Raster(I,
projection=volcanic_hazard_level.get_projection(),
geotransform=volcanic_hazard_level.get_geotransform(),
name='People affected',
keywords={'impact_summary': impact_summary})
return R
Example 5: run
def run(self, layers):
"""Risk plugin for tsunami population
"""
thresholds = [0.2, 0.3, 0.5, 0.8, 1.0]
#threshold = 1 # Depth above which people are regarded affected [m]
# Identify hazard and exposure layers
inundation = get_hazard_layer(layers) # Tsunami inundation [m]
population = get_exposure_layer(layers) # Population density
# Extract data as numeric arrays
D = inundation.get_data(nan=0.0) # Depth
P = population.get_data(nan=0.0, scaling=True) # Population density
# Calculate impact as population exposed to depths > 1m
I_map = numpy.where(D > thresholds[-1], P, 0)
# Generate text with result for this study
number_of_people_affected = numpy.nansum(I_map.flat)
# Do breakdown
# Create report
impact_summary = ('<table border="0" width="320px">'
' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
' <tr></tr>' % ('Ambang batas',
'Jumlah orang terdampak'))
counts = []
for i, threshold in enumerate(thresholds):
I = numpy.where(D > threshold, P, 0)
counts.append(numpy.nansum(I.flat))
impact_summary += ' <tr><td>%s m</td><td>%i</td></tr>' % (
threshold, counts[i])
impact_summary += '</table>'
# Create raster object and return
R = Raster(I_map,
projection=inundation.get_projection(),
geotransform=inundation.get_geotransform(),
name='People affected by more than 1m of inundation',
keywords={'impact_summary': impact_summary})
return R
Example 6: run
def run(layers,
teta=14.05, beta=0.17): # zeta=2.15):
"""Risk plugin for earthquake fatalities
Input
H: Numerical array of hazard data
E: Numerical array of exposure data
"""
# Suppress warnings about invalid value in multiply and divide zero
# http://comments.gmane.org/gmane.comp.python.numeric.general/43218
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
old_numpy_setting = numpy.seterr(invalid='ignore')
numpy.seterr(divide='ignore')
# Identify input layers
intensity = get_hazard_layer(layers)
population = get_exposure_layer(layers)
# Extract data
H = intensity.get_data(nan=0)
P = population.get_data(nan=0)
# Calculate impact
logHazard = 1 / beta * numpy.log(H / teta)
# Convert array to be standard floats expected by cdf
arrayout = numpy.array([[float(value) for value in row]
for row in logHazard])
x = arrayout * P
F = normal_cdf(x)
numpy.seterr(**old_numpy_setting)
# Create new layer and return
R = Raster(F,
projection=population.get_projection(),
geotransform=population.get_geotransform(),
name='Estimated fatalities')
return R
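normal_cdf in this example is InaSAFE's own numerical helper; assuming it is the standard normal cumulative distribution applied element-wise, the same computation can be sketched with scipy on made-up grids:

import numpy
from scipy.stats import norm

teta, beta = 14.05, 0.17
H = numpy.array([[6.0, 7.5], [8.0, 9.0]])          # made-up MMI grid
P = numpy.array([[100.0, 200.0], [300.0, 400.0]])  # made-up population grid

with numpy.errstate(divide='ignore', invalid='ignore'):
    log_hazard = 1 / beta * numpy.log(H / teta)

# Element-wise standard normal CDF, mirroring x = arrayout * P; F = normal_cdf(x)
F = norm.cdf(log_hazard * P)
print(F)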
Example 7: run
def run(layers):
"""Risk plugin for earthquake school damage
"""
# Extract data
H = get_hazard_layer(layers) # Ground shaking
E = get_exposure_layer(layers) # Building locations
# Interpolate hazard level to building locations
H = H.interpolate(E)
# Extract relevant numerical data
coordinates = E.get_geometry()
shaking = H.get_data()
# Calculate building damage
building_damage = []
for i in range(len(shaking)):
x = float(shaking[i].values()[0])
if x < 6.0 or (x != x): # x != x -> check for nan pre python 2.6
value = 0.0
else:
print x
value = (0.692 * (x ** 4) -
15.82 * (x ** 3) +
135.0 * (x ** 2) -
509.0 * x + 714.4)
building_damage.append({'DAMAGE': value, 'MMI': x})
# FIXME (Ole): Need helper to generate new layer using
# correct spatial reference
# (i.e. sensibly wrap the following lines)
projection = E.get_projection()
V = Vector(data=building_damage,
projection=E.get_projection(),
geometry=coordinates)
return V
Example 8: run
def run(layers):
"""Risk plugin for earthquake fatalities
Input
layers: List of layers expected to contain
H: Raster layer of flood depth
P: Raster layer of population data on the same grid as H
"""
# Depth above which people are regarded affected [m]
threshold = 0.1
# Identify hazard and exposure layers
inundation = get_hazard_layer(layers) # Flood inundation [m]
# Get population and gender ratio
population = gender_ratio = None
for layer in get_exposure_layers(layers):
keywords = layer.get_keywords()
if 'datatype' not in keywords:
population = layer
else:
datatype = keywords['datatype']
if 'ratio' not in datatype:
population = layer
else:
# if 'female' in datatype and 'ratio' in datatype:
gender_ratio_unit = keywords['unit']
msg = ('Unit for gender ratio must be either '
'"percent" or "ratio"')
if gender_ratio_unit not in ['percent', 'ratio']:
raise Exception(msg)
gender_ratio = layer
msg = 'No population layer was found in: %s' % str(layers)
verify(population is not None, msg)
# Extract data as numeric arrays
D = inundation.get_data(nan=0.0) # Depth
# Calculate impact as population exposed to depths > threshold
if population.get_resolution(native=True, isotropic=True) < 0.0005:
# Keep this for backwards compatibility just a little while
# This uses the original custom population set and
# serves as a reference
P = population.get_data(nan=0.0) # Population density
pixel_area = 2500
I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area
else:
# This is the new generic way of scaling (issue #168 and #172)
P = population.get_data(nan=0.0, scaling=True)
I = numpy.where(D > threshold, P, 0)
if gender_ratio is not None:
# Extract gender ratio at each pixel (as ratio)
G = gender_ratio.get_data(nan=0.0)
if gender_ratio_unit == 'percent':
G /= 100
# Calculate breakdown
P_female = P * G
P_male = P - P_female
I_female = I * G
I_male = I - I_female
# Generate text with result for this study
total = str(int(sum(P.flat) / 1000))
count = str(int(sum(I.flat) / 1000))
# Create report
impact_summary = ('<table border="0" width="320px">'
' <tr><td><b>%s:</b></td>'
'<td align="right"><b>%s</b></td></tr>'
% ('Jumlah Penduduk', total))
if gender_ratio is not None:
total_female = str(int(sum(P_female.flat) / 1000))
total_male = str(int(sum(P_male.flat) / 1000))
impact_summary += (' <tr><td>%s:</td>'
'<td align="right">%s</td></tr>'
% (' - Wanita', total_female))
impact_summary += (' <tr><td>%s:</td>'
'<td align="right">%s</td></tr>'
% (' - Pria', total_male))
impact_summary += '<tr><td> </td></tr>' # Blank row
impact_summary += (' <tr><td><b>%s:</b></td>'
'<td align="right"><b>%s</b></td></tr>'
% ('Perkiraan Jumlah Terdampak (> %.1fm)' % threshold,
count))
if gender_ratio is not None:
affected_female = str(int(sum(I_female.flat) / 1000))
affected_male = str(int(sum(I_male.flat) / 1000))
#......... rest of this code omitted .........
Example 9: run
def run(self, layers):
"""Earthquake impact to buildings (e.g. from Open Street Map)
"""
LOGGER.debug('Running earthquake building impact')
# Thresholds for mmi breakdown
t0 = self.parameters['low_threshold']
t1 = self.parameters['medium_threshold']
t2 = self.parameters['high_threshold']
class_1 = tr('Low')
class_2 = tr('Medium')
class_3 = tr('High')
# Extract data
H = get_hazard_layer(layers) # Depth
E = get_exposure_layer(layers) # Building locations
question = get_question(H.get_name(),
E.get_name(),
self)
# Define attribute name for hazard levels
hazard_attribute = 'mmi'
# Determine if exposure data have NEXIS attributes
attribute_names = E.get_attribute_names()
if ('FLOOR_AREA' in attribute_names and
'BUILDING_C' in attribute_names and
'CONTENTS_C' in attribute_names):
is_NEXIS = True
else:
is_NEXIS = False
# Interpolate hazard level to building locations
I = assign_hazard_values_to_exposure_data(H, E,
attribute_name=hazard_attribute)
# Extract relevant exposure data
#attribute_names = I.get_attribute_names()
attributes = I.get_data()
N = len(I)
# Calculate building impact
lo = 0
me = 0
hi = 0
building_values = {}
contents_values = {}
for key in range(4):
building_values[key] = 0
contents_values[key] = 0
for i in range(N):
# Classify building according to shake level
# and calculate dollar losses
if is_NEXIS:
try:
area = float(attributes[i]['FLOOR_AREA'])
except (ValueError, KeyError):
#print 'Got area', attributes[i]['FLOOR_AREA']
area = 0.0
try:
building_value_density = float(attributes[i]['BUILDING_C'])
except (ValueError, KeyError):
#print 'Got bld value', attributes[i]['BUILDING_C']
building_value_density = 0.0
try:
contents_value_density = float(attributes[i]['CONTENTS_C'])
except (ValueError, KeyError):
#print 'Got cont value', attributes[i]['CONTENTS_C']
contents_value_density = 0.0
building_value = building_value_density * area
contents_value = contents_value_density * area
x = float(attributes[i][hazard_attribute]) # MMI
if t0 <= x < t1:
lo += 1
cls = 1
elif t1 <= x < t2:
me += 1
cls = 2
elif t2 <= x:
hi += 1
cls = 3
else:
# Not reported for less than level t0
cls = 0
attributes[i][self.target_field] = cls
if is_NEXIS:
# Accumulate values in 1M dollar units
building_values[cls] += building_value
#......... rest of this code omitted .........
Example 10: run
def run(self, layers):
"""Risk plugin for volcano population evacuation
Input
layers: List of layers expected to contain
H: Vector polygon layer of volcano impact zones
P: Raster layer of population data on the same grid as H
Counts number of people exposed to volcano event.
Return
Map of population exposed to the volcano hazard zone.
Table with number of people evacuated and supplies required.
"""
# Identify hazard and exposure layers
H = get_hazard_layer(layers) # Flood inundation
E = get_exposure_layer(layers)
question = get_question(H.get_name(),
E.get_name(),
self)
# Input checks
if not H.is_vector:
msg = ('Input hazard %s was not a vector layer as expected '
% H.get_name())
raise Exception(msg)
msg = ('Input hazard must be a polygon or point layer. '
'I got %s with layer '
'type %s' % (H.get_name(),
H.get_geometry_name()))
if not (H.is_polygon_data or H.is_point_data):
raise Exception(msg)
if H.is_point_data:
# Use concentric circles
radii = self.parameters['distance [km]']
centers = H.get_geometry()
attributes = H.get_data()
rad_m = [x * 1000 for x in radii] # Convert to meters
H = make_circular_polygon(centers,
rad_m,
attributes=attributes)
# NOTE (Sunni) : I commented out this one because there will be
# a permission problem on windows
#H.write_to_file('Evac_zones_%s.shp' % str(radii)) # To check
category_title = 'Radius'
category_header = tr('Distance [km]')
category_names = radii
name_attribute = 'NAME' # As in e.g. the Smithsonian dataset
else:
# Use hazard map
category_title = 'KRB'
category_header = tr('Category')
# FIXME (Ole): Change to English and use translation system
category_names = ['Kawasan Rawan Bencana III',
'Kawasan Rawan Bencana II',
'Kawasan Rawan Bencana I']
name_attribute = 'GUNUNG' # As in e.g. BNPB hazard map
attributes = H.get_data()
# Get names of volcanos considered
if name_attribute in H.get_attribute_names():
D = {}
for att in H.get_data():
# Run through all polygons and get unique names
D[att[name_attribute]] = None
volcano_names = ''
for name in D:
volcano_names += '%s, ' % name
volcano_names = volcano_names[:-2] # Strip trailing ', '
else:
volcano_names = tr('Not specified in data')
if not category_title in H.get_attribute_names():
msg = ('Hazard data %s did not contain expected '
'attribute %s ' % (H.get_name(), category_title))
raise InaSAFEError(msg)
# Run interpolation function for polygon2raster
P = assign_hazard_values_to_exposure_data(H, E,
attribute_name='population')
# Initialise attributes of output dataset with all attributes
# from input polygon and a population count of zero
new_attributes = H.get_data()
categories = {}
for attr in new_attributes:
attr[self.target_field] = 0
cat = attr[category_title]
categories[cat] = 0
#......... rest of this code omitted .........
Developer: imadedikyadehermawan, Project: inasafe, Lines of code: 101, Source file: volcano_population_evacuation_polygon_hazard.py
Example 11: run
def run(self, layers):
"""Risk plugin for flood population evacuation
Input
layers: List of layers expected to contain
H: Raster layer of flood depth
P: Raster layer of population data on the same grid as H
Counts number of people exposed to flood levels exceeding
specified threshold.
Return
Map of population exposed to flood levels exceeding the threshold
Table with number of people evacuated and supplies required
"""
# Identify hazard and exposure layers
H = get_hazard_layer(layers) # Flood inundation
E = get_exposure_layer(layers)
question = get_question(H.get_name(),
E.get_name(),
self)
# Check that hazard is polygon type
if not H.is_vector:
msg = ('Input hazard %s was not a vector layer as expected '
% H.get_name())
raise Exception(msg)
msg = ('Input hazard must be a polygon layer. I got %s with layer '
'type %s' % (H.get_name(),
H.get_geometry_name()))
if not H.is_polygon_data:
raise Exception(msg)
# Run interpolation function for polygon2raster
P = assign_hazard_values_to_exposure_data(H, E,
attribute_name='population')
# Initialise attributes of output dataset with all attributes
# from input polygon and a population count of zero
new_attributes = H.get_data()
category_title = 'FLOODPRONE' # FIXME: Should come from keywords
categories = {}
for attr in new_attributes:
attr[self.target_field] = 0
cat = attr[category_title]
categories[cat] = 0
# Count affected population per polygon, per category and total
evacuated = 0
for attr in P.get_data():
# Get population at this location
pop = float(attr['population'])
# Update population count for associated polygon
poly_id = attr['polygon_id']
new_attributes[poly_id][self.target_field] += pop
# Update population count for each category
cat = new_attributes[poly_id][category_title]
categories[cat] += pop
# Update total
evacuated += pop
# Count totals
total = int(numpy.sum(E.get_data(nan=0, scaling=False)))
# Don't show digits less than a 1000
if total > 1000:
total = total // 1000 * 1000
if evacuated > 1000:
evacuated = evacuated // 1000 * 1000
# Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
rice = evacuated * 2.8
drinking_water = evacuated * 17.5
water = evacuated * 67
family_kits = evacuated / 5
toilets = evacuated / 20
# Generate impact report for the pdf map
table_body = [question,
TableRow([_('People needing evacuation'),
'%i' % evacuated],
header=True),
TableRow(_('Map shows population affected in each flood '
'prone area ')),
TableRow([_('Needs per week'), _('Total')],
header=True),
[_('Rice [kg]'), int(rice)],
[_('Drinking Water [l]'), int(drinking_water)],
[_('Clean Water [l]'), int(water)],
[_('Family Kits'), int(family_kits)],
[_('Toilets'), int(toilets)]]
impact_table = Table(table_body).toNewlineFreeString()
# Extend impact report for on-screen display
#......... rest of this code omitted .........
Example 12: run
def run(self, layers):
"""Indonesian Earthquake Fatality Model
Input
layers: List of layers expected to contain
H: Raster layer of MMI ground shaking
P: Raster layer of population density
"""
# Establish model coefficients
x = self.parameters['x']
y = self.parameters['y']
# Define percentages of people being displaced at each mmi level
displacement_rate = self.parameters['displacement_rate']
# Tolerance for transparency
tolerance = self.parameters['tolerance']
# Extract input layers
intensity = get_hazard_layer(layers)
population = get_exposure_layer(layers)
question = get_question(intensity.get_name(),
population.get_name(),
self)
# Extract data grids
H = intensity.get_data() # Ground Shaking
P = population.get_data(scaling=True) # Population Density
# Calculate population affected by each MMI level
# FIXME (Ole): this range is 2-9. Should 10 be included?
mmi_range = range(2, 10)
number_of_exposed = {}
number_of_displaced = {}
number_of_fatalities = {}
# Calculate fatality rates for observed Intensity values (H
# based on ITB power model
R = numpy.zeros(H.shape)
for mmi in mmi_range:
# Identify cells where MMI is in class i
mask = (H > mmi - 0.5) * (H <= mmi + 0.5)
# Count population affected by this shake level
I = numpy.where(mask, P, 0)
# Calculate expected number of fatalities per level
fatality_rate = numpy.power(10.0, x * mmi - y)
F = fatality_rate * I
# Calculate expected number of displaced people per level
try:
D = displacement_rate[mmi] * I
except KeyError, e:
msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I), str(e))
raise InaSAFEError(msg)
# Adjust displaced people to disregard fatalities.
# Set to zero if there are more fatalities than displaced.
D = numpy.where(D > F, D - F, 0)
# Sum up numbers for map
R += D # Displaced
# Generate text with result for this study
# This is what is used in the real time system exposure table
number_of_exposed[mmi] = numpy.nansum(I.flat)
number_of_displaced[mmi] = numpy.nansum(D.flat)
number_of_fatalities[mmi] = numpy.nansum(F.flat)
Example 13: run
def run(self, layers):
"""Earthquake impact to buildings (e.g. from OpenStreetMap).
:param layers: All the input layers (Hazard Layer and Exposure Layer)
"""
LOGGER.debug('Running earthquake building impact')
# merely initialize
building_value = 0
contents_value = 0
# Thresholds for mmi breakdown.
t0 = self.parameters['low_threshold']
t1 = self.parameters['medium_threshold']
t2 = self.parameters['high_threshold']
# Class Attribute and Label.
class_1 = {'label': tr('Low'), 'class': 1}
class_2 = {'label': tr('Medium'), 'class': 2}
class_3 = {'label': tr('High'), 'class': 3}
# Extract data
hazard_layer = get_hazard_layer(layers) # Depth
exposure_layer = get_exposure_layer(layers) # Building locations
question = get_question(
hazard_layer.get_name(),
exposure_layer.get_name(),
self
)
# Define attribute name for hazard levels.
hazard_attribute = 'mmi'
# Determine if exposure data have NEXIS attributes.
attribute_names = exposure_layer.get_attribute_names()
if ('FLOOR_AREA' in attribute_names and
'BUILDING_C' in attribute_names and
'CONTENTS_C' in attribute_names):
is_nexis = True
else:
is_nexis = False
# Interpolate hazard level to building locations.
my_interpolate_result = assign_hazard_values_to_exposure_data(
hazard_layer,
exposure_layer,
attribute_name=hazard_attribute
)
# Extract relevant exposure data
#attribute_names = my_interpolate_result.get_attribute_names()
attributes = my_interpolate_result.get_data()
interpolate_size = len(my_interpolate_result)
# Calculate building impact
lo = 0
me = 0
hi = 0
building_values = {}
contents_values = {}
for key in range(4):
building_values[key] = 0
contents_values[key] = 0
for i in range(interpolate_size):
# Classify building according to shake level
# and calculate dollar losses
if is_nexis:
try:
area = float(attributes[i]['FLOOR_AREA'])
except (ValueError, KeyError):
#print 'Got area', attributes[i]['FLOOR_AREA']
area = 0.0
try:
building_value_density = float(attributes[i]['BUILDING_C'])
except (ValueError, KeyError):
#print 'Got bld value', attributes[i]['BUILDING_C']
building_value_density = 0.0
try:
contents_value_density = float(attributes[i]['CONTENTS_C'])
except (ValueError, KeyError):
#print 'Got cont value', attributes[i]['CONTENTS_C']
contents_value_density = 0.0
building_value = building_value_density * area
contents_value = contents_value_density * area
try:
x = float(attributes[i][hazard_attribute]) # MMI
except TypeError:
x = 0.0
if t0 <= x < t1:
lo += 1
cls = 1
#......... rest of this code omitted .........
Example 14: run
def run(self, layers):
"""Risk plugin for Padang building survey
"""
# Extract data
H = get_hazard_layer(layers) # Ground shaking
E = get_exposure_layer(layers) # Building locations
question = get_question(H.get_name(),
E.get_name(),
self)
# Map from different kinds of datasets to Padang vulnerability classes
datatype = E.get_keywords()['datatype']
vclass_tag = 'VCLASS'
if datatype.lower() == 'osm':
# Map from OSM attributes
Emap = osm2padang(E)
elif datatype.lower() == 'sigab':
# Map from SIGAB attributes
Emap = sigab2padang(E)
else:
Emap = E
# Interpolate hazard level to building locations
I = assign_hazard_values_to_exposure_data(H, Emap,
attribute_name='MMI')
# Extract relevant numerical data
attributes = I.get_data()
N = len(I)
# Calculate building damage
count_high = count_medium = count_low = count_none = 0
for i in range(N):
mmi = float(attributes[i]['MMI'])
building_type = Emap.get_data(vclass_tag, i)
damage_params = damage_curves[building_type]
beta = damage_params['beta']
median = damage_params['median']
percent_damage = lognormal_cdf(mmi,
median=median,
sigma=beta) * 100
# Add calculated impact to existing attributes
attributes[i][self.target_field] = percent_damage
# Calculate statistics
if percent_damage < 10:
count_none += 1
if 10 <= percent_damage < 33:
count_low += 1
if 33 <= percent_damage < 66:
count_medium += 1
if 66 <= percent_damage:
count_high += 1
# Generate impact report
table_body = [question,
TableRow([_('Buildings'), _('Total')],
header=True),
TableRow([_('All'), N]),
TableRow([_('No damage'), count_none]),
TableRow([_('Low damage'), count_low]),
TableRow([_('Medium damage'), count_medium]),
TableRow([_('High damage'), count_high])]
table_body.append(TableRow(_('Notes'), header=True))
table_body.append(_('Levels of impact are defined by post 2009 '
'Padang earthquake survey conducted by Geoscience '
'Australia and Institute of Teknologi Bandung.'))
table_body.append(_('Unreinforced masonry is assumed where no '
'structural information is available.'))
impact_summary = Table(table_body).toNewlineFreeString()
impact_table = impact_summary
map_title = _('Earthquake damage to buildings')
# Create style
style_classes = [dict(label=_('No damage'), min=0, max=10,
colour='#00ff00', transparency=1),
dict(label=_('Low damage'), min=10, max=33,
colour='#ffff00', transparency=1),
dict(label=_('Medium damage'), min=33, max=66,
colour='#ffaa00', transparency=1),
dict(label=_('High damage'), min=66, max=100,
colour='#ff0000', transparency=1)]
style_info = dict(target_field=self.target_field,
style_classes=style_classes)
# Create vector layer and return
V = Vector(data=attributes,
projection=E.get_projection(),
geometry=E.get_geometry(),
name='Estimated pct damage',
keywords={'impact_summary': impact_summary,
#......... rest of this code omitted .........
Example 15: run
def run(self, layers):
"""Experimental impact function.
:param layers: List of layers expected to contain at least:
H: Polygon layer of inundation areas
E: Vector layer of roads
:type layers: list
:returns: A new line layer with inundated roads marked.
:type: safe_layer
"""
target_field = self.parameters['target_field']
road_type_field = self.parameters['road_type_field']
threshold_min = self.parameters['min threshold [m]']
threshold_max = self.parameters['max threshold [m]']
if threshold_min > threshold_max:
message = tr(
'The minimal threshold is greater then the maximal specified '
'threshold. Please check the values.')
raise GetDataError(message)
# Extract data
H = get_hazard_layer(layers) # Flood
E = get_exposure_layer(layers) # Roads
question = get_question(
H.get_name(), E.get_name(), self)
H = H.get_layer()
E = E.get_layer()
#reproject self.extent to the hazard projection
hazard_crs = H.crs()
hazard_authid = hazard_crs.authid()
if hazard_authid == 'EPSG:4326':
viewport_extent = self.extent
else:
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326)
viewport_extent = extent_to_geo_array(
QgsRectangle(*self.extent), geo_crs, hazard_crs)
#Align raster extent and viewport
#assuming they are both in the same projection
raster_extent = H.dataProvider().extent()
clip_xmin = raster_extent.xMinimum()
# clip_xmax = raster_extent.xMaximum()
clip_ymin = raster_extent.yMinimum()
# clip_ymax = raster_extent.yMaximum()
if viewport_extent[0] > clip_xmin:
clip_xmin = viewport_extent[0]
if viewport_extent[1] > clip_ymin:
clip_ymin = viewport_extent[1]
# TODO: Why have these two clauses when they are not used?
# Commenting out for now.
# if viewport_extent[2] < clip_xmax:
# clip_xmax = viewport_extent[2]
# if viewport_extent[3] < clip_ymax:
# clip_ymax = viewport_extent[3]
height = ((viewport_extent[3] - viewport_extent[1]) /
H.rasterUnitsPerPixelY())
height = int(height)
width = ((viewport_extent[2] - viewport_extent[0]) /
H.rasterUnitsPerPixelX())
width = int(width)
raster_extent = H.dataProvider().extent()
xmin = raster_extent.xMinimum()
xmax = raster_extent.xMaximum()
ymin = raster_extent.yMinimum()
ymax = raster_extent.yMaximum()
x_delta = (xmax - xmin) / H.width()
x = xmin
for i in range(H.width()):
if abs(x - clip_xmin) < x_delta:
# We have found the aligned raster boundary
break
x += x_delta
_ = i
y_delta = (ymax - ymin) / H.height()
y = ymin
for i in range(H.width()):
if abs(y - clip_ymin) < y_delta:
# We have found the aligned raster boundary
break
y += y_delta
clip_extent = [x, y, x + width * x_delta, y + height * y_delta]
# Clip and polygonize
small_raster = clip_raster(
H, width, height, QgsRectangle(*clip_extent))
(flooded_polygon_inside, flooded_polygon_outside) = polygonize_gdal(
small_raster, threshold_min, threshold_max)
# Filter geometry and data using the extent
#......... rest of this code omitted .........