This page collects typical usage examples of the Python method openquake.logs.LOG.debug. If you are wondering what LOG.debug does and how to use it, the curated examples below should help; you can also look at other usage examples of the containing class, openquake.logs.LOG.
The 15 LOG.debug code examples shown below are drawn from open-source projects and sorted by popularity by default.
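All of the examples share the same basic pattern: a module-level logger and a %-formatted debug message. As a minimal orientation sketch (the message text and value here are invented):

from openquake.logs import LOG

site_count = 42  # hypothetical value
LOG.debug("Loaded %s sites from exposure portfolio." % site_count)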
Example 1: _partition
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def _partition(self):
    """Split the set of sites to compute in blocks and store
    them in the underlying kvs system.
    """
    sites = []
    self.blocks_keys = []
    region_constraint = self.region

    # we use the exposure, if specified,
    # otherwise we use the input region
    if self.has(EXPOSURE):
        sites = self._read_sites_from_exposure()
        LOG.debug("Loaded %s sites from exposure portfolio." % len(sites))
    elif self.region:
        sites = self.region.sites
    else:
        raise Exception("I don't know how to get the sites!")

    if self.partition:
        block_count = 0
        for block in BlockSplitter(sites, constraint=region_constraint):
            self.blocks_keys.append(block.id)
            block.to_kvs()
            block_count += 1
        LOG.debug("Job has partitioned %s sites into %s blocks" % (
            len(sites), block_count))
    else:
        block = Block(sites)
        self.blocks_keys.append(block.id)
        block.to_kvs()
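The calls above format their messages eagerly with %. Assuming LOG is a standard logging.Logger (the commented-out call "Jarpath is %s", jarpaths in Example 6 below suggests it is), the two-argument form defers formatting until the record is actually emitted, which costs nothing when the DEBUG level is disabled. A sketch with invented values:

import logging

LOG = logging.getLogger("openquake")  # stand-in for openquake.logs.LOG

n_sites, n_blocks = 1000, 10  # hypothetical values
# the arguments are only interpolated if a DEBUG record is actually emitted
LOG.debug("Job has partitioned %s sites into %s blocks", n_sites, n_blocks)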
Example 2: jvm
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def jvm(max_mem=None):
    """Return the jpype module, after guaranteeing the JVM is running and
    the classpath has been loaded properly."""
    jarpaths = (os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../lib")),
                os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../dist")))
    log4j_properties_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../log4j.properties"))

    if not jpype.isJVMStarted():
        max_mem = get_jvm_max_mem(max_mem)
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            # "-Dlog4j.debug",  # turn on log4j internal debugging
            "-Dlog4j.configuration=file://%s" % log4j_properties_path,
            "-Xmx%sM" % max_mem)

        # Override the log level set in the log4j configuration file; this
        # can't be done on the JVM command line (i.e. -Dlog4j.rootLogger=
        # is not supported by log4j).
        _set_java_log_level(FLAGS.debug.upper())

        if FLAGS.capture_java_debug:
            _setup_java_capture(sys.stdout, sys.stderr)

    return jpype
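Callers use the returned module to reach Java classes through the running JVM. A hedged usage sketch (java.util.ArrayList is just an illustration; the OpenQuake-specific classes live in the jars on the classpath above):

jp = jvm(max_mem=2048)

# instantiate a Java class and call its methods from Python
ArrayList = jp.JClass("java.util.ArrayList")
values = ArrayList()
values.add("first element")
LOG.debug("Java list now holds %s entries" % values.size())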
Example 3: read_sites_from_exposure
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def read_sites_from_exposure(a_job):
    """
    Given the exposure model specified in the job config, read all sites which
    are located within the region of interest.

    :param a_job: a Job object with an EXPOSURE parameter defined
    :type a_job: :py:class:`openquake.job.Job`

    :returns: a list of :py:class:`openquake.shapes.Site` objects
    """
    sites = []
    path = os.path.join(a_job.base_path, a_job.params[conf.EXPOSURE])

    reader = exposure.ExposurePortfolioFile(path)
    constraint = a_job.region

    LOG.debug("Constraining exposure parsing to %s" % constraint)

    for site, _asset_data in reader.filter(constraint):
        # we don't want duplicates (bug 812395):
        if site not in sites:
            sites.append(site)

    return sites
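The site not in sites membership test rescans the list on every iteration, so the deduplication is quadratic in the number of sites. If Site objects are hashable (an assumption), a set keeps it linear while preserving order; a self-contained sketch:

def unique_preserving_order(items):
    """Drop duplicates, keeping first-seen order, in O(n) for hashable items."""
    seen = set()
    unique = []
    for item in items:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique

print(unique_preserving_order([3, 1, 3, 2, 1]))  # -> [3, 1, 2]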
Example 4: _get_iml_from
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def _get_iml_from(curve, job, poe):
    """Return the interpolated IML using the values defined in
    the INTENSITY_MEASURE_LEVELS parameter as the reference grid to
    interpolate in.

    The IML from config is in ascending order (abscissa of the hazard curve).
    The PoE from the curve is in descending order (ordinate of the hazard
    curve). In our interpolation, PoE becomes the x axis and IML the y axis,
    therefore the arrays have to be reversed (the x axis has to be
    monotonically increasing).
    """
    # reverse arrays
    poes = numpy.array(_extract_y_values_from(curve["curve"]))[::-1]
    imls = numpy.log(numpy.array(_extract_imls_from_config(job))[::-1])

    site = shapes.Site(curve["site_lon"], curve["site_lat"])

    if poe > poes[-1]:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "
            "using maximum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, poes[-1], math.exp(imls[-1]), site))
        return math.exp(imls[-1])

    if poe < poes[0]:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "
            "using minimum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, poes[0], math.exp(imls[0]), site))
        return math.exp(imls[0])

    return math.exp(interp1d(poes, imls, kind='linear')(poe))
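To see the reverse-and-interpolate step in isolation, here is a runnable sketch with made-up numbers (a four-point curve; the real grids come from the job config):

import numpy
from scipy.interpolate import interp1d

# hypothetical hazard curve: PoEs descend as IMLs ascend
poes = numpy.array([0.9, 0.5, 0.1, 0.01])[::-1]            # x axis, now ascending
imls = numpy.log(numpy.array([0.1, 0.2, 0.4, 0.8]))[::-1]  # matching log-IMLs

iml = numpy.exp(interp1d(poes, imls, kind='linear')(0.3))
print(iml)  # ~0.28, between the IMLs at PoE 0.5 and PoE 0.1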
Example 5: from_kvs
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def from_kvs(job_id, epsilon_provider):
    """Return an aggregate curve using the GMFs and assets
    stored in the underlying kvs system."""
    vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
    aggregate_curve = AggregateLossCurve(vuln_model, epsilon_provider)

    gmfs_keys = kvs.get_keys("%s*%s*" % (job_id, kvs.tokens.GMF_KEY_TOKEN))
    LOG.debug("Found %s stored GMFs..." % len(gmfs_keys))

    asset_counter = 0
    for gmfs_key in gmfs_keys:
        assets = _assets_keys_for_gmfs(job_id, gmfs_key)
        for asset in assets:
            asset_counter += 1
            gmfs = kvs.get_value_json_decoded(gmfs_key)
            aggregate_curve.append(gmfs, json.JSONDecoder().decode(asset))

    LOG.debug("Found %s stored assets..." % asset_counter)
    return aggregate_curve
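Note that kvs.get_value_json_decoded(gmfs_key) runs inside the inner loop, so the same GMF set is fetched and JSON-decoded once per asset. Hoisting it above the asset loop avoids the repeated KVS round-trips; a sketch of just the reordered loop, reusing the names above:

for gmfs_key in gmfs_keys:
    # decode the ground motion fields once per key, not once per asset
    gmfs = kvs.get_value_json_decoded(gmfs_key)
    for asset in _assets_keys_for_gmfs(job_id, gmfs_key):
        asset_counter += 1
        aggregate_curve.append(gmfs, json.JSONDecoder().decode(asset))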
Example 6: jvm
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def jvm(max_mem=4000):
    """Return the jpype module, after guaranteeing the JVM is running and
    the classpath has been loaded properly."""
    jarpaths = (os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../lib")),
                os.path.abspath(
                    os.path.join(os.path.dirname(__file__), "../dist")))
    # TODO(JMC): Make sure these directories exist
    # LOG.debug("Jarpath is %s", jarpaths)

    if not jpype.isJVMStarted():
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            # "-Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.Log4JLogger",
            # "-Dlog4j.debug",
            "-Dlog4j.configuration=log4j.properties",
            "-Dlog4j.rootLogger=%s, A1" % (FLAGS.debug.upper()),
            # "-Dlog4j.rootLogger=DEBUG, A1",
            "-Xmx%sM" % max_mem)

        if FLAGS.capture_java_debug:
            mystream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stdout)
            errstream = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stderr)
            outputstream = jpype.JClass("org.gem.PythonOutputStream")()
            err_stream = jpype.JClass("org.gem.PythonOutputStream")()
            outputstream.setPythonStdout(mystream)
            err_stream.setPythonStdout(errstream)
            ps = jpype.JClass("java.io.PrintStream")
            jpype.java.lang.System.setOut(ps(outputstream))
            jpype.java.lang.System.setErr(ps(err_stream))

    return jpype
Example 7: compute_quantile_hazard_curves
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list,
    using as input all the pre-computed curves for different realizations.

    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.
    """
    keys = []
    quantiles = _extract_quantiles_from_config(job)

    LOG.debug("List of QUANTILES is %s" % quantiles)

    for site in sites:
        for quantile in quantiles:
            quantile_curve = {
                "site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(
                    compute_quantile_curve(
                        curves_at(job.id, site), quantile)),
            }

            key = kvs.tokens.quantile_hazard_curve_key(job.id, site, quantile)
            keys.append(key)

            LOG.debug("QUANTILE curve at %s is %s" % (key, quantile_curve))

            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
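compute_quantile_curve itself is not shown on this page. Assuming it takes one PoE list per realization (all on the same IML grid) and returns the per-IML quantile, a minimal numpy equivalent would be:

import numpy

def compute_quantile_curve(curves, quantile):
    """Per-IML quantile across realizations (shapes are assumptions)."""
    # axis=0 aggregates across realizations for each IML separately
    return list(numpy.percentile(numpy.array(curves), quantile * 100, axis=0))

# median curve of three hypothetical realizations:
print(compute_quantile_curve([[0.9, 0.5], [0.8, 0.4], [0.7, 0.3]], 0.5))
# -> [0.8, 0.4]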
Example 8: plot_aggregate_curve
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def plot_aggregate_curve(job, aggregate_curve):
    """Plot an aggregate loss curve.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job:
        :py:class:`openquake.risk.job.probabilistic.ProbabilisticEventMixin`
    :param aggregate_curve: the aggregate curve to plot.
    :type aggregate_curve: :py:class:`openquake.shapes.Curve`
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, "
                  "skipping aggregate loss curve computation...")
        return

    path = os.path.join(job.params["BASE_PATH"],
                        job.params["OUTPUT_DIR"], _filename(job.job_id))

    plotter = curve.CurvePlot(path)
    plotter.write(_for_plotting(aggregate_curve,
        job.params["INVESTIGATION_TIME"]), autoscale_y=False)
    plotter.close()

    LOG.debug("Aggregate loss curve stored at %s" % path)
Example 9: compute_aggregate_curve
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def compute_aggregate_curve(job):
    """Compute and plot an aggregate loss curve.

    This function expects to find in kvs a set of pre-computed
    GMFs and assets.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job: openquake.risk.job.probabilistic.ProbabilisticEventMixin
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, "
                  "skipping aggregate loss curve computation...")
        return

    epsilon_provider = risk_job.EpsilonProvider(job.params)
    aggregate_loss_curve = \
        prob.AggregateLossCurve.from_kvs(job.id, epsilon_provider)

    path = os.path.join(job.params["BASE_PATH"],
                        job.params["OUTPUT_DIR"], _filename(job.id))

    plotter = curve.CurvePlot(path)
    plotter.write(_for_plotting(
        aggregate_loss_curve.compute(),
        job.params["INVESTIGATION_TIME"]), autoscale_y=False)
    plotter.close()

    LOG.debug("Aggregate loss curve stored at %s" % path)
Example 10: compute_quantile_hazard_curves
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list,
    using as input all the pre-computed curves for different realizations.

    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.
    """
    keys = []
    quantiles = _extract_values_from_config(job, QUANTILE_PARAM_NAME)

    LOG.debug("[QUANTILE_HAZARD_CURVES] List of quantiles is %s" % quantiles)

    for site in sites:
        for quantile in quantiles:
            hazard_curves = curves_at(job.id, site)
            poes = [_extract_y_values_from(curve) for curve in hazard_curves]
            quantile_poes = compute_quantile_curve(poes, quantile)

            quantile_curve = {
                "site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(quantile_poes)}

            key = kvs.tokens.quantile_hazard_curve_key(
                job.id, site, quantile)
            keys.append(key)

            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
Example 11: compute_mean_hazard_maps
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def compute_mean_hazard_maps(job):
    """Compute mean hazard maps using as input all the
    pre-computed mean hazard curves.

    The POES_HAZARD_MAPS parameter in the configuration file specifies
    all the values used in the computation.
    """
    poes = _extract_values_from_config(job, POES_PARAM_NAME)

    LOG.debug("[MEAN_HAZARD_MAPS] List of POEs is %s" % poes)

    # get all the pre-computed mean curves
    pattern = "%s*%s*" % (kvs.tokens.MEAN_HAZARD_CURVE_KEY_TOKEN, job.id)
    mean_curves = kvs.mget_decoded(pattern)

    LOG.debug("[MEAN_HAZARD_MAPS] Found %s pre-computed mean curves"
              % len(mean_curves))

    keys = []
    for poe in poes:
        for mean_curve in mean_curves:
            site = shapes.Site(mean_curve["site_lon"],
                               mean_curve["site_lat"])

            key = kvs.tokens.mean_hazard_map_key(job.id, site, poe)
            keys.append(key)

            _store_iml_for(mean_curve, key, job, poe)

    return keys
Example 12: from_file
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def from_file(config_file, output_type):
    """
    Create a job from external configuration files.

    :param config_file: the external configuration file path
    :param output_type: where to store results:
        * 'db' database
        * 'xml' XML files *plus* database
    :param params: optional dictionary of default parameters, overridden by
        the ones read from the config file
    :type params: :py:class:`dict`
    """
    # output_type can be set, in addition to 'db' and 'xml', also to
    # 'xml_without_db', which has the effect of serializing only to xml
    # without requiring a database at all.
    # This allows running tests without requiring a database.
    # This is not documented in the public interface because it is
    # essentially a detail of our current tests and ci infrastructure.
    assert output_type in ('db', 'xml', 'xml_without_db')

    config_file = os.path.abspath(config_file)
    LOG.debug("Loading Job from %s" % (config_file))

    base_path = os.path.abspath(os.path.dirname(config_file))

    params = {}
    sections = []
    for each_config_file in Job.default_configs() + [config_file]:
        new_sections, new_params = parse_config_file(each_config_file)
        sections.extend(new_sections)
        params.update(new_params)
    params['BASE_PATH'] = base_path

    if output_type == 'xml_without_db':
        # we are running a test
        job_id = 0
        serialize_results_to = ['xml']
    else:
        # openquake-server creates the job record in advance and stores
        # the job id in the config file
        job_id = params.get('OPENQUAKE_JOB_ID')
        if not job_id:
            # create the database record for this job
            job_id = prepare_job(params).id

        if output_type == 'db':
            serialize_results_to = ['db']
        else:
            serialize_results_to = ['db', 'xml']

    job = Job(params, job_id, sections=sections, base_path=base_path)
    job.serialize_results_to = serialize_results_to
    job.config_file = config_file  # pylint: disable=W0201

    return job
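A call site would look roughly like this (the configuration file path is hypothetical; 'db' stores results to the database only):

# hypothetical configuration file
job = Job.from_file("/tmp/demo/config.gem", 'db')
LOG.debug("Job will serialize results to %s" % job.serialize_results_to)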
Example 13: serialize_mfd
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def serialize_mfd(mfd, parent_node):
    """Serialize a magnitude-frequency distribution under parent_node."""
    if mfd.__class__.__name__ == 'org.opensha.sha.magdist.SummedMagFreqDist':
        LOG.debug("Serializing a SummedMFD")
        mfd_list = mfd.getMagFreqDists()
        if mfd_list is None:
            mfd_list = [mfd]
        for sub_mfd in mfd_list:
            serialize_evenly_discretized_mfd(sub_mfd, parent_node)
    elif (mfd.__class__.__name__ ==
          'org.opensha.sha.magdist.IncrementalMagFreqDist'):
        LOG.debug("Serializing an IncrementalMFD")
        serialize_evenly_discretized_mfd(mfd, parent_node)
    else:
        raise Exception("Unhandled mfd class: %s" % mfd.__class__.__name__)
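The dispatch keys off the Java class name string because the mfd objects arrive over the jpype bridge, where Python's isinstance is of no help. If more distribution types were handled, a lookup table would keep the function flat; a sketch reusing the names above (a refactor for illustration, not the project's actual code):

def _serialize_summed_mfd(mfd, parent_node):
    # flatten a summed distribution into its components (see Example 13)
    mfd_list = mfd.getMagFreqDists()
    if mfd_list is None:
        mfd_list = [mfd]
    for sub_mfd in mfd_list:
        serialize_evenly_discretized_mfd(sub_mfd, parent_node)

_MFD_SERIALIZERS = {
    'org.opensha.sha.magdist.SummedMagFreqDist': _serialize_summed_mfd,
    'org.opensha.sha.magdist.IncrementalMagFreqDist':
        serialize_evenly_discretized_mfd,
}

def serialize_mfd(mfd, parent_node):
    name = mfd.__class__.__name__
    if name not in _MFD_SERIALIZERS:
        raise Exception("Unhandled mfd class: %s" % name)
    _MFD_SERIALIZERS[name](mfd, parent_node)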
Example 14: cleanup
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def cleanup(self):
    """
    Perform any necessary cleanup steps after the job completes.
    Currently, this method only clears KVS cache data for the job.
    """
    LOG.debug("Running KVS garbage collection for job %s" % self.job_id)

    gc_cmd = ['python', 'bin/cache_gc.py', '--job=%s' % self.job_id]

    # run KVS garbage collection asynchronously;
    # stdout goes to /dev/null to silence any output from the GC
    subprocess.Popen(gc_cmd, env=os.environ, stdout=open('/dev/null', 'w'))
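On Python 3.3+, subprocess.DEVNULL gives the same silencing without the never-closed file object that open('/dev/null', 'w') leaves behind; a sketch with an invented job id:

import os
import subprocess

gc_cmd = ['python', 'bin/cache_gc.py', '--job=42']  # hypothetical job id
# DEVNULL silences the GC without leaking an open file handle
subprocess.Popen(gc_cmd, env=os.environ, stdout=subprocess.DEVNULL)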
Example 15: from_kvs
# Required import: from openquake.logs import LOG [as alias]
# or: from openquake.logs.LOG import debug [as alias]
def from_kvs(job_id):
    """Return an aggregate curve using the computed
    loss curves in the kvs system."""
    client = kvs.get_client(binary=False)
    keys = client.keys("%s*%s*" % (job_id,
                                   kvs.tokens.LOSS_CURVE_KEY_TOKEN))

    LOG.debug("Found %s stored loss curves..." % len(keys))

    aggregate_curve = AggregateLossCurve()
    for key in keys:
        aggregate_curve.append(shapes.Curve.from_json(kvs.get(key)))

    return aggregate_curve