本文整理汇总了Python中openquake.logs.LOG类的典型用法代码示例。如果您正苦于以下问题:Python LOG类的具体用法?Python LOG怎么用?Python LOG使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LOG类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: plot_aggregate_curve
def plot_aggregate_curve(job, aggregate_curve):
    """Plot an aggreate loss curve.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job:
        :py:class:`openquake.risk.job.probabilistic.ProbabilisticEventMixin`
    :param aggregate_curve: the aggregate curve to plot.
    :type aggregate_curve: :py:class:`openquake.shapes.Curve`
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, " \
            "skipping aggregate loss curve computation...")
        return

    output_path = os.path.join(
        job.params["BASE_PATH"], job.params["OUTPUT_DIR"],
        _filename(job.job_id))

    writer = curve.CurvePlot(output_path)
    writer.write(
        _for_plotting(aggregate_curve, job.params["INVESTIGATION_TIME"]),
        autoscale_y=False)
    writer.close()

    LOG.debug("Aggregate loss curve stored at %s" % output_path)
示例2: jvm
def jvm(max_mem=None):
    """Return the jpype module, after guaranteeing the JVM is running and
    the classpath has been loaded properly."""
    base_dir = os.path.dirname(__file__)
    jarpaths = (os.path.abspath(os.path.join(base_dir, "../lib")),
                os.path.abspath(os.path.join(base_dir, "../dist")))
    log4j_properties_path = os.path.abspath(
        os.path.join(base_dir, "../log4j.properties"))

    if not jpype.isJVMStarted():
        max_mem = get_jvm_max_mem(max_mem)
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(
            jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            # "-Dlog4j.debug", # turn on log4j internal debugging
            "-Dlog4j.configuration=file://%s" % log4j_properties_path,
            "-Xmx%sM" % max_mem)

        # Override the log level set in the log4j configuration file; this
        # can't be done on the JVM command line (i.e. -Dlog4j.rootLogger=
        # is not supported by log4j).
        _set_java_log_level(FLAGS.debug.upper())

        if FLAGS.capture_java_debug:
            _setup_java_capture(sys.stdout, sys.stderr)

    return jpype
示例3: from_kvs
def from_kvs(job_id, epsilon_provider):
    """Return an aggregate curve using the GMFs and assets
    stored in the underlying kvs system.

    :param job_id: the id of the job whose GMFs/assets should be read.
    :param epsilon_provider: provider of the epsilon values used by the
        probabilistic loss computation.
    :returns: an ``AggregateLossCurve`` fed with every GMF/asset pair
        found in the kvs for the given job.
    """
    vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
    aggregate_curve = AggregateLossCurve(vuln_model, epsilon_provider)

    gmfs_keys = kvs.get_keys("%s*%s*" % (
        job_id, kvs.tokens.GMF_KEY_TOKEN))
    LOG.debug("Found %s stored GMFs..." % len(gmfs_keys))

    asset_counter = 0
    for gmfs_key in gmfs_keys:
        # Decode the GMF set once per key instead of once per asset:
        # the value does not depend on the asset being processed.
        gmfs = kvs.get_value_json_decoded(gmfs_key)

        for asset in _assets_keys_for_gmfs(job_id, gmfs_key):
            asset_counter += 1
            # json.loads avoids constructing a new JSONDecoder per asset.
            aggregate_curve.append(gmfs, json.loads(asset))

    LOG.debug("Found %s stored assets..." % asset_counter)
    return aggregate_curve
示例4: compute_quantile_hazard_curves
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list
    using as input all the pre-computed curves for different realizations.

    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.
    """
    keys = []
    quantiles = _extract_quantiles_from_config(job)

    LOG.debug("List of QUANTILES is %s" % quantiles)

    for site in sites:
        for quantile in quantiles:
            raw_poes = compute_quantile_curve(
                curves_at(job.id, site), quantile)

            quantile_curve = {
                "site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(raw_poes),
            }

            key = kvs.tokens.quantile_hazard_curve_key(job.id, site, quantile)
            keys.append(key)

            LOG.debug("QUANTILE curve at %s is %s" % (key, quantile_curve))
            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
示例5: _get_iml_from
def _get_iml_from(curve, job, poe):
    """Return the interpolated IML using the values defined in
    the INTENSITY_MEASURE_LEVELS parameter as the reference grid to
    interpolate in.

    IML from config is in ascending order (abscissa of hazard curve)
    PoE from curve is in descending order (ordinate of hazard curve)

    In our interpolation, PoE becomes the x axis, IML the y axis, therefore
    the arrays have to be reversed (x axis has to be monotonically
    increasing).
    """
    # Reverse both arrays so the PoE axis is monotonically increasing.
    poes = numpy.array(_extract_y_values_from(curve["curve"]))[::-1]
    imls = numpy.log(numpy.array(_extract_imls_from_config(job))[::-1])

    site = shapes.Site(curve["site_lon"], curve["site_lat"])

    highest_poe = poes[-1]
    lowest_poe = poes[0]

    # Clamp out-of-range PoEs to the closest available (PoE, IML) pair.
    if poe > highest_poe:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
            "using maximum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, highest_poe, math.exp(imls[-1]), site))
        return math.exp(imls[-1])

    if poe < lowest_poe:
        LOG.debug("[HAZARD_MAP] Interpolation out of bounds for PoE %s, "\
            "using minimum PoE value pair, PoE: %s, IML: %s, at site %s" % (
            poe, lowest_poe, math.exp(imls[0]), site))
        return math.exp(imls[0])

    # Interpolate in log-IML space, then transform back.
    return math.exp(interp1d(poes, imls, kind='linear')(poe))
示例6: compute_quantile_hazard_curves
def compute_quantile_hazard_curves(job, sites):
    """Compute a quantile hazard curve for each site in the list
    using as input all the pre-computed curves for different realizations.

    The QUANTILE_LEVELS parameter in the configuration file specifies
    all the values used in the computation.

    :param job: the job being processed (must expose an ``id`` attribute).
    :param sites: iterable of :py:class:`openquake.shapes.Site` objects.
    :returns: the list of kvs keys the quantile curves were stored under.
    """
    keys = []
    quantiles = _extract_values_from_config(job, QUANTILE_PARAM_NAME)

    LOG.debug("[QUANTILE_HAZARD_CURVES] List of quantiles is %s" % quantiles)

    for site in sites:
        # The realization curves depend only on the site, so fetch and
        # decode them once instead of once per quantile.
        hazard_curves = curves_at(job.id, site)
        poes = [_extract_y_values_from(curve) for curve in hazard_curves]

        for quantile in quantiles:
            quantile_poes = compute_quantile_curve(poes, quantile)

            quantile_curve = {
                "site_lat": site.latitude,
                "site_lon": site.longitude,
                "curve": _reconstruct_curve_list_from(quantile_poes)}

            key = kvs.tokens.quantile_hazard_curve_key(
                job.id, site, quantile)
            keys.append(key)

            kvs.set_value_json_encoded(key, quantile_curve)

    return keys
示例7: compute_uhs_task
def compute_uhs_task(job_id, realization, site):
    """Compute Uniform Hazard Spectra for a given site of interest and 1 or
    more Probability of Exceedance values. The bulk of the computation will
    be done by utilizing the `UHSCalculator` class in the Java code.

    UHS results will be written directly to the database.

    :param int job_id:
        ID of the job record in the DB/KVS.
    :param realization:
        Logic tree sample number (from 1 to N, where N is the
        NUMBER_OF_LOGIC_TREE_SAMPLES param defined in the job config).
    :param site:
        The site of interest (a :class:`openquake.shapes.Site` object).
    """
    calc_proxy = utils_tasks.get_running_calculation(job_id)

    LOG.info(
        "Computing UHS for job_id=%s, site=%s, realization=%s."
        " UHS results will be serialized to the database." % (
            calc_proxy.job_id, site, realization))

    uhs_results = compute_uhs(calc_proxy, site)

    write_uhs_spectrum_data(calc_proxy, realization, site, uhs_results)
示例8: compute_mean_hazard_maps
def compute_mean_hazard_maps(job):
    """Compute mean hazard maps using as input all the
    pre computed mean hazard curves.

    The POES_HAZARD_MAPS parameter in the configuration file specifies
    all the values used in the computation.
    """
    poes = _extract_values_from_config(job, POES_PARAM_NAME)
    LOG.debug("[MEAN_HAZARD_MAPS] List of POEs is %s" % poes)

    # Fetch all the pre-computed mean curves for this job.
    pattern = "%s*%s*" % (kvs.tokens.MEAN_HAZARD_CURVE_KEY_TOKEN, job.id)
    mean_curves = kvs.mget_decoded(pattern)

    LOG.debug("[MEAN_HAZARD_MAPS] Found %s pre computed mean curves"
              % len(mean_curves))

    keys = []
    for poe in poes:
        for curve_data in mean_curves:
            site = shapes.Site(curve_data["site_lon"],
                               curve_data["site_lat"])

            map_key = kvs.tokens.mean_hazard_map_key(job.id, site, poe)
            keys.append(map_key)

            _store_iml_for(curve_data, map_key, job, poe)

    return keys
示例9: compute_aggregate_curve
def compute_aggregate_curve(job):
    """Compute and plot an aggreate loss curve.

    This function expects to find in kvs a set of pre computed
    GMFs and assets.

    This function is triggered only if the AGGREGATE_LOSS_CURVE
    parameter is specified in the configuration file.

    :param job: the job the engine is currently processing.
    :type job: openquake.risk.job.probabilistic.ProbabilisticEventMixin
    """
    if not job.has("AGGREGATE_LOSS_CURVE"):
        LOG.debug("AGGREGATE_LOSS_CURVE parameter not specified, " \
            "skipping aggregate loss curve computation...")
        return

    epsilon_provider = risk_job.EpsilonProvider(job.params)
    aggregate_loss_curve = prob.AggregateLossCurve.from_kvs(
        job.id, epsilon_provider)

    output_path = os.path.join(
        job.params["BASE_PATH"], job.params["OUTPUT_DIR"],
        _filename(job.id))

    writer = curve.CurvePlot(output_path)
    writer.write(
        _for_plotting(
            aggregate_loss_curve.compute(),
            job.params["INVESTIGATION_TIME"]),
        autoscale_y=False)
    writer.close()

    LOG.debug("Aggregate loss curve stored at %s" % output_path)
示例10: run_job
def run_job(job_file, output_type):
    """Given a job_file, run the job."""
    a_job = Job.from_file(job_file, output_type)
    validation_result = a_job.is_valid()

    # Guard clause: refuse to run an inconsistent configuration.
    if not validation_result[0]:
        a_job.set_status('failed')

        LOG.critical("The job configuration is inconsistent:")

        for error_message in validation_result[1]:
            LOG.critical(" >>> %s" % error_message)
        return

    a_job.set_status('running')
    try:
        a_job.launch()
    except sqlalchemy.exc.SQLAlchemyError:
        # Try to cleanup the session status to have a chance to update the
        # job record without further errors.
        session = get_db_session("reslt", "writer")
        if session.is_active:
            session.rollback()

        a_job.set_status('failed')
        raise
    except:
        a_job.set_status('failed')
        raise
    else:
        a_job.set_status('succeeded')
示例11: run_job
def run_job(job_file, output_type):
    """
    Given a job_file, run the job.

    :param job_file: the path of the configuration file for the job
    :type job_file: string
    :param output_type: the desired format for the results, one of 'db', 'xml'
    :type output_type: string
    """
    a_job = Job.from_file(job_file, output_type)
    is_job_valid = a_job.is_valid()

    if is_job_valid[0]:
        a_job.set_status('running')

        spawn_job_supervisor(a_job.job_id, os.getpid())

        try:
            a_job.launch()
        # "except Exception as ex" instead of the Python-2-only
        # "except Exception, ex" comma syntax (a SyntaxError on Python 3;
        # the "as" form works on Python 2.6+ as well).
        except Exception as ex:
            LOG.critical("Job failed with exception: '%s'" % str(ex))
            a_job.set_status('failed')
            raise
        else:
            a_job.set_status('succeeded')
示例12: read_sites_from_exposure
def read_sites_from_exposure(a_job):
    """
    Given the exposure model specified in the job config, read all sites which
    are located within the region of interest.

    :param a_job: a Job object with an EXPOSURE parameter defined
    :type a_job: :py:class:`openquake.job.Job`

    :returns: a list of :py:class:`openquake.shapes.Site` objects
    """
    exposure_path = os.path.join(a_job.base_path, a_job.params[conf.EXPOSURE])
    reader = exposure.ExposurePortfolioFile(exposure_path)
    constraint = a_job.region

    LOG.debug(
        "Constraining exposure parsing to %s" % constraint)

    sites = []
    for site, _asset_data in reader.filter(constraint):
        # we don't want duplicates (bug 812395):
        if site not in sites:
            sites.append(site)

    return sites
示例13: run_job
def run_job(job_file, output_type):
    """
    Given a job_file, run the job.

    :param job_file: the path of the configuration file for the job
    :type job_file: string
    :param output_type: the desired format for the results, one of 'db', 'xml'
    :type output_type: string
    """
    a_job = Job.from_file(job_file, output_type)
    a_job.set_status('running')

    # closing all db connections to make sure they're not shared between
    # supervisor and job executor processes. otherwise if one of them closes
    # the connection it immediately becomes unavailable for other
    close_connection()

    job_pid = os.fork()
    if not job_pid:
        # job executor process
        try:
            logs.init_logs_amqp_send(level=FLAGS.debug, job_id=a_job.job_id)
            a_job.launch()
        # "except Exception as ex" instead of the Python-2-only
        # "except Exception, ex" comma syntax (a SyntaxError on Python 3;
        # the "as" form works on Python 2.6+ as well).
        except Exception as ex:
            LOG.critical("Job failed with exception: '%s'" % str(ex))
            a_job.set_status('failed')
            raise
        else:
            a_job.set_status('succeeded')
        return
示例14: jvm
def jvm(max_mem=4000):
    """Return the jpype module, after guaranteeing the JVM is running and
    the classpath has been loaded properly."""
    base_dir = os.path.dirname(__file__)
    # TODO(JMC): Make sure these directories exist
    jarpaths = (os.path.abspath(os.path.join(base_dir, "../lib")),
                os.path.abspath(os.path.join(base_dir, "../dist")))

    if not jpype.isJVMStarted():
        LOG.debug("Default JVM path is %s" % jpype.getDefaultJVMPath())
        jpype.startJVM(
            jpype.getDefaultJVMPath(),
            "-Djava.ext.dirs=%s:%s" % jarpaths,
            "-Dlog4j.configuration=log4j.properties",
            "-Dlog4j.rootLogger=%s, A1" % (FLAGS.debug.upper()),
            "-Xmx%sM" % max_mem)

        if FLAGS.capture_java_debug:
            # Redirect the JVM's stdout/stderr into the Python streams.
            out_proxy = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stdout)
            err_proxy = jpype.JProxy("org.gem.IPythonPipe", inst=sys.stderr)

            out_stream = jpype.JClass("org.gem.PythonOutputStream")()
            err_stream = jpype.JClass("org.gem.PythonOutputStream")()
            out_stream.setPythonStdout(out_proxy)
            err_stream.setPythonStdout(err_proxy)

            print_stream = jpype.JClass("java.io.PrintStream")
            jpype.java.lang.System.setOut(print_stream(out_stream))
            jpype.java.lang.System.setErr(print_stream(err_stream))

    return jpype
示例15: _partition
def _partition(self):
    """Split the set of sites to compute in blocks and store
    them in the underlying kvs system.
    """
    self.blocks_keys = []
    region_constraint = self.region

    # we use the exposure, if specified,
    # otherwise we use the input region
    if self.has(EXPOSURE):
        sites = self._read_sites_from_exposure()
        LOG.debug("Loaded %s sites from exposure portfolio." % len(sites))
    elif self.region:
        sites = self.region.sites
    else:
        raise Exception("I don't know how to get the sites!")

    if not self.partition:
        # Single-block mode: everything goes into one block.
        single_block = Block(sites)
        self.blocks_keys.append(single_block.id)
        single_block.to_kvs()
        return

    block_count = 0
    for block in BlockSplitter(sites, constraint=region_constraint):
        self.blocks_keys.append(block.id)
        block.to_kvs()
        block_count += 1

    LOG.debug("Job has partitioned %s sites into %s blocks" % (
        len(sites), block_count))