This article collects typical usage examples of Python's logging.debug method from the absl.logging module. If you have been wondering what logging.debug does, how to call it, or what real-world usage looks like, the curated examples below should help; they are also a good starting point for exploring the rest of the absl.logging module.
The following shows 15 code examples of the logging.debug method, drawn from open-source projects and ordered by popularity by default.
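Before the collected examples, here is a minimal, self-contained sketch of how absl.logging.debug is typically wired up in an absl application. It is only illustrative: the message contents and record count are made up, and debug messages are suppressed at the default verbosity, which is why the verbosity is raised first.

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  # Debug messages are dropped at the default verbosity; raise it to DEBUG.
  logging.set_verbosity(logging.DEBUG)
  # Like the standard library logger, absl uses lazy %-style formatting.
  logging.debug('Loaded %d records from %s', 42, 'example.csv')


if __name__ == '__main__':
  app.run(main)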
Example 1: _BuildChromeApp
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def _BuildChromeApp(self):
  """Build and bundle the Chrome App."""
  logging.debug('Building the Chrome Application...')
  self._ManifestCheck()
  os.chdir(self.npm_path)
  _ExecuteCommand(['npm', 'install'])
  _ExecuteCommand(['npm', 'run', 'build:chromeapp:once'])
  os.chdir(self.chrome_app_src_dir)
  if self.on_local:
    print('Local bundling coming soon...')
  else:
    logging.info('Zipping the Loaner Chrome Application...')
    _ZipRelativePath(
        self.chrome_app_temp_dir, _ZIPFILENAME, self.chrome_app_temp_dir)
    if os.path.isfile(self.chrome_app_archive):
      os.remove(self.chrome_app_archive)
    shutil.move(
        os.path.join(self.chrome_app_src_dir, _ZIPFILENAME),
        self.chrome_app_archive)
    logging.info(
        'The Loaner Chrome Application zip can be found at %s',
        self.chrome_app_archive)
    logging.info('Removing the temp files for the Chrome App...')
    shutil.rmtree(self.chrome_app_temp_dir)
Example 2: insert_bucket
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def insert_bucket(self, bucket_name=None):
  """Inserts a Google Cloud Storage Bucket object.

  Args:
    bucket_name: str, the name of the Google Cloud Storage Bucket to insert.

  Returns:
    A google.cloud.storage.bucket.Bucket object representing the new bucket.

  Raises:
    AlreadyExistsError: when trying to insert a bucket that already exists.
  """
  bucket_name = bucket_name or self._config.bucket
  try:
    new_bucket = self._client.create_bucket(bucket_name)
  except exceptions.Conflict as err:
    raise AlreadyExistsError(
        'the Google Cloud Storage Bucket with name {!r} already exists: '
        '{}'.format(bucket_name, err))
  logging.debug(
      'The Google Cloud Storage Bucket %r has been created for project '
      '%r.', bucket_name, self._config.project)
  return new_bucket
Example 3: _get_config_file_path
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def _get_config_file_path(config_file_path):
  """Gets the config file path if a full path was not provided.

  Args:
    config_file_path: str, the name or the full path of the config file.

  Returns:
    A str representing the full path to the config file.
  """
  if os.path.isabs(config_file_path):
    return config_file_path
  logging.debug(
      'The full path for the config file was not specified, '
      'looking in the default directory.')
  return os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '..', config_file_path)
Example 4: span
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def span(
    self,
    scope: str,
    sub_scope: str,
    nonce: int,
    parent_span_yield: Optional[None],
    fn_args: Optional[Tuple[Any, ...]],
    fn_kwargs: Optional[Dict[str, Any]],
    trace_opts: Dict[str, Any],
) -> Generator[None, TraceResult, None]:
    assert parent_span_yield is None
    del parent_span_yield, fn_args, fn_kwargs, trace_opts
    start_time = time.time()
    logging.debug('(%s) Entering %s.%s', nonce, scope, sub_scope)
    yield None
    logging.debug('(%s) Exiting %s.%s. Elapsed time %f', nonce, scope,
                  sub_scope, time.time() - start_time)
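The span generator above plugs into a larger tracing framework. As a standalone analogue (purely illustrative, not that project's API), the same enter/exit pattern around logging.debug can be written with contextlib:

import contextlib
import time

from absl import logging


@contextlib.contextmanager
def debug_span(scope, sub_scope, nonce=0):
  """Logs entry, exit, and elapsed time around a block of work."""
  start_time = time.time()
  logging.debug('(%s) Entering %s.%s', nonce, scope, sub_scope)
  try:
    yield
  finally:
    logging.debug('(%s) Exiting %s.%s. Elapsed time %f', nonce, scope,
                  sub_scope, time.time() - start_time)


# Usage: with debug_span('dataset', 'load'): ...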
Example 5: make_quantizer
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def make_quantizer(self, quantizer_1: quantizer_impl.IQuantizer,
                   quantizer_2: quantizer_impl.IQuantizer):
  """make adder quantizer."""
  self.quantizer_1 = quantizer_1
  self.quantizer_2 = quantizer_2
  mode1 = quantizer_1.mode
  mode2 = quantizer_2.mode
  adder_impl_class = self.adder_impl_table[mode1][mode2]
  logging.debug(
      "qbn adder implemented as class %s",
      adder_impl_class.implemented_as())
  return adder_impl_class(quantizer_1, quantizer_2)
Example 6: tcp_server
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def tcp_server(tcp_addr, settings):
  """Start up the tcp server, send the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  sock.bind(tcp_addr)
  sock.listen(1)
  logging.info("Waiting for connection on %s", tcp_addr)
  conn, addr = sock.accept()
  logging.info("Accepted connection from %s", Addr(*addr[:2]))
  # Send map_data independently for py2/3 and json encoding reasons.
  write_tcp(conn, settings["map_data"])
  send_settings = {k: v for k, v in settings.items() if k != "map_data"}
  logging.debug("settings: %s", send_settings)
  write_tcp(conn, json.dumps(send_settings).encode())
  return conn
Example 7: tcp_client
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def tcp_client(tcp_addr):
  """Connect to the tcp server, and return the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  for i in range(300):
    logging.info("Connecting to: %s, attempt %d", tcp_addr, i)
    try:
      sock.connect(tcp_addr)
      break
    except socket.error:
      time.sleep(1)
  else:
    sock.connect(tcp_addr)  # One last try, but don't catch this error.
  logging.info("Connected.")
  map_data = read_tcp(sock)
  settings_str = read_tcp(sock)
  if not settings_str:
    raise socket.error("Failed to read")
  settings = json.loads(settings_str.decode())
  logging.info("Got settings. map_name: %s.", settings["map_name"])
  logging.debug("settings: %s", settings)
  settings["map_data"] = map_data
  return sock, settings
Example 8: resolve_exec_properties
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def resolve_exec_properties(
    self,
    exec_properties: Dict[Text, Any],
    pipeline_info: data_types.PipelineInfo,
    component_info: data_types.ComponentInfo,
) -> Dict[Text, Any]:
  """Overrides BaseDriver.resolve_exec_properties()."""
  del pipeline_info, component_info
  input_config = example_gen_pb2.Input()
  json_format.Parse(exec_properties[utils.INPUT_CONFIG_KEY], input_config)
  input_base = exec_properties[utils.INPUT_BASE_KEY]
  logging.debug('Processing input %s.', input_base)
  # Note that this function updates the input_config.splits.pattern.
  fingerprint, select_span = utils.calculate_splits_fingerprint_and_span(
      input_base, input_config.splits)
  exec_properties[utils.INPUT_CONFIG_KEY] = json_format.MessageToJson(
      input_config, sort_keys=True, preserving_proto_field_name=True)
  exec_properties[utils.SPAN_PROPERTY_NAME] = select_span
  exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint
  return exec_properties
Example 9: extract_feature
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def extract_feature(*wavefiles, **kwargs):
  ''' tensorflow fbank feat '''
  dry_run = kwargs.get('dry_run')
  # Default to 'fbank' when no feature name is supplied.
  feat_name = kwargs.get('feature_name', 'fbank')
  assert feat_name
  graph, (input_tensor, output_tensor) = _freq_feat_graph(feat_name, **kwargs)
  sess = _get_session(_get_out_tensor_name(feat_name, 0), graph)
  for wavpath in wavefiles:
    savepath = os.path.splitext(wavpath)[0] + '.npy'
    logging.debug('extract_feat: input: {}, output: {}'.format(
        wavpath, savepath))
    feat = sess.run(output_tensor, feed_dict={input_tensor: wavpath})
    # save feat
    if dry_run:
      logging.info('save feat: path {} shape:{} dtype:{}'.format(
          savepath, feat.shape, feat.dtype))
    else:
      np.save(savepath, feat)
Example 10: create_serving_input_receiver_fn
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def create_serving_input_receiver_fn(self):
  ''' infer input pipeline '''
  # with batch_size
  taskconf = self.config['data']['task']
  shape = [None] + taskconf['audio']['feature_shape']
  logging.debug('serving input shape:{}'.format(shape))
  #pylint: disable=no-member
  return tf.estimator.export.build_raw_serving_input_receiver_fn(
      features={
          'inputs':
              tf.placeholder(name="inputs", shape=shape, dtype=tf.float32),
          'texts':
              tf.placeholder(
                  name="texts",
                  shape=(None, taskconf['text']['max_text_len']),
                  dtype=tf.int32)
      },
      default_batch_size=None,
  )
Example 11: import_all_modules_for_register
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def import_all_modules_for_register(config=None, only_nlp=False):
  """Import all modules for register."""
  if only_nlp:
    all_modules = ALL_NLP_MODULES
  else:
    all_modules = ALL_MODULES
  add_custom_modules(all_modules, config)
  logging.debug(f"All modules: {all_modules}")
  errors = []
  for base_dir, modules in all_modules:
    for name in modules:
      try:
        if base_dir != "":
          full_name = base_dir + "." + name
        else:
          full_name = name
        importlib.import_module(full_name)
        logging.debug(f"{full_name} loaded.")
      except ImportError as error:
        errors.append((name, error))
  _handle_errors(errors)
Example 12: __init__
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def __init__(self,
             user_type_distribution=(0.3, 0.7),
             user_document_mean_affinity_matrix=((.1, .7), (.7, .1)),
             user_document_stddev_affinity_matrix=((.1, .1), (.1, .1)),
             user_ctor=IEUserState,
             **kwargs):
  self._number_of_user_types = len(user_type_distribution)
  self._user_type_dist = user_type_distribution
  if len(user_document_mean_affinity_matrix) != len(user_type_distribution):
    raise ValueError('The dimensions of user_type_distribution and '
                     'user_document_mean_affinity_matrix do not match.')
  if len(user_document_stddev_affinity_matrix) != len(user_type_distribution):
    raise ValueError('The dimensions of user_type_distribution and '
                     'user_document_stddev_affinity_matrix do not match.')
  self._user_doc_means = user_document_mean_affinity_matrix
  self._user_doc_stddev = user_document_stddev_affinity_matrix
  logging.debug('Initialized IEClusterUserSampler')
  super(IEClusterUserSampler, self).__init__(user_ctor, **kwargs)
Example 13: __init__
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def __init__(self,
             user_ctor=LTSUserState,
             memory_discount=0.7,
             sensitivity=0.01,
             innovation_stddev=0.05,
             choc_mean=5.0,
             choc_stddev=1.0,
             kale_mean=4.0,
             kale_stddev=1.0,
             time_budget=60,
             **kwargs):
  """Creates a new user state sampler."""
  logging.debug('Initialized LTSStaticUserSampler')
  self._state_parameters = {'memory_discount': memory_discount,
                            'sensitivity': sensitivity,
                            'innovation_stddev': innovation_stddev,
                            'choc_mean': choc_mean,
                            'choc_stddev': choc_stddev,
                            'kale_mean': kale_mean,
                            'kale_stddev': kale_stddev,
                            'time_budget': time_budget
                           }
  super(LTSStaticUserSampler, self).__init__(user_ctor, **kwargs)
Example 14: value
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def value(self, v):
  if v in _CPP_LEVEL_TO_NAMES:
    # --stderrthreshold also accepts numeric strings whose values are
    # Abseil C++ log levels.
    cpp_value = int(v)
    v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
  elif v.lower() in _CPP_NAME_TO_LEVELS:
    v = v.lower()
    if v == 'warn':
      v = 'warning'  # Use 'warning' as the canonical name.
    cpp_value = int(_CPP_NAME_TO_LEVELS[v])
  else:
    raise ValueError(
        '--stderrthreshold must be one of (case-insensitive) '
        "'debug', 'info', 'warning', 'error', 'fatal', "
        "or '0', '1', '2', '3', not '%s'" % v)
  self._value = v
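The setter above backs absl's --stderrthreshold flag. As a rough sketch (the level name is only an example), the same threshold can also be set programmatically:

from absl import logging

# Same effect as passing --stderrthreshold=warning: when logs are redirected
# to files, records at WARNING and above are still mirrored to stderr.
logging.set_stderrthreshold('warning')
logging.warning('This record is mirrored to stderr.')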
Example 15: set_verbosity
# Required import: from absl import logging [as alias]
# Or: from absl.logging import debug [as alias]
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string
      values are those that can be coerced to an integer as well as
      case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level
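To round off the list, a short sketch of set_verbosity in action, using the two value forms the docstring describes (the message text is illustrative):

from absl import logging

logging.set_verbosity('debug')        # case-insensitive level name
logging.set_verbosity(logging.DEBUG)  # integer constant, same effect
logging.debug('effective verbosity: %d', logging.get_verbosity())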