This article collects typical usage examples of the Python function tensorflow.python.debug.cli.debugger_cli_common.rich_text_lines_from_rich_line_list. If you have been wondering what rich_text_lines_from_rich_line_list does and how to call it, the hand-picked code samples below should help.
The listing below shows 15 code examples of rich_text_lines_from_rich_line_list, sorted by popularity by default.
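Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the call pattern they all share: build a list that mixes plain strings and RichLine objects, concatenate RichLine pieces with "+", optionally attach font attributes, and convert the list into a single RichTextLines object. The alias RL mirrors the one used in the examples; the printed values are what this sketch is expected to produce.

from tensorflow.python.debug.cli import debugger_cli_common

RL = debugger_cli_common.RichLine  # alias used throughout the examples below

# "+" concatenates RichLine pieces and keeps their attribute segments.
status = RL("Status: ") + RL("OK", "green")
out = debugger_cli_common.rich_text_lines_from_rich_line_list([
    status,
    "",                              # plain strings can be mixed in freely
    RL("Press q to quit", "bold"),
])
print(out.lines)           # ['Status: OK', '', 'Press q to quit']
print(out.font_attr_segs)  # {0: [(8, 10, 'green')], 2: [(0, 15, 'bold')]}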
Example 1: _node_status_label_legend
def _node_status_label_legend(self):
"""Get legend for node-status labels.
Returns:
(debugger_cli_common.RichTextLines) Legend text.
"""
return debugger_cli_common.rich_text_lines_from_rich_line_list([
RL(""),
RL("Legend:"),
(RL(" ") +
RL(self.STATE_IS_PLACEHOLDER,
self._STATE_COLORS[self.STATE_IS_PLACEHOLDER]) +
" - Placeholder"),
(RL(" ") +
RL(self.STATE_UNFEEDABLE,
self._STATE_COLORS[self.STATE_UNFEEDABLE]) +
" - Unfeedable"),
(RL(" ") +
RL(self.STATE_CONT,
self._STATE_COLORS[self.STATE_CONT]) +
" - Already continued-to; Tensor handle available from output "
"slot(s)"),
(RL(" ") +
RL(self.STATE_DUMPED_INTERMEDIATE,
self._STATE_COLORS[self.STATE_DUMPED_INTERMEDIATE]) +
" - Unfeedable"),
(RL(" ") +
RL(self.STATE_OVERRIDDEN,
self._STATE_COLORS[self.STATE_OVERRIDDEN]) +
" - Has overriding (injected) tensor value"),
(RL(" ") +
RL(self.STATE_DIRTY_VARIABLE,
self._STATE_COLORS[self.STATE_DIRTY_VARIABLE]) +
" - Dirty variable: Variable already updated this node stepper.")])
Example 2: _render_node_traceback
def _render_node_traceback(self, node_name):
"""Render traceback of a node's creation in Python, if available.
Args:
node_name: (str) name of the node.
Returns:
A RichTextLines object containing the stack trace of the node's
construction.
"""
lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")]
try:
node_stack = self._debug_dump.node_traceback(node_name)
for depth, (file_path, line, function_name, text) in enumerate(
node_stack):
lines.append("%d: %s" % (depth, file_path))
attribute = debugger_cli_common.MenuItem(
"", "ps %s -b %d" % (file_path, line)) if text else None
line_number_line = RL(" ")
line_number_line += RL("Line: %d" % line, attribute)
lines.append(line_number_line)
lines.append(" Function: %s" % function_name)
lines.append(" Text: " + (("\"%s\"" % text) if text else "None"))
lines.append("")
except KeyError:
lines.append("(Node unavailable in the loaded Python graph)")
except LookupError:
lines.append("(Unavailable because no Python graph has been loaded)")
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
Example 3: get_error_intro
def get_error_intro(tf_error):
"""Generate formatted intro for TensorFlow run-time error.
Args:
tf_error: (errors.OpError) TensorFlow run-time error object.
Returns:
(RichTextLines) Formatted intro message about the run-time OpError, with
sample commands for debugging.
"""
if hasattr(tf_error, "op") and hasattr(tf_error.op, "name"):
op_name = tf_error.op.name
else:
op_name = None
intro_lines = [
"--------------------------------------",
RL("!!! An error occurred during the run !!!", "blink"),
"",
]
out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)
if op_name is not None:
out.extend(debugger_cli_common.RichTextLines(
["You may use the following commands to debug:"]))
out.extend(
_recommend_command("ni -a -d -t %s" % op_name,
"Inspect information about the failing op.",
create_link=True))
out.extend(
_recommend_command("li -r %s" % op_name,
"List inputs to the failing op, recursively.",
create_link=True))
out.extend(
_recommend_command(
"lt",
"List all tensors dumped during the failing run() call.",
create_link=True))
else:
out.extend(debugger_cli_common.RichTextLines([
"WARNING: Cannot determine the name of the op that caused the error."]))
more_lines = [
"",
"Op name: %s" % op_name,
"Error type: " + str(type(tf_error)),
"",
"Details:",
str(tf_error),
"",
"--------------------------------------",
"",
]
out.extend(debugger_cli_common.RichTextLines(more_lines))
return out
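The example above assembles its output incrementally with RichTextLines.extend(). A rough standalone sketch of that composition pattern, independent of any real TensorFlow error object:

from tensorflow.python.debug.cli import debugger_cli_common

RL = debugger_cli_common.RichLine

# Start from a rich intro, then append a plain RichTextLines block in place.
out = debugger_cli_common.rich_text_lines_from_rich_line_list([
    RL("!!! An error occurred during the run !!!", "blink"), ""])
out.extend(debugger_cli_common.RichTextLines(["Details:", "<error text here>"]))
print(out.lines)
# ['!!! An error occurred during the run !!!', '', 'Details:', '<error text here>']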
Example 4: _recommend_command
def _recommend_command(command, description, indent=2, create_link=False):
"""Generate a RichTextLines object that describes a recommended command.
Args:
command: (str) The command to recommend.
description: (str) A description of what the command does.
indent: (int) How many spaces to indent in the beginning.
create_link: (bool) Whether a command link is to be applied to the command
string.
Returns:
(RichTextLines) Formatted text (with font attributes) for recommending the
command.
"""
indent_str = " " * indent
if create_link:
font_attr = [debugger_cli_common.MenuItem("", command), "bold"]
else:
font_attr = "bold"
lines = [RL(indent_str) + RL(command, font_attr) + ":",
indent_str + " " + description]
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
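Note how the command segment carries a list-valued font attribute: a MenuItem (rendered by the curses frontend as a clickable shortcut) combined with the plain "bold" attribute. A small hedged sketch of the resulting attribute segments, using the "lt" command purely as an illustration:

from tensorflow.python.debug.cli import debugger_cli_common

RL = debugger_cli_common.RichLine

link = debugger_cli_common.MenuItem("", "lt")   # caption, command to dispatch
line = RL("  ") + RL("lt", [link, "bold"]) + ": list dumped tensors"
out = debugger_cli_common.rich_text_lines_from_rich_line_list([line])
print(out.font_attr_segs)  # {0: [(2, 4, [<MenuItem ...>, 'bold'])]}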
Example 5: _counts_summary
def _counts_summary(counts, skip_zeros=True, total_count=None):
"""Format values as a two-row table."""
if skip_zeros:
counts = [(count_key, count_val) for count_key, count_val in counts
if count_val]
max_common_len = 0
for count_key, count_val in counts:
count_val_str = str(count_val)
common_len = max(len(count_key) + 1, len(count_val_str) + 1)
max_common_len = max(common_len, max_common_len)
key_line = debugger_cli_common.RichLine("|")
val_line = debugger_cli_common.RichLine("|")
for count_key, count_val in counts:
count_val_str = str(count_val)
key_line += _pad_string_to_length(count_key, max_common_len)
val_line += _pad_string_to_length(count_val_str, max_common_len)
key_line += " |"
val_line += " |"
if total_count is not None:
total_key_str = "total"
total_val_str = str(total_count)
max_common_len = max(len(total_key_str) + 1, len(total_val_str))
total_key_str = _pad_string_to_length(total_key_str, max_common_len)
total_val_str = _pad_string_to_length(total_val_str, max_common_len)
key_line += total_key_str + " |"
val_line += total_val_str + " |"
return debugger_cli_common.rich_text_lines_from_rich_line_list(
[key_line, val_line])
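The helper _pad_string_to_length is referenced above but not shown in this listing. A plausible minimal stand-in (an assumption, not the actual TensorFlow implementation) that keeps the two rows column-aligned:

def _pad_string_to_length(string, length):
  # Hypothetical stand-in: right-align `string` in a field of at least
  # `length` characters so that both table rows use identical column widths.
  return " " * max(0, length - len(string)) + string

# With counts = [("Placeholder", 3), ("VariableV2", 12)] and total_count=15,
# _counts_summary then returns two '|'-delimited rows whose columns line up.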
Example 6: list_sorted_nodes
def list_sorted_nodes(self, args, screen_info=None):
"""List the sorted transitive closure of the stepper's fetches."""
# TODO(cais): Use pattern such as del args, del screen_info to mark unused arguments.
_ = args
_ = screen_info
parsed = self.arg_parsers["list_sorted_nodes"].parse_args(args)
if parsed.lower_bound != -1 and parsed.upper_bound != -1:
index_range = [
max(0, parsed.lower_bound),
min(len(self._sorted_nodes), parsed.upper_bound)
]
verbose = False
else:
index_range = [0, len(self._sorted_nodes)]
verbose = True
handle_node_names = self._node_stepper.handle_node_names()
intermediate_tensor_names = self._node_stepper.intermediate_tensor_names()
override_names = self._node_stepper.override_names()
dirty_variable_names = [
dirty_variable.split(":")[0]
for dirty_variable in self._node_stepper.dirty_variables()
]
lines = []
if verbose:
lines.extend(
["Topologically-sorted transitive input(s) and fetch(es):", ""])
output = debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
self._add_deprecation_warning(output)
for i, element_name in enumerate(self._sorted_nodes):
if i < index_range[0] or i >= index_range[1]:
continue
# TODO(cais): Use fixed-width text to show node index.
if i == self._next:
node_prefix = RL(" ") + RL(self.NEXT_NODE_POINTER_STR, "bold")
else:
node_prefix = RL(" ")
node_prefix += "(%d / %d)" % (i + 1, len(self._sorted_nodes)) + " ["
node_prefix += self._get_status_labels(
element_name,
handle_node_names,
intermediate_tensor_names,
override_names,
dirty_variable_names)
output.append_rich_line(node_prefix + "] " + element_name)
if verbose:
output.extend(self._node_status_label_legend())
return output
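append_rich_line adds a single RichLine (text plus its attribute segments) to an already-built RichTextLines object, which is how each node row is emitted in the loop above. A small hedged sketch:

from tensorflow.python.debug.cli import debugger_cli_common

RL = debugger_cli_common.RichLine

out = debugger_cli_common.RichTextLines(["Nodes:"])
out.append_rich_line(RL("  ") + RL("-->", "bold") + " (1 / 2) [P] x")
out.append_rich_line(RL("  ") + "    (2 / 2) [ ] y")
print(out.lines)  # ['Nodes:', '  --> (1 / 2) [P] x', '      (2 / 2) [ ] y']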
Example 7: print_source
def print_source(self, args, screen_info=None):
"""Print the content of a source file."""
del screen_info # Unused.
parsed = self._arg_parsers["print_source"].parse_args(args)
source_annotation = source_utils.annotate_source(
self._debug_dump,
parsed.source_file_path,
do_dumped_tensors=parsed.tensors,
min_line=parsed.line_begin)
with open(parsed.source_file_path, "rU") as f:
source_text = f.read()
source_lines = source_text.split("\n")
num_lines = len(source_lines)
line_num_width = int(np.ceil(np.log10(num_lines))) + 3
labeled_source_lines = []
if parsed.line_begin > 1:
labeled_source_lines.append(
RL("(... Omitted %d source lines ...)" % (parsed.line_begin - 1),
"bold"))
for i, line in enumerate(source_lines[parsed.line_begin - 1:]):
annotated_line = RL("L%d" % (i + parsed.line_begin), "yellow")
annotated_line += " " * (line_num_width - len(annotated_line))
annotated_line += line
labeled_source_lines.append(annotated_line)
if i + parsed.line_begin in source_annotation:
sorted_elements = sorted(source_annotation[i + parsed.line_begin])
for k, element in enumerate(sorted_elements):
if k >= parsed.max_elements_per_line:
labeled_source_lines.append(
" (... Omitted %d of %d %s ...)" % (
len(sorted_elements) - parsed.max_elements_per_line,
len(sorted_elements),
"tensor(s)" if parsed.tensors else "op(s)"))
break
label = RL(" " * 4)
if self._debug_dump.debug_watch_keys(
debug_data.get_node_name(element)):
attribute = debugger_cli_common.MenuItem("", "pt %s" % element)
else:
attribute = "blue"
label += RL(element, attribute)
labeled_source_lines.append(label)
output = debugger_cli_common.rich_text_lines_from_rich_line_list(
labeled_source_lines)
_add_main_menu(output, node_name=None)
return output
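The width of the line-number gutter above is derived from the number of source lines. A quick sanity check of that arithmetic (assuming numpy imported as np, as in the example):

import numpy as np

num_lines = 250
line_num_width = int(np.ceil(np.log10(num_lines))) + 3
print(line_num_width)  # 6 -> room for "L250" plus at least two spaces of padding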
Example 8: get_error_intro
def get_error_intro(tf_error):
"""Generate formatted intro for TensorFlow run-time error.
Args:
tf_error: (errors.OpError) TensorFlow run-time error object.
Returns:
(RichTextLines) Formatted intro message about the run-time OpError, with
sample commands for debugging.
"""
op_name = tf_error.op.name
intro_lines = [
"--------------------------------------",
RL("!!! An error occurred during the run !!!", "blink"),
"",
"You may use the following commands to debug:",
]
out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)
out.extend(
_recommend_command("ni -a -d -t %s" % op_name,
"Inspect information about the failing op.",
create_link=True))
out.extend(
_recommend_command("li -r %s" % op_name,
"List inputs to the failing op, recursively.",
create_link=True))
out.extend(
_recommend_command(
"lt",
"List all tensors dumped during the failing run() call.",
create_link=True))
more_lines = [
"",
"Op name: " + op_name,
"Error type: " + str(type(tf_error)),
"",
"Details:",
str(tf_error),
"",
"WARNING: Using client GraphDef due to the error, instead of "
"executor GraphDefs.",
"--------------------------------------",
"",
]
out.extend(debugger_cli_common.RichTextLines(more_lines))
return out
Example 9: error
def error(msg):
"""Generate a RichTextLines output for error.
Args:
msg: (str) The error message.
Returns:
(debugger_cli_common.RichTextLines) A representation of the error message
for screen output.
"""
return debugger_cli_common.rich_text_lines_from_rich_line_list([
RL("ERROR: " + msg, COLOR_RED)])
Example 10: render
def render(self,
max_length,
backward_command,
forward_command,
latest_command_attribute="black_on_white",
old_command_attribute="magenta_on_white"):
"""Render the rich text content of the single-line navigation bar.
Args:
max_length: (`int`) Maximum length of the navigation bar, in characters.
backward_command: (`str`) command for going backward. Used to construct
the shortcut menu item.
forward_command: (`str`) command for going forward. Used to construct the
shortcut menu item.
latest_command_attribute: font attribute for the latest command.
old_command_attribute: font attribute for old (non-latest) command.
Returns:
(`debugger_cli_common.RichTextLines`) the navigation bar text with
attributes.
"""
output = RL("| ")
output += RL(
self.BACK_ARROW_TEXT,
(debugger_cli_common.MenuItem(None, backward_command)
if self.can_go_back() else None))
output += RL(" ")
output += RL(
self.FORWARD_ARROW_TEXT,
(debugger_cli_common.MenuItem(None, forward_command)
if self.can_go_forward() else None))
if self._items:
command_attribute = (latest_command_attribute
if (self._pointer == (len(self._items) - 1))
else old_command_attribute)
output += RL(" | ")
if self._pointer != len(self._items) - 1:
output += RL("(-%d) " % (len(self._items) - 1 - self._pointer),
command_attribute)
if len(output) < max_length:
maybe_truncated_command = self._items[self._pointer].command[
:(max_length - len(output))]
output += RL(maybe_truncated_command, command_attribute)
return debugger_cli_common.rich_text_lines_from_rich_line_list([output])
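len() works directly on a RichLine (it returns the length of the underlying text), which is what lets the navigation bar above decide how much of the latest command still fits within max_length. A quick sketch under an assumed 20-character budget:

from tensorflow.python.debug.cli import debugger_cli_common

RL = debugger_cli_common.RichLine

bar = RL("| ") + RL("<--", "bold") + RL(" ") + RL("-->", "bold") + RL(" | ")
command = "print_tensor hidden/weights:0"
remaining = 20 - len(bar)                       # characters left in the bar
bar += RL(command[:remaining], "black_on_white")
print(bar.text)  # '| <-- --> | print_te'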
Example 11: summarize
def summarize(self, highlight=None):
"""Get a text summary of the config.
Args:
highlight: A property name to highlight in the output.
Returns:
A `RichTextLines` output.
"""
lines = [RL("Command-line configuration:", "bold"), RL("")]
for name, val in self._config.items():
highlight_attr = "bold" if name == highlight else None
line = RL(" ")
line += RL(name, ["underline", highlight_attr])
line += RL(": ")
line += RL(str(val), font_attr=highlight_attr)
lines.append(line)
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
Example 12: _report_last_updated
def _report_last_updated(self):
"""Generate a report of the variables updated in the last cont/step call.
Returns:
(debugger_cli_common.RichTextLines) A RichTextLines representation of the
variables updated in the last cont/step call.
"""
last_updated = self._node_stepper.last_updated()
if not last_updated:
return debugger_cli_common.RichTextLines([])
rich_lines = [RL("Updated:", self._UPDATED_ATTRIBUTE)]
sorted_last_updated = sorted(list(last_updated))
for updated in sorted_last_updated:
rich_lines.append(RL(" %s" % updated))
rich_lines.append(RL(""))
return debugger_cli_common.rich_text_lines_from_rich_line_list(rich_lines)
Example 13: _report_last_feed_types
def _report_last_feed_types(self):
"""Generate a report of the feed types used in the cont/step call.
Returns:
(debugger_cli_common.RichTextLines) A RichTextLines representation of the
feeds used in the last cont/step call.
"""
feed_types = self._node_stepper.last_feed_types()
out = ["Stepper used feeds:"]
if feed_types:
for feed_name in feed_types:
feed_info = RL(" %s : " % feed_name)
feed_info += RL(feed_types[feed_name],
self._FEED_COLORS[feed_types[feed_name]])
out.append(feed_info)
else:
out.append(" (No feeds)")
out.append("")
return debugger_cli_common.rich_text_lines_from_rich_line_list(out)
Example 14: _get_list_profile_lines
def _get_list_profile_lines(
self, device_name, device_index, device_count,
profile_datum_list, sort_by, sort_reverse, time_unit,
device_name_filter=None, node_name_filter=None, op_type_filter=None,
screen_cols=80):
"""Get `RichTextLines` object for list_profile command for a given device.
Args:
device_name: (string) Device name.
device_index: (int) Device index.
device_count: (int) Number of devices.
profile_datum_list: List of `ProfileDatum` objects.
sort_by: (string) Identifier of column to sort. Sort identifier
must match value of SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_MEMORY or SORT_OPS_BY_LINE.
sort_reverse: (bool) Whether to sort in descending instead of default
(ascending) order.
time_unit: time unit, must be in cli_shared.TIME_UNITS.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
screen_cols: (int) Number of columns available on the screen (i.e.,
available screen width).
Returns:
`RichTextLines` object containing a table that displays profiling
information for each op.
"""
profile_data = ProfileDataTableView(profile_datum_list, time_unit=time_unit)
# Calculate total time early to calculate column widths.
total_op_time = sum(datum.op_time for datum in profile_datum_list)
total_exec_time = sum(datum.node_exec_stats.all_end_rel_micros
for datum in profile_datum_list)
device_total_row = [
"Device Total", "",
cli_shared.time_to_readable_str(total_op_time,
force_time_unit=time_unit),
cli_shared.time_to_readable_str(total_exec_time,
force_time_unit=time_unit)]
# Calculate column widths.
column_widths = [
len(column_name) for column_name in profile_data.column_names()]
for col in range(len(device_total_row)):
column_widths[col] = max(column_widths[col], len(device_total_row[col]))
for col in range(len(column_widths)):
for row in range(profile_data.row_count()):
column_widths[col] = max(
column_widths[col], len(profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)))
column_widths[col] += 2 # add margin between columns
# Add device name.
output = [RL("-" * screen_cols)]
device_row = "Device %d of %d: %s" % (
device_index + 1, device_count, device_name)
output.append(RL(device_row))
output.append(RL())
# Add headers.
base_command = "list_profile"
row = RL()
for col in range(profile_data.column_count()):
column_name = profile_data.column_names()[col]
sort_id = profile_data.column_sort_id(col)
command = "%s -s %s" % (base_command, sort_id)
if sort_by == sort_id and not sort_reverse:
command += " -r"
head_menu_item = debugger_cli_common.MenuItem(None, command)
row += RL(column_name, font_attr=[head_menu_item, "bold"])
row += RL(" " * (column_widths[col] - len(column_name)))
output.append(row)
# Add data rows.
for row in range(profile_data.row_count()):
new_row = RL()
for col in range(profile_data.column_count()):
new_cell = profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)
new_row += new_cell
new_row += RL(" " * (column_widths[col] - len(new_cell)))
output.append(new_row)
# Add stat totals.
row_str = ""
for col in range(len(device_total_row)):
row_str += ("{:<%d}" % column_widths[col]).format(device_total_row[col])
output.append(RL())
output.append(RL(row_str))
return debugger_cli_common.rich_text_lines_from_rich_line_list(output)
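The totals row above uses the "{:<N}".format idiom for left-aligned padding. A tiny standalone illustration of that idiom with made-up column widths:

widths = [14, 6, 10, 10]
cells = ["Device Total", "", "1.2ms", "3.4ms"]
row_str = ""
for width, cell in zip(widths, cells):
  # Left-align each cell in a field of `width` characters.
  row_str += ("{:<%d}" % width).format(cell)
print(repr(row_str))  # 'Device Total        1.2ms     3.4ms     '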
Example 15: get_run_start_intro
def get_run_start_intro(run_call_count,
fetches,
feed_dict,
tensor_filters):
"""Generate formatted intro for run-start UI.
Args:
run_call_count: (int) Run call counter.
fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
for more details.
feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
for more details.
tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
callable.
Returns:
(RichTextLines) Formatted intro message about the `Session.run()` call.
"""
fetch_lines = _get_fetch_names(fetches)
if not feed_dict:
feed_dict_lines = ["(Empty)"]
else:
feed_dict_lines = []
for feed_key in feed_dict:
if isinstance(feed_key, six.string_types):
feed_dict_lines.append(feed_key)
else:
feed_dict_lines.append(feed_key.name)
intro_lines = [
"======================================",
"Session.run() call #%d:" % run_call_count,
"", "Fetch(es):"
]
intro_lines.extend([" " + line for line in fetch_lines])
intro_lines.extend(["", "Feed dict(s):"])
intro_lines.extend([" " + line for line in feed_dict_lines])
intro_lines.extend([
"======================================", "",
"Select one of the following commands to proceed ---->"
])
out = debugger_cli_common.RichTextLines(intro_lines)
out.extend(
_recommend_command(
"run",
"Execute the run() call with debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -n",
"Execute the run() call without debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -t <T>",
"Execute run() calls (T - 1) times without debugging, then "
"execute run() once more with debugging and drop back to the CLI"))
out.extend(
_recommend_command(
"run -f <filter_name>",
"Keep executing run() calls until a dumped tensor passes a given, "
"registered filter (conditional breakpoint mode)"))
more_lines = [" Registered filter(s):"]
if tensor_filters:
filter_names = []
for filter_name in tensor_filters:
filter_names.append(filter_name)
command_menu_node = debugger_cli_common.MenuItem(
"", "run -f %s" % filter_name)
more_lines.append(RL(" * ") + RL(filter_name, command_menu_node))
else:
more_lines.append(" (None)")
out.extend(
debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))
out.extend(
_recommend_command(
"invoke_stepper",
"Use the node-stepper interface, which allows you to interactively "
"step through nodes involved in the graph run() call and "
"inspect/modify their values", create_link=True))
out.append("")
out.append_rich_line(RL("For more details, see ") +
RL("help.", debugger_cli_common.MenuItem("", "help")) +
".")
out.append("")
# Make main menu for the run-start intro.
menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem("run", "run"))
menu.append(debugger_cli_common.MenuItem(
"invoke_stepper", "invoke_stepper"))
#.........(part of the code is omitted here).........