This article collects typical usage examples of the Python class spinn_utilities.progress_bar.ProgressBar. If you have been wondering what exactly the ProgressBar class does, how to use it, or what real usage looks like, the curated class code examples below may help.
In the following, 15 code examples of the ProgressBar class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
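Before the individual examples, note the small API they all share: a ProgressBar is constructed from either a collection or an item count, plus a label; progress.over(iterable) then yields the items while advancing the bar, closing it when the loop finishes (a second argument of False keeps it open, as in Example 14 below), while update() and end() drive the bar by hand (as in Example 6). Here is a minimal sketch of both forms, assuming nothing beyond what the examples themselves demonstrate; the loop bodies are hypothetical placeholders.

from spinn_utilities.progress_bar import ProgressBar

items = ["alpha", "beta", "gamma"]

# Form 1: wrap an iterable; the bar closes itself when the loop finishes
progress = ProgressBar(items, "Processing items")
for item in progress.over(items):
    pass  # per-item work goes here (placeholder)

# Form 2: size the bar by a count and drive it manually
progress = ProgressBar(len(items), "Processing items manually")
for item in items:
    # ... per-item work here ...
    progress.update()  # advance the bar by one item
progress.end()  # close the bar explicitly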
Example 1: __call__
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """ Place a machine_graph so that each vertex is placed on a core

    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine:\
        The machine on which to place the machine graph
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException:\
        If something goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    ResourceTracker.check_constraints(machine_graph.vertices)
    placements = Placements()
    vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

    # Iterate over vertices and generate placements
    progress = ProgressBar(vertices, "Placing graph vertices")
    resource_tracker = ResourceTracker(machine, plan_n_timesteps)
    for vertex in progress.over(vertices):
        # Create and store a new placement anywhere on the board
        (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints, None)
        placement = Placement(vertex, x, y, p)
        placements.add_placement(placement)
    return placements
Example 2: __call__
def __call__(self, report_default_directory, dsg_targets, transceiver):
    """ Creates a report that states where in SDRAM each region is\
        (read from machine)

    :param report_default_directory: the folder where reports are written
    :param dsg_targets: the map between placement and file writer
    :param transceiver: the SpiNNMan instance
    :rtype: None
    """
    directory_name = os.path.join(
        report_default_directory, MEM_MAP_SUBDIR_NAME)
    if not os.path.exists(directory_name):
        os.makedirs(directory_name)

    progress = ProgressBar(dsg_targets, "Writing memory map reports")
    for (x, y, p) in progress.over(dsg_targets):
        file_name = os.path.join(
            directory_name, MEM_MAP_FILENAME.format(x, y, p))
        try:
            with open(file_name, "w") as f:
                self._describe_mem_map(f, transceiver, x, y, p)
        except IOError:
            logger.exception("Generate_placement_reports: Can't open file"
                             " {} for writing.", file_name)
Example 3: __call__
def __call__(self, report_folder, connection_holder, dsg_targets):
    """ Convert the synaptic matrix for every application edge.
    """
    # Update the print options to display everything
    print_opts = numpy.get_printoptions()
    numpy.set_printoptions(threshold=numpy.nan)

    if dsg_targets is None:
        raise SynapticConfigurationException(
            "dsg_targets should not be none, used as a check for "
            "connection holder data to be generated")

    # generate a folder for the synaptic reports
    top_level_folder = os.path.join(report_folder, _DIRNAME)
    if not os.path.exists(top_level_folder):
        os.mkdir(top_level_folder)

    # create progress bar
    progress = ProgressBar(connection_holder.keys(),
                           "Generating synaptic matrix reports")

    # for each application edge, write the matrix in a new file
    for edge, _ in progress.over(connection_holder.keys()):
        # only write matrices for edges that have matrices
        if isinstance(edge, ProjectionApplicationEdge):
            # figure out the new file name
            file_name = os.path.join(
                top_level_folder, _TMPL_FILENAME.format(edge.label))
            self._write_file(file_name, connection_holder, edge)

    # Reset the print options
    numpy.set_printoptions(**print_opts)
Example 4: __call__
def __call__(self, router_tables):
    tables = MulticastRoutingTables()
    previous_masks = dict()
    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Create all masks without holes
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

    # Check that none of the masks have "holes", e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " BasicRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        # print("Reduced from {} to {}".format(
        #     len(router_table.multicast_routing_entries), n_entries))
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))
    return tables
Example 5: __call__
def __call__(self, router_tables, target_length=None):
    # build storage
    compressed_pacman_router_tables = MulticastRoutingTables()

    # create progress bar
    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing Tables")

    # compress each router table
    for router_table in progress.over(router_tables.routing_tables):
        # convert to rig format
        entries = self._convert_to_mundy_format(router_table)

        # compress the router entries
        compressed_router_table_entries = \
            rigs_compressor.minimise(entries, target_length)

        # convert back to the pacman model
        compressed_pacman_table = self._convert_to_pacman_router_table(
            compressed_router_table_entries, router_table.x,
            router_table.y)

        # add to the new compressed routing tables
        compressed_pacman_router_tables.add_routing_table(
            compressed_pacman_table)

    return compressed_pacman_router_tables
Example 6: __call__
def __call__(
        self, transceiver, tags=None, iptags=None, reverse_iptags=None):
    """
    :param tags: the tags object which contains IP and reverse IP tags;\
        may be None if these are being given in separate lists
    :param iptags: a list of IP tags, given when tags is None
    :param reverse_iptags: a list of reverse IP tags, given when tags is\
        None
    :param transceiver: the transceiver object
    """
    # clear all the tags from the Ethernet connection, as nothing should
    # be allowed to use it (no two apps should use the same Ethernet
    # connection at the same time)
    progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
    for tag_id in progress.over(range(MAX_TAG_ID)):
        transceiver.clear_ip_tag(tag_id)

    # Use the tags object to supply the tag info if it is supplied
    if tags is not None:
        iptags = list(tags.ip_tags)
        reverse_iptags = list(tags.reverse_ip_tags)

    # Load the IP tags and the reverse IP tags
    progress = ProgressBar(
        len(iptags) + len(reverse_iptags), "Loading Tags")
    self.load_iptags(iptags, transceiver, progress)
    self.load_reverse_iptags(reverse_iptags, transceiver, progress)
    progress.end()
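Example 6 above is worth a second look because a single ProgressBar is shared across two helper calls (load_iptags and load_reverse_iptags), each of which advances the bar as it works, and the caller closes it with end() once both are done. A minimal sketch of that hand-off pattern follows; load_items is a hypothetical stand-in for the loading helpers, and only the ProgressBar calls are taken from the example.

from spinn_utilities.progress_bar import ProgressBar

def load_items(items, progress):
    # Hypothetical helper in the style of load_iptags: one unit of work
    # per item, advancing the shared progress bar as it goes
    for item in items:
        # ... load the item here ...
        progress.update()  # advance the shared bar by one

iptags = ["iptag-1"]
reverse_iptags = ["rtag-1", "rtag-2"]
progress = ProgressBar(len(iptags) + len(reverse_iptags), "Loading Tags")
load_items(iptags, progress)
load_items(reverse_iptags, progress)
progress.end()  # the caller ends the bar once all helpers are done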
Example 7: __call__
def __call__(self, txrx, app_id, all_core_subsets):
    # check that the right number of processors are in sync
    processors_completed = txrx.get_core_state_count(
        app_id, CPUState.FINISHED)
    total_processors = len(all_core_subsets)
    left_to_do_cores = total_processors - processors_completed

    progress = ProgressBar(
        left_to_do_cores,
        "Forcing error cores to generate provenance data")

    error_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.RUN_TIME_EXCEPTION)
    watchdog_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.WATCHDOG)
    idle_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.IDLE)

    if error_cores or watchdog_cores or idle_cores:
        raise ConfigurationException(
            "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
            " idle cores {}".format(
                error_cores.values(), watchdog_cores.values(),
                idle_cores.values()))

    # check that all cores are in the state FINISHED, which shows that
    # the core has received the message and done the provenance updating
    self._update_provenance(txrx, total_processors, processors_completed,
                            all_core_subsets, app_id, progress)
    progress.end()
Example 8: __call__
def __call__(
        self, live_packet_gatherer_parameters, machine, machine_graph,
        application_graph=None, graph_mapper=None):
    """ Add LPG vertices on Ethernet connected chips as required.

    :param live_packet_gatherer_parameters:\
        the Live Packet Gatherer parameters requested by the script
    :param machine: the SpiNNaker machine as discovered
    :param machine_graph: the machine graph
    :param application_graph: the application graph
    :return: mapping between LPG parameters and LPG vertex
    """
    # pylint: disable=too-many-arguments

    # create progress bar
    progress = ProgressBar(
        machine.ethernet_connected_chips,
        string_describing_what_being_progressed=(
            "Adding Live Packet Gatherers to Graph"))

    # Keep track of the vertices added, by parameters and board address
    lpg_params_to_vertices = defaultdict(dict)

    # for every Ethernet connected chip, add the gatherers required
    for chip in progress.over(machine.ethernet_connected_chips):
        for params in live_packet_gatherer_parameters:
            if (params.board_address is None or
                    params.board_address == chip.ip_address):
                lpg_params_to_vertices[params][chip.x, chip.y] = \
                    self._add_lpg_vertex(application_graph, graph_mapper,
                                         machine_graph, chip, params)

    return lpg_params_to_vertices
Developer: SpiNNakerManchester, Project: SpiNNFrontEndCommon, Lines: 33, Source: insert_live_packet_gatherers_to_graphs.py
Example 9: __call__
def __call__(
        self, transceiver, placements, provenance_file_path,
        run_time_ms, machine_time_step):
    """
    :param transceiver: the SpiNNMan interface object
    :param placements: The placements of the vertices
    :param provenance_file_path: The location to store the profile data
    :param run_time_ms: runtime in ms
    :param machine_time_step: machine time step in microseconds
    """
    # pylint: disable=too-many-arguments
    machine_time_step_ms = machine_time_step // 1000
    progress = ProgressBar(
        placements.n_placements, "Getting profile data")

    # retrieve profile data from any cores that provide it
    for placement in progress.over(placements.placements):
        if isinstance(placement.vertex, AbstractHasProfileData):
            # get the data
            profile_data = placement.vertex.get_profile_data(
                transceiver, placement)
            if profile_data.tags:
                self._write(placement, profile_data, run_time_ms,
                            machine_time_step_ms, provenance_file_path)
Example 10: get_spikes
def get_spikes(self, label, buffer_manager, region,
               placements, graph_mapper, application_vertex,
               base_key_function, machine_time_step):
    # pylint: disable=too-many-arguments
    results = list()
    missing = []
    ms_per_tick = machine_time_step / 1000.0
    vertices = graph_mapper.get_machine_vertices(application_vertex)
    progress = ProgressBar(vertices,
                           "Getting spikes for {}".format(label))

    for vertex in progress.over(vertices):
        placement = placements.get_placement_of_vertex(vertex)
        vertex_slice = graph_mapper.get_slice(vertex)

        # Read the spikes
        raw_spike_data, data_missing = \
            buffer_manager.get_data_by_placement(placement, region)
        if data_missing:
            missing.append(placement)
        self._process_spike_data(
            vertex_slice, raw_spike_data, ms_per_tick,
            base_key_function(vertex), results)

    if missing:
        missing_str = recording_utils.make_missing_string(missing)
        logger.warning(
            "Population {} is missing spike data in region {} from the"
            " following cores: {}", label, region, missing_str)

    if not results:
        return numpy.empty(shape=(0, 2))
    result = numpy.vstack(results)
    return result[numpy.lexsort((result[:, 1], result[:, 0]))]
Example 11: __call__
def __call__(
        self, live_packet_gatherer_parameters, placements,
        live_packet_gatherers_to_vertex_mapping, machine,
        machine_graph, application_graph=None, graph_mapper=None):
    """
    :param live_packet_gatherer_parameters: the set of parameters
    :param placements: the placements object
    :param live_packet_gatherers_to_vertex_mapping:\
        the mapping of LPG parameters to the machine vertices associated\
        with them
    :param machine: the SpiNNaker machine
    :param machine_graph: the machine graph
    :param application_graph: the app graph
    :param graph_mapper: the graph mapper between the app and machine graphs
    :rtype: None
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        live_packet_gatherer_parameters,
        string_describing_what_being_progressed=(
            "Adding edges to the machine graph between the vertices to "
            "which live output has been requested and its local Live "
            "Packet Gatherer"))

    for lpg_params in progress.over(live_packet_gatherer_parameters):
        # locate the vertices that need to be connected to an LPG with
        # these params
        for vertex in live_packet_gatherer_parameters[lpg_params]:
            self._connect_lpg_vertex(
                application_graph, graph_mapper, machine,
                placements, machine_graph, vertex,
                live_packet_gatherers_to_vertex_mapping, lpg_params)
Developer: SpiNNakerManchester, Project: SpiNNFrontEndCommon, Lines: 31, Source: insert_edges_to_live_packet_gatherers.py
Example 12: __call__
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """ Place each vertex in a machine graph on a core in the machine.

    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine: A SpiNNaker machine object.
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: Placements of vertices on the machine
    :rtype: :py:class:`pacman.model.placements.Placements`
    """
    # check that the algorithm can handle the constraints
    ResourceTracker.check_constraints(machine_graph.vertices)
    placements = Placements()
    vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

    # Iterate over vertices and generate placements
    progress = ProgressBar(machine_graph.n_vertices,
                           "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, plan_n_timesteps, self._generate_random_chips(machine))
    vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
    vertices_placed = set()
    for vertex in progress.over(vertices):
        if vertex not in vertices_placed:
            vertices_placed.update(self._place_vertex(
                vertex, resource_tracker, machine, placements,
                vertices_on_same_chip))
    return placements
Example 13: __call__
def __call__(self, report_default_directory, machine):
    """ Creates a report that lists, for each board, the chips it contains.

    :param report_default_directory: the folder where reports are written
    :param machine: python representation of the machine
    :rtype: None
    """
    # create the file path
    directory_name = os.path.join(
        report_default_directory, self.AREA_CODE_REPORT_NAME)

    # create the progress bar for end users
    progress_bar = ProgressBar(
        len(machine.ethernet_connected_chips),
        "Writing the board chip report")

    # iterate over the Ethernet chips, and then the chips on each board
    with open(directory_name, "w") as writer:
        for ethernet_connected_chip in \
                progress_bar.over(machine.ethernet_connected_chips):
            chips = machine.get_chips_on_board(ethernet_connected_chip)
            writer.write(
                "board with IP address : {} : has chips {}\n".format(
                    ethernet_connected_chip.ip_address, list(chips)))
Example 14: __call__
def __call__(self, machine_graph, graph_mapper):
    """
    :param machine_graph: the machine_graph whose edges are to be filtered
    :param graph_mapper: the graph mapper between graphs
    :return: a new graph mapper and machine graph
    """
    new_machine_graph = MachineGraph(label=machine_graph.label)
    new_graph_mapper = GraphMapper()

    # create progress bar
    progress = ProgressBar(
        machine_graph.n_vertices +
        machine_graph.n_outgoing_edge_partitions,
        "Filtering edges")

    # add the vertices directly, as they won't be pruned
    for vertex in progress.over(machine_graph.vertices, False):
        self._add_vertex_to_new_graph(
            vertex, graph_mapper, new_machine_graph, new_graph_mapper)

    # start checking the edges to decide which ones need pruning
    for partition in progress.over(machine_graph.outgoing_edge_partitions):
        for edge in partition.edges:
            if self._is_filterable(edge, graph_mapper):
                logger.debug("this edge was pruned %s", edge)
                continue
            logger.debug("this edge was not pruned %s", edge)
            self._add_edge_to_new_graph(
                edge, partition, graph_mapper, new_machine_graph,
                new_graph_mapper)

    # return the pruned graph and graph_mapper
    return new_machine_graph, new_graph_mapper
Example 15: __call__
def __call__(
        self, placements, hostname,
        report_default_directory, write_text_specs,
        machine, graph_mapper=None, placement_order=None):
    """
    :param placements: placements of the machine graph onto cores
    :param hostname: SpiNNaker machine name
    :param report_default_directory: the location where reports are stored
    :param write_text_specs:\
        True if the textual version of the specification is to be written
    :param machine: the python representation of the SpiNNaker machine
    :param graph_mapper:\
        the mapping between the application and machine graphs
    :param placement_order:\
        the optional order in which placements should be examined
    :return: DSG targets (map of placement tuple and filename)
    """
    # pylint: disable=too-many-arguments, too-many-locals
    # pylint: disable=attribute-defined-outside-init
    self._machine = machine
    self._hostname = hostname
    self._report_dir = report_default_directory
    self._write_text = write_text_specs

    # iterate through the vertices and call generate_data_spec for each
    # vertex
    targets = DataSpecificationTargets(machine, self._report_dir)

    if placement_order is None:
        placement_order = placements.placements

    progress = ProgressBar(
        placements.n_placements, "Generating data specifications")
    vertices_to_reset = list()
    for placement in progress.over(placement_order):
        # Try to generate the data spec for the placement
        generated = self.__generate_data_spec_for_vertices(
            placement, placement.vertex, targets)

        if generated and isinstance(
                placement.vertex, AbstractRewritesDataSpecification):
            vertices_to_reset.append(placement.vertex)

        # If the spec wasn't generated directly, and there is an
        # application vertex, try with that
        if not generated and graph_mapper is not None:
            associated_vertex = graph_mapper.get_application_vertex(
                placement.vertex)
            generated = self.__generate_data_spec_for_vertices(
                placement, associated_vertex, targets)
            if generated and isinstance(
                    associated_vertex, AbstractRewritesDataSpecification):
                vertices_to_reset.append(associated_vertex)

    # Ensure that the vertices know their regions have been reloaded
    for vertex in vertices_to_reset:
        vertex.mark_regions_reloaded()

    return targets