This article collects typical usage examples of the Python method spinn_utilities.progress_bar.ProgressBar.end. If you have been wondering what ProgressBar.end does, how to call it, or what real uses of it look like, the hand-picked code examples here should help. You can also explore further usage examples of the containing class, spinn_utilities.progress_bar.ProgressBar.
Below are 15 code examples of ProgressBar.end, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
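Before the individual examples, here is a minimal sketch of the ProgressBar lifecycle as it appears throughout the code below. The work items and label are hypothetical; the constructor and method signatures are inferred from these examples rather than taken from the spinn_utilities documentation.

from spinn_utilities.progress_bar import ProgressBar

work_items = ["load", "place", "route"]  # hypothetical steps

# One unit of progress per item, plus one extra unit for a final step
progress = ProgressBar(len(work_items) + 1, "Running hypothetical steps")
for item in work_items:
    # ... do the work for this step ...
    progress.update()  # advance the bar by one unit
# ... final step, e.g. writing results out ...
progress.update()
progress.end()  # mark the bar as complete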
Example 1: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, machine, file_path):
    """
    :param machine_graph: the machine graph
    :param machine: the machine
    """
    progress = ProgressBar(
        machine_graph.n_vertices + 2, "creating JSON constraints")

    json_obj = list()
    self._add_monitor_core_reserve(json_obj)
    progress.update()
    self._add_extra_monitor_cores(json_obj, machine)
    progress.update()
    vertex_by_id = self._search_graph_for_placement_constraints(
        json_obj, machine_graph, machine, progress)
    with open(file_path, "w") as f:
        json.dump(json_obj, f)

    # validate the schema
    file_format_schemas.validate(json_obj, "constraints.json")

    # complete progress bar
    progress.end()
    return file_path, vertex_by_id
Example 2: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, machine, plan_n_timesteps, placements):
    progress_bar = ProgressBar(7, "Routing")

    vertices_resources, nets, net_names = \
        convert_to_rig_graph_pure_mc(machine_graph, plan_n_timesteps)
    progress_bar.update()

    rig_machine = convert_to_rig_machine(machine)
    progress_bar.update()

    rig_constraints = create_rig_machine_constraints(machine)
    progress_bar.update()

    rig_constraints.extend(create_rig_graph_constraints(
        machine_graph, machine))
    progress_bar.update()

    rig_placements, rig_allocations = convert_to_rig_placements(
        placements, machine)
    progress_bar.update()

    rig_routes = route(
        vertices_resources, nets, rig_machine, rig_constraints,
        rig_placements, rig_allocations, "cores")
    rig_routes = {
        name: rig_routes[net] for net, name in iteritems(net_names)}
    progress_bar.update()

    routes = convert_from_rig_routes(rig_routes)
    progress_bar.update()
    progress_bar.end()

    return routes
Example 3: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, machine, plan_n_timesteps):
    progress_bar = ProgressBar(7, "Placing")

    vertices_resources, nets, _ = \
        convert_to_rig_graph(machine_graph, plan_n_timesteps)
    progress_bar.update()

    rig_machine = convert_to_rig_machine(machine)
    progress_bar.update()

    rig_constraints = create_rig_machine_constraints(machine)
    progress_bar.update()

    rig_constraints.extend(create_rig_graph_constraints(
        machine_graph, rig_machine))
    progress_bar.update()

    rig_placements = place(
        vertices_resources, nets, rig_machine, rig_constraints)
    progress_bar.update()

    rig_allocations = allocate(
        vertices_resources, nets, rig_machine, rig_constraints,
        rig_placements)
    progress_bar.update()

    placements = convert_from_rig_placements(
        rig_placements, rig_allocations, machine_graph)
    progress_bar.update()
    progress_bar.end()

    return placements
Example 4: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, placements, file_path):
    """
    :param placements:
    :param file_path:
    """
    progress = ProgressBar(len(placements) + 1,
                           "Converting to JSON core allocations")

    # write basic stuff
    json_obj = OrderedDict()
    json_obj['type'] = "cores"
    vertex_by_id = OrderedDict()

    # process placements
    for placement in progress.over(placements, False):
        self._convert_placement(placement, vertex_by_id, json_obj)

    # dump dict into json file
    with open(file_path, "w") as f:
        json.dump(json_obj, f)
        progress.update()

    # validate the schema
    file_format_schemas.validate(json_obj, "core_allocations.json")

    # complete progress bar
    progress.end()

    # return the file format
    return file_path, vertex_by_id
Example 5: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, placements, file_path):
    """
    :param placements: the memory placements object
    :param file_path: the file path for the placements.json
    :return: file path for the placements.json
    """
    # write basic stuff
    json_obj = dict()
    vertex_by_id = dict()
    progress = ProgressBar(placements.n_placements + 1,
                           "converting to JSON placements")

    # process placements
    for placement in progress.over(placements, False):
        vertex_id = ident(placement.vertex)
        vertex_by_id[vertex_id] = placement.vertex
        json_obj[vertex_id] = [placement.x, placement.y]

    # dump dict into json file
    with open(file_path, "w") as file_to_write:
        json.dump(json_obj, file_to_write)
        progress.update()

    # validate the schema
    file_format_schemas.validate(json_obj, "placements.json")
    progress.end()

    # return the file format
    return file_path, vertex_by_id
Example 6: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(
        self, transceiver, tags=None, iptags=None, reverse_iptags=None):
    """
    :param tags: the tags object which contains IP and reverse IP tags;
        could be None if these are being given in separate lists
    :param iptags: a list of IP tags, given when tags is None
    :param reverse_iptags: a list of reverse IP tags, given when tags is None
    :param transceiver: the transceiver object
    """
    # clear all the tags from the Ethernet connection, as nothing should
    # be allowed to use it (no two apps should use the same Ethernet
    # connection at the same time)
    progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
    for tag_id in progress.over(range(MAX_TAG_ID)):
        transceiver.clear_ip_tag(tag_id)

    # Use tags object to supply tag info if it is supplied
    if tags is not None:
        iptags = list(tags.ip_tags)
        reverse_iptags = list(tags.reverse_ip_tags)

    # Load the IP tags and the reverse IP tags
    progress = ProgressBar(
        len(iptags) + len(reverse_iptags), "Loading Tags")
    self.load_iptags(iptags, transceiver, progress)
    self.load_reverse_iptags(reverse_iptags, transceiver, progress)
    progress.end()
Example 7: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, txrx, app_id, all_core_subsets):
    # check that the right number of processors are in sync
    processors_completed = txrx.get_core_state_count(
        app_id, CPUState.FINISHED)
    total_processors = len(all_core_subsets)
    left_to_do_cores = total_processors - processors_completed

    progress = ProgressBar(
        left_to_do_cores,
        "Forcing error cores to generate provenance data")

    error_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.RUN_TIME_EXCEPTION)
    watchdog_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.WATCHDOG)
    idle_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.IDLE)

    if error_cores or watchdog_cores or idle_cores:
        raise ConfigurationException(
            "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
            " idle cores {}".format(
                error_cores.values(), watchdog_cores.values(),
                idle_cores.values()))

    # check that all cores are in the state FINISHED, which shows that
    # the core has received the message and done provenance updating
    self._update_provenance(txrx, total_processors, processors_completed,
                            all_core_subsets, app_id, progress)
    progress.end()
Example 8: synapse_expander
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def synapse_expander(
        app_graph, graph_mapper, placements, transceiver,
        provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded
    """
    synapse_expander = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_expander = executable_finder.get_executable_path(DELAY_EXPANDER)

    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse expander and delay receivers should run
    expander_cores = ExecutableTargets()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):
        # Find population vertices
        if isinstance(
                vertex, (AbstractPopulationVertex, DelayExtensionVertex)):
            # Add all machine vertices of the population vertex to ones
            # that need synapse expansion
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_expander
                    else:
                        binary = delay_expander
                    expander_cores.add_processor(
                        binary, placement.x, placement.y, placement.p)

    # Launch the delay receivers
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:
        logger.exception("Synapse expander has failed")
        _handle_failure(
            expander_cores, transceiver, provenance_file_path)
    finally:
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)
        if not finished:
            raise SpynnakerException(
                "The synapse expander failed to complete")
Example 9: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, n_keys_map, graph_mapper=None):
    # check that this algorithm supports the constraints
    check_algorithm_can_support_constraints(
        constrained_vertices=machine_graph.outgoing_edge_partitions,
        supported_constraints=[
            FixedMaskConstraint,
            FixedKeyAndMaskConstraint,
            ContiguousKeyRangeContraint, ShareKeyConstraint],
        abstract_constraint_type=AbstractKeyAllocatorConstraint)

    # verify that no edge has more than one of a constraint, and that
    # constraints are compatible
    check_types_of_edge_constraint(machine_graph)

    # final key allocations
    routing_infos = RoutingInfo()

    # Get the edges grouped by those that require the same key
    (fixed_keys, shared_keys, fixed_masks, fixed_fields, flexi_fields,
     continuous, noncontinuous) = get_edge_groups(
        machine_graph, EdgeTrafficType.MULTICAST)

    # Go through the groups and allocate keys
    progress = ProgressBar(
        machine_graph.n_outgoing_edge_partitions,
        "Allocating routing keys")

    # allocate the groups that have fixed keys
    for group in progress.over(fixed_keys, False):
        self._allocate_fixed_keys(group, routing_infos)

    for group in progress.over(fixed_masks, False):
        self._allocate_fixed_masks(group, n_keys_map, routing_infos)

    for group in progress.over(fixed_fields, False):
        self._allocate_fixed_fields(group, n_keys_map, routing_infos)

    if flexi_fields:
        raise PacmanConfigurationException(
            "MallocBasedRoutingInfoAllocator does not support FlexiField")

    for group in progress.over(shared_keys, False):
        self._allocate_share_key(group, routing_infos, n_keys_map)

    for group in continuous:
        self._allocate_other_groups(group, routing_infos, n_keys_map,
                                    continuous=True)

    for group in noncontinuous:
        self._allocate_other_groups(group, routing_infos, n_keys_map,
                                    continuous=False)

    progress.end()
    return routing_infos
Example 10: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, executable_targets, app_id, transceiver):
    progress = ProgressBar(
        executable_targets.total_processors + 1,
        "Loading executables onto the machine")
    for binary in executable_targets.binaries:
        progress.update(self._launch_binary(
            executable_targets, binary, transceiver, app_id))
    self._start_simulation(executable_targets, transceiver, app_id)
    progress.update()
    progress.end()
Example 11: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """
    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine:\
        The machine on which to place the machine graph
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException:\
        If something goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    self._check_constraints(machine_graph.vertices)

    # Sort the vertices into those with and those without
    # placement constraints
    placements = Placements()
    constrained = list()
    unconstrained = set()
    for vertex in machine_graph.vertices:
        if locate_constraints_of_type(
                vertex.constraints, AbstractPlacerConstraint):
            constrained.append(vertex)
        else:
            unconstrained.add(vertex)

    # Iterate over constrained vertices and generate placements
    progress = ProgressBar(
        machine_graph.n_vertices, "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, plan_n_timesteps, self._generate_radial_chips(machine))
    constrained = sort_vertices_by_known_constraints(constrained)
    for vertex in progress.over(constrained, False):
        self._place_vertex(vertex, resource_tracker, machine, placements)

    while unconstrained:
        # Place the subgraph with the overall most connected vertex
        max_connected_vertex = self._find_max_connected_vertex(
            unconstrained, machine_graph)
        self._place_unconstrained_subgraph(
            max_connected_vertex, machine_graph, unconstrained,
            machine, placements, resource_tracker, progress)

    # finished, so stop progress bar and return placements
    progress.end()
    return placements
Example 12: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine_graph, placements, buffer_manager):
    # Count the regions to be read
    n_regions_to_read, recording_placements = self._count_regions(
        machine_graph, placements)

    # Read back the regions
    progress = ProgressBar(
        n_regions_to_read, "Extracting buffers from the last run")
    try:
        buffer_manager.get_data_for_placements(
            recording_placements, progress)
    finally:
        progress.end()
Example 13: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, app_id, txrx, executable_types):
    total_processors = \
        len(executable_types[ExecutableType.USES_SIMULATION_INTERFACE])
    all_core_subsets = \
        executable_types[ExecutableType.USES_SIMULATION_INTERFACE]

    progress = ProgressBar(
        total_processors,
        "Turning off all the cores within the simulation")

    # check that the right number of processors are finished
    processors_finished = txrx.get_core_state_count(
        app_id, CPUState.FINISHED)
    finished_cores = processors_finished
    while processors_finished != total_processors:
        if processors_finished > finished_cores:
            progress.update(processors_finished - finished_cores)
            finished_cores = processors_finished

        processors_rte = txrx.get_core_state_count(
            app_id, CPUState.RUN_TIME_EXCEPTION)
        processors_watchdogged = txrx.get_core_state_count(
            app_id, CPUState.WATCHDOG)

        if processors_rte > 0 or processors_watchdogged > 0:
            raise ExecutableFailedToStopException(
                "{} of {} processors went into an error state when"
                " shutting down".format(
                    processors_rte + processors_watchdogged,
                    total_processors))

        successful_cores_finished = txrx.get_cores_in_state(
            all_core_subsets, CPUState.FINISHED)
        for core_subset in all_core_subsets:
            for processor in core_subset.processor_ids:
                if not successful_cores_finished.is_core(
                        core_subset.x, core_subset.y, processor):
                    self._update_provenance_and_exit(
                        txrx, processor, core_subset)

        time.sleep(0.5)
        processors_finished = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)

    progress.end()
Example 14: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, report_folder, application_graph):
    """
    :param report_folder: the report folder to put the figure into
    :param application_graph: the app graph
    :rtype: None
    """
    # create holders for data
    vertex_holders = dict()

    dot_diagram = self._get_diagram(
        "The graph of the network in graphical form")

    # build progress bar for the vertices, edges, and rendering
    progress = ProgressBar(
        application_graph.n_vertices +
        application_graph.n_outgoing_edge_partitions + 1,
        "generating the graphical representation of the neural network")

    # write vertices into dot diagram
    for vertex_counter, vertex in progress.over(
            enumerate(application_graph.vertices), False):
        dot_diagram.node(
            "{}".format(vertex_counter),
            "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
        vertex_holders[vertex] = vertex_counter

    # write edges into dot diagram
    for partition in progress.over(
            application_graph.outgoing_edge_partitions, False):
        for edge in partition.edges:
            source_vertex_id = vertex_holders[edge.pre_vertex]
            dest_vertex_id = vertex_holders[edge.post_vertex]
            if isinstance(edge, ProjectionApplicationEdge):
                for synapse_info in edge.synapse_information:
                    dot_diagram.edge(
                        "{}".format(source_vertex_id),
                        "{}".format(dest_vertex_id),
                        "{}".format(synapse_info.connector))
            else:
                dot_diagram.edge(
                    "{}".format(source_vertex_id),
                    "{}".format(dest_vertex_id))

    # write dot file and generate pdf
    file_to_output = os.path.join(report_folder, "network_graph.gv")
    dot_diagram.render(file_to_output, view=False)
    progress.update()
    progress.end()
Developer: SpiNNakerManchester, project: sPyNNaker, lines of code: 50, source file: spynnaker_neuron_network_specification_report.py
Example 15: __call__
# Required module import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import end [as alias]
def __call__(self, machine, file_path):
    """
    :param machine:
    :param file_path:
    """
    progress = ProgressBar(
        (machine.max_chip_x + 1) * (machine.max_chip_y + 1) + 2,
        "Converting to JSON machine")

    # write basic stuff
    json_obj = {
        "width": machine.max_chip_x + 1,
        "height": machine.max_chip_y + 1,
        "chip_resources": {
            "cores": CHIP_HOMOGENEOUS_CORES,
            "sdram": CHIP_HOMOGENEOUS_SDRAM,
            "sram": CHIP_HOMOGENEOUS_SRAM,
            "router_entries": ROUTER_HOMOGENEOUS_ENTRIES,
            "tags": CHIP_HOMOGENEOUS_TAGS},
        "dead_chips": [],
        "dead_links": []}

    # handle exceptions (dead chips)
    exceptions = defaultdict(dict)
    for x in range(0, machine.max_chip_x + 1):
        for y in progress.over(range(0, machine.max_chip_y + 1), False):
            self._add_exceptions(json_obj, machine, x, y, exceptions)
    json_obj["chip_resource_exceptions"] = [
        [x, y, exceptions[x, y]] for x, y in exceptions]
    progress.update()

    # dump to json file
    with open(file_path, "w") as f:
        json.dump(json_obj, f)
    progress.update()

    # validate the schema
    file_format_schemas.validate(json_obj, "machine.json")

    # update and complete progress bar
    progress.end()

    return file_path
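A pattern worth noting across Examples 4, 5, 8, 9, 11, 14 and 15: when ProgressBar.over is called with a second argument of False (finish_at_end=False, as spelled out in Example 8), iterating does not close the bar, so the caller can account for extra steps with update() and must then call end() explicitly. Below is a minimal sketch of that pattern; the work list and output file are hypothetical, and only the ProgressBar calls mirror the examples above.

from spinn_utilities.progress_bar import ProgressBar

chunks = [b"alpha", b"beta", b"gamma"]  # hypothetical work items

# One unit per chunk, plus one for the final write-out step
progress = ProgressBar(len(chunks) + 1, "Writing chunks")
with open("out.bin", "wb") as f:  # hypothetical output file
    for chunk in progress.over(chunks, False):  # False keeps the bar open
        f.write(chunk)
progress.update()  # account for the final step
progress.end()     # close the bar explicitly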