This article collects typical usage examples of the Python method spinn_utilities.progress_bar.ProgressBar.over: what the method does, how it is called, and what real-world calls look like. For more context, see the containing class, spinn_utilities.progress_bar.ProgressBar.
The 15 code examples below show ProgressBar.over in use, ordered by popularity by default.
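Before the examples, here is a minimal, self-contained sketch of the pattern they all follow. It is illustrative only: the constructor arguments and the meaning of the optional second argument to over (assumed here to keep the bar open so a later over call can continue it) are inferred from how the examples below use the API, not taken from the library documentation; the item lists are placeholders.
from spinn_utilities.progress_bar import ProgressBar

def process(items, partitions):
    # The constructor takes the expected number of steps (or, in several
    # examples below, an iterable whose length gives that number) plus a
    # description string shown next to the bar.
    progress = ProgressBar(
        len(items) + len(partitions), "Doing some work")
    # Passing False as the second argument (assumed: "do not finish the
    # bar when this iteration ends") lets a later over() call keep
    # advancing the same bar.
    for item in progress.over(items, False):
        pass  # handle each item; the bar advances once per element
    # The final over() call, without the flag, closes the bar when done.
    for partition in progress.over(partitions):
        pass  # handle each partition

process(["vertex_a", "vertex_b"], ["partition_1"])
Each of the examples that follows uses this same pattern, with real PACMAN, sPyNNaker, and SpiNNFrontEndCommon data structures in place of the placeholder lists.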
Example 1: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, machine_graph, graph_mapper):
    """
    :param machine_graph: the machine_graph whose edges are to be filtered
    :param graph_mapper: the graph mapper between graphs
    :return: a new graph mapper and machine graph
    """
    new_machine_graph = MachineGraph(label=machine_graph.label)
    new_graph_mapper = GraphMapper()
    # create progress bar
    progress = ProgressBar(
        machine_graph.n_vertices +
        machine_graph.n_outgoing_edge_partitions,
        "Filtering edges")
    # add the vertices directly, as they won't be pruned
    for vertex in progress.over(machine_graph.vertices, False):
        self._add_vertex_to_new_graph(
            vertex, graph_mapper, new_machine_graph, new_graph_mapper)
    # check edges to decide which ones need pruning
    for partition in progress.over(machine_graph.outgoing_edge_partitions):
        for edge in partition.edges:
            if self._is_filterable(edge, graph_mapper):
                logger.debug("this edge was pruned %s", edge)
                continue
            logger.debug("this edge was not pruned %s", edge)
            self._add_edge_to_new_graph(
                edge, partition, graph_mapper, new_machine_graph,
                new_graph_mapper)
    # return the pruned graph and graph mapper
    return new_machine_graph, new_graph_mapper
Example 2: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, machine_graph, n_keys_map, graph_mapper=None):
    # check that this algorithm supports the constraints
    check_algorithm_can_support_constraints(
        constrained_vertices=machine_graph.outgoing_edge_partitions,
        supported_constraints=[
            FixedMaskConstraint,
            FixedKeyAndMaskConstraint,
            ContiguousKeyRangeContraint, ShareKeyConstraint],
        abstract_constraint_type=AbstractKeyAllocatorConstraint)
    # verify that no edge has more than one of a constraint, and that
    # constraints are compatible
    check_types_of_edge_constraint(machine_graph)
    # final key allocations
    routing_infos = RoutingInfo()
    # Get the edges grouped by those that require the same key
    (fixed_keys, shared_keys, fixed_masks, fixed_fields, flexi_fields,
     continuous, noncontinuous) = get_edge_groups(
         machine_graph, EdgeTrafficType.MULTICAST)
    # Go through the groups and allocate keys
    progress = ProgressBar(
        machine_graph.n_outgoing_edge_partitions,
        "Allocating routing keys")
    # allocate the groups that have fixed keys
    for group in progress.over(fixed_keys, False):
        self._allocate_fixed_keys(group, routing_infos)
    for group in progress.over(fixed_masks, False):
        self._allocate_fixed_masks(group, n_keys_map, routing_infos)
    for group in progress.over(fixed_fields, False):
        self._allocate_fixed_fields(group, n_keys_map, routing_infos)
    if flexi_fields:
        raise PacmanConfigurationException(
            "MallocBasedRoutingInfoAllocator does not support FlexiField")
    for group in progress.over(shared_keys, False):
        self._allocate_share_key(group, routing_infos, n_keys_map)
    for group in continuous:
        self._allocate_other_groups(group, routing_infos, n_keys_map,
                                    continuous=True)
    for group in noncontinuous:
        self._allocate_other_groups(group, routing_infos, n_keys_map,
                                    continuous=False)
    progress.end()
    return routing_infos
Example 3: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, report_folder, application_graph):
    """
    :param report_folder: the report folder to put figure into
    :param application_graph: the app graph
    :rtype: None
    """
    # create holders for data
    vertex_holders = dict()
    dot_diagram = self._get_diagram(
        "The graph of the network in graphical form")
    # build progress bar for the vertices, edges, and rendering
    progress = ProgressBar(
        application_graph.n_vertices +
        application_graph.n_outgoing_edge_partitions + 1,
        "generating the graphical representation of the neural network")
    # write vertices into dot diagram
    for vertex_counter, vertex in progress.over(
            enumerate(application_graph.vertices), False):
        dot_diagram.node(
            "{}".format(vertex_counter),
            "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
        vertex_holders[vertex] = vertex_counter
    # write edges into dot diagram
    for partition in progress.over(
            application_graph.outgoing_edge_partitions, False):
        for edge in partition.edges:
            source_vertex_id = vertex_holders[edge.pre_vertex]
            dest_vertex_id = vertex_holders[edge.post_vertex]
            if isinstance(edge, ProjectionApplicationEdge):
                for synapse_info in edge.synapse_information:
                    dot_diagram.edge(
                        "{}".format(source_vertex_id),
                        "{}".format(dest_vertex_id),
                        "{}".format(synapse_info.connector))
            else:
                dot_diagram.edge(
                    "{}".format(source_vertex_id),
                    "{}".format(dest_vertex_id))
    # write dot file and generate pdf
    file_to_output = os.path.join(report_folder, "network_graph.gv")
    dot_diagram.render(file_to_output, view=False)
    progress.update()
    progress.end()
Author: SpiNNakerManchester, Project: sPyNNaker, Lines: 50, Source: spynnaker_neuron_network_specification_report.py
Example 4: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, router_tables, target_length=None):
    # build storage
    compressed_pacman_router_tables = MulticastRoutingTables()
    # create progress bar
    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing Tables")
    # compress each router
    for router_table in progress.over(router_tables.routing_tables):
        # convert to rig format
        entries = self._convert_to_mundy_format(router_table)
        # compress the router entries
        compressed_router_table_entries = \
            rigs_compressor.minimise(entries, target_length)
        # convert back to pacman model
        compressed_pacman_table = self._convert_to_pacman_router_table(
            compressed_router_table_entries, router_table.x,
            router_table.y)
        # add to new compressed routing tables
        compressed_pacman_router_tables.add_routing_table(
            compressed_pacman_table)
    # return
    return compressed_pacman_router_tables
Example 5: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, router_tables):
    tables = MulticastRoutingTables()
    previous_masks = dict()
    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")
    # Create all masks without holes
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]
    # Check that none of the masks have "holes" e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " BasicRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))
    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        # print("Reduced from {} to {}".format(
        #     len(router_table.multicast_routing_entries), n_entries))
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))
    return tables
Example 6: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, placements, file_path):
    """
    :param placements: the memory placements object
    :param file_path: the file path for the placements.json
    :return: file path for the placements.json
    """
    # write basic stuff
    json_obj = dict()
    vertex_by_id = dict()
    progress = ProgressBar(placements.n_placements + 1,
                           "converting to JSON placements")
    # process placements
    for placement in progress.over(placements, False):
        vertex_id = ident(placement.vertex)
        vertex_by_id[vertex_id] = placement.vertex
        json_obj[vertex_id] = [placement.x, placement.y]
    # dump dict into json file
    with open(file_path, "w") as file_to_write:
        json.dump(json_obj, file_to_write)
    progress.update()
    # validate the schema
    file_format_schemas.validate(json_obj, "placements.json")
    progress.end()
    # return the file format
    return file_path, vertex_by_id
Example 7: get_spikes
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def get_spikes(self, label, buffer_manager, region,
               placements, graph_mapper, application_vertex,
               base_key_function, machine_time_step):
    # pylint: disable=too-many-arguments
    results = list()
    missing = []
    ms_per_tick = machine_time_step / 1000.0
    vertices = graph_mapper.get_machine_vertices(application_vertex)
    progress = ProgressBar(vertices,
                           "Getting spikes for {}".format(label))
    for vertex in progress.over(vertices):
        placement = placements.get_placement_of_vertex(vertex)
        vertex_slice = graph_mapper.get_slice(vertex)
        # Read the spikes
        raw_spike_data, data_missing = \
            buffer_manager.get_data_by_placement(placement, region)
        if data_missing:
            missing.append(placement)
        self._process_spike_data(
            vertex_slice, raw_spike_data, ms_per_tick,
            base_key_function(vertex), results)
    if missing:
        missing_str = recording_utils.make_missing_string(missing)
        logger.warning(
            "Population {} is missing spike data in region {} from the"
            " following cores: {}", label, region, missing_str)
    if not results:
        return numpy.empty(shape=(0, 2))
    result = numpy.vstack(results)
    return result[numpy.lexsort((result[:, 1], result[:, 0]))]
Example 8: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(
        self, transceiver, tags=None, iptags=None, reverse_iptags=None):
    """
    :param tags: the tags object which contains IP and reverse IP tags;
        could be None if these are being given in separate lists
    :param iptags: a list of IP tags, given when tags is None
    :param reverse_iptags: a list of reverse IP tags, given when tags is None
    :param transceiver: the transceiver object
    """
    # clear all the tags from the Ethernet connection, as nothing should
    # be allowed to use it (no two apps should use the same Ethernet
    # connection at the same time)
    progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
    for tag_id in progress.over(range(MAX_TAG_ID)):
        transceiver.clear_ip_tag(tag_id)
    # Use tags object to supply tag info if it is supplied
    if tags is not None:
        iptags = list(tags.ip_tags)
        reverse_iptags = list(tags.reverse_ip_tags)
    # Load the IP tags and the Reverse IP tags
    progress = ProgressBar(
        len(iptags) + len(reverse_iptags), "Loading Tags")
    self.load_iptags(iptags, transceiver, progress)
    self.load_reverse_iptags(reverse_iptags, transceiver, progress)
    progress.end()
Example 9: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(
        self, transceiver, placements, provenance_file_path,
        run_time_ms, machine_time_step):
    """
    :param transceiver: the SpiNNMan interface object
    :param placements: The placements of the vertices
    :param provenance_file_path: The location to store the profile data
    :param run_time_ms: runtime in ms
    :param machine_time_step: machine time step in ms
    """
    # pylint: disable=too-many-arguments
    machine_time_step_ms = machine_time_step // 1000
    progress = ProgressBar(
        placements.n_placements, "Getting profile data")
    # retrieve profile data from any cores that provide it
    for placement in progress.over(placements.placements):
        if isinstance(placement.vertex, AbstractHasProfileData):
            # get data
            profile_data = placement.vertex.get_profile_data(
                transceiver, placement)
            if profile_data.tags:
                self._write(placement, profile_data, run_time_ms,
                            machine_time_step_ms, provenance_file_path)
Example 10: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(
        self, live_packet_gatherer_parameters, machine, machine_graph,
        application_graph=None, graph_mapper=None):
    """ Add LPG vertices on Ethernet connected chips as required.
    :param live_packet_gatherer_parameters:\
        the Live Packet Gatherer parameters requested by the script
    :param machine: the SpiNNaker machine as discovered
    :param application_graph: the application graph
    :param machine_graph: the machine graph
    :return: mapping between LPG parameters and LPG vertex
    """
    # pylint: disable=too-many-arguments
    # create progress bar
    progress = ProgressBar(
        machine.ethernet_connected_chips,
        string_describing_what_being_progressed=(
            "Adding Live Packet Gatherers to Graph"))
    # Keep track of the vertices added by parameters and board address
    lpg_params_to_vertices = defaultdict(dict)
    # for every Ethernet connected chip, add the gatherers required
    for chip in progress.over(machine.ethernet_connected_chips):
        for params in live_packet_gatherer_parameters:
            if (params.board_address is None or
                    params.board_address == chip.ip_address):
                lpg_params_to_vertices[params][chip.x, chip.y] = \
                    self._add_lpg_vertex(application_graph, graph_mapper,
                                         machine_graph, chip, params)
    return lpg_params_to_vertices
Author: SpiNNakerManchester, Project: SpiNNFrontEndCommon, Lines: 35, Source: insert_live_packet_gatherers_to_graphs.py
Example 11: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(
        self, live_packet_gatherer_parameters, placements,
        live_packet_gatherers_to_vertex_mapping, machine,
        machine_graph, application_graph=None, graph_mapper=None):
    """
    :param live_packet_gatherer_parameters: the set of parameters
    :param placements: the placements object
    :param live_packet_gatherers_to_vertex_mapping:\
        the mapping of LPG parameters and the machine vertices associated\
        with it
    :param machine: the SpiNNaker machine
    :param machine_graph: the machine graph
    :param application_graph: the app graph
    :param graph_mapper: the graph mapper between app and machine graph
    :rtype: None
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        live_packet_gatherer_parameters,
        string_describing_what_being_progressed=(
            "Adding edges to the machine graph between the vertices to "
            "which live output has been requested and its local Live "
            "Packet Gatherer"))
    for lpg_params in progress.over(live_packet_gatherer_parameters):
        # locate vertices needed to be connected to a LPG with these params
        for vertex in live_packet_gatherer_parameters[lpg_params]:
            self._connect_lpg_vertex(
                application_graph, graph_mapper, machine,
                placements, machine_graph, vertex,
                live_packet_gatherers_to_vertex_mapping, lpg_params)
Author: SpiNNakerManchester, Project: SpiNNFrontEndCommon, Lines: 33, Source: insert_edges_to_live_packet_gatherers.py
Example 12: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, report_default_directory, dsg_targets, transceiver):
    """ Creates a report that states where in SDRAM each region is\
        (read from machine)
    :param report_default_directory: the folder where reports are written
    :param dsg_targets: the map between placement and file writer
    :param transceiver: the SpiNNMan instance
    :rtype: None
    """
    directory_name = os.path.join(
        report_default_directory, MEM_MAP_SUBDIR_NAME)
    if not os.path.exists(directory_name):
        os.makedirs(directory_name)
    progress = ProgressBar(dsg_targets, "Writing memory map reports")
    for (x, y, p) in progress.over(dsg_targets):
        file_name = os.path.join(
            directory_name, MEM_MAP_FILENAME.format(x, y, p))
        try:
            with open(file_name, "w") as f:
                self._describe_mem_map(f, transceiver, x, y, p)
        except IOError:
            logger.exception("Generate_placement_reports: Can't open file"
                             " {} for writing.", file_name)
Example 13: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, placements, file_path):
    """
    :param placements: the memory placements object
    :param file_path: the file path for the core_allocations.json
    """
    progress = ProgressBar(len(placements) + 1,
                           "Converting to JSON core allocations")
    # write basic stuff
    json_obj = OrderedDict()
    json_obj['type'] = "cores"
    vertex_by_id = OrderedDict()
    # process placements
    for placement in progress.over(placements, False):
        self._convert_placement(placement, vertex_by_id, json_obj)
    # dump dict into json file
    with open(file_path, "w") as f:
        json.dump(json_obj, f)
    progress.update()
    # validate the schema
    file_format_schemas.validate(json_obj, "core_allocations.json")
    # complete progress bar
    progress.end()
    # return the file format
    return file_path, vertex_by_id
Example 14: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, report_default_directory, machine):
    """ Creates a report that states which chips are on each board.
    :param report_default_directory: the folder where reports are written
    :param machine: python representation of the machine
    :rtype: None
    """
    # create file path
    directory_name = os.path.join(
        report_default_directory, self.AREA_CODE_REPORT_NAME)
    # create the progress bar for end users
    progress_bar = ProgressBar(
        len(machine.ethernet_connected_chips),
        "Writing the board chip report")
    # iterate over ethernet chips and then the chips on that board
    with open(directory_name, "w") as writer:
        for ethernet_connected_chip in \
                progress_bar.over(machine.ethernet_connected_chips):
            chips = machine.get_chips_on_board(ethernet_connected_chip)
            writer.write(
                "board with IP address : {} : has chips {}\n".format(
                    ethernet_connected_chip.ip_address, list(chips)))
Example 15: __call__
# Required import: from spinn_utilities.progress_bar import ProgressBar [as alias]
# Or: from spinn_utilities.progress_bar.ProgressBar import over [as alias]
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """ Place a machine_graph so that each vertex is placed on a core
    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine:\
        The machine with respect to which to partition the application\
        graph
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException:\
        If something goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    ResourceTracker.check_constraints(machine_graph.vertices)
    placements = Placements()
    vertices = sort_vertices_by_known_constraints(machine_graph.vertices)
    # Iterate over vertices and generate placements
    progress = ProgressBar(vertices, "Placing graph vertices")
    resource_tracker = ResourceTracker(machine, plan_n_timesteps)
    for vertex in progress.over(vertices):
        # Create and store a new placement anywhere on the board
        (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints, None)
        placement = Placement(vertex, x, y, p)
        placements.add_placement(placement)
    return placements