This article collects typical usage examples of the onnx.GraphProto method in Python. If you have been wondering what onnx.GraphProto does, how to use it, or what working code looks like, the curated examples below may help. You can also explore further usage examples from the onnx module this method belongs to.
The following 10 code examples of onnx.GraphProto are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: __init__
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def __init__(self, onnx_proto_instance):  # type: (onnx.GraphProto) -> None
    super(GraphWrapper, self).__init__(onnx_proto_instance, self)
    self._ng_node_cache = {}  # type: Dict[str, TensorOp]
    self.node = [NodeWrapper(node, self) for node in self._proto.node]
    self.input = [ValueInfoWrapper(inpt, self) for inpt in self._proto.input]
    self.output = [ValueInfoWrapper(output, self) for output in self._proto.output]
    self.initializer = [TensorWrapper(initializer, self)
                        for initializer in self._proto.initializer]
    self._initialize_ng_tensors()
    self._initialize_ng_nodes()
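The wrapper above comes from an ngraph-style ONNX importer, and the classes it instantiates are project-specific. As a rough standalone sketch of the GraphProto fields it iterates over (node, input, output, initializer), a tiny graph can be built with plain onnx helpers:

import numpy as np
from onnx import helper, numpy_helper, TensorProto

# Build a one-node graph: Y = MatMul(X, W), with W stored as an initializer.
weight = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name='W')
matmul = helper.make_node('MatMul', inputs=['X', 'W'], outputs=['Y'])
graph = helper.make_graph(
    [matmul], 'toy',
    inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1, 2))],
    outputs=[helper.make_tensor_value_info('Y', TensorProto.FLOAT, (1, 2))],
    initializer=[weight])
# These are the four repeated fields the wrapper converts one by one.
print(len(graph.node), len(graph.input), len(graph.output), len(graph.initializer))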
Example 2: _optimized
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def _optimized(self, graph, opts):  # type: (GraphProto, Sequence[Text]) -> ModelProto
    orig_model = helper.make_model(graph, producer_name='onnx-test')
    optimized_model = onnx.optimizer.optimize(orig_model, opts)
    checker.check_model(optimized_model)
    return optimized_model
# input_types and output_types are lists of triples of (name, type, shape)
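Note that onnx.optimizer was removed in recent onnx releases and now lives in the separate onnxoptimizer package. A hedged sketch of the same wrap-optimize-check pattern outside the test class (the pass name below is just an example):

from onnx import helper, checker, TensorProto

identity = helper.make_node('Identity', ['X'], ['Y'])
graph = helper.make_graph(
    [identity], 'opt-demo',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, (1,))])
model = helper.make_model(graph, producer_name='onnx-test')

try:
    from onnx import optimizer         # older onnx releases
except ImportError:
    import onnxoptimizer as optimizer  # newer environments
optimized = optimizer.optimize(model, ['eliminate_identity'])
checker.check_model(optimized)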
Example 3: _visit_all_nodes_recursive
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def _visit_all_nodes_recursive(self, graph, fn):  # type: (GraphProto, Callable[[NodeProto], None]) -> None
    for node in graph.node:
        fn(node)
        for attr in node.attribute:
            if attr.g is not None:
                self._visit_all_nodes_recursive(attr.g, fn)
            if len(attr.graphs):
                for gr in attr.graphs:
                    self._visit_all_nodes_recursive(gr, fn)
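A note on the check above: attr.g is a singular protobuf message field, so it is never None; testing attr.type is the more reliable way to detect graph-valued attributes. A standalone sketch (not part of the original test class) that counts nodes, descending into If/Loop/Scan subgraphs:

import onnx

def count_nodes(graph):  # type: (onnx.GraphProto) -> int
    total = 0
    for node in graph.node:
        total += 1
        for attr in node.attribute:
            if attr.type == onnx.AttributeProto.GRAPH:
                total += count_nodes(attr.g)
            elif attr.type == onnx.AttributeProto.GRAPHS:
                for sub in attr.graphs:
                    total += count_nodes(sub)
    return total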
Example 4: test_attr_repeated_graph_proto
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def test_attr_repeated_graph_proto(self):  # type: () -> None
    graphs = [GraphProto(), GraphProto()]
    graphs[0].name = "a"
    graphs[1].name = "b"
    attr = helper.make_attribute("graphs", graphs)
    self.assertEqual(attr.name, "graphs")
    self.assertEqual(list(attr.graphs), graphs)
    checker.check_attribute(attr)
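For comparison, helper.make_attribute maps a single GraphProto to a GRAPH attribute and a list of graphs to a GRAPHS attribute. A minimal sketch outside the test class (attribute and graph names are arbitrary):

from onnx import helper, checker, GraphProto, AttributeProto

graphs = [GraphProto(), GraphProto()]
graphs[0].name, graphs[1].name = 'then_branch', 'else_branch'
repeated = helper.make_attribute('branches', graphs)
assert repeated.type == AttributeProto.GRAPHS
checker.check_attribute(repeated)

single = helper.make_attribute('body', graphs[0])
assert single.type == AttributeProto.GRAPH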
Example 5: _make_graph
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def _make_graph(self,
                seed_values,       # type: Sequence[Union[Text, Tuple[Text, TensorProto.DataType, Any]]]
                nodes,             # type: List[NodeProto]
                value_info,        # type: List[ValueInfoProto]
                initializer=None   # type: Optional[Sequence[TensorProto]]
                ):  # type: (...) -> GraphProto
    if initializer is None:
        initializer = []
    names_in_initializer = set(x.name for x in initializer)
    input_value_infos = []
    # If the starting values are not also initializers,
    # introduce the starting values as the output of reshape,
    # so that the sizes are guaranteed to be unknown
    for seed_value in seed_values:
        if isinstance(seed_value, tuple):
            seed_name = seed_value[0]
            seed_value_info = make_tensor_value_info(*seed_value)
        else:
            seed_name = seed_value
            seed_value_info = make_empty_tensor_value_info(seed_value)
        if seed_name in names_in_initializer:
            input_value_infos.append(seed_value_info)
        else:
            value_info.append(seed_value_info)
            input_value_infos.append(make_tensor_value_info('SEED_' + seed_name, TensorProto.UNDEFINED, ()))
            input_value_infos.append(make_tensor_value_info('UNKNOWN_SHAPE_' + seed_name, TensorProto.UNDEFINED, ()))
            nodes[:0] = [make_node("Reshape", ['SEED_' + seed_name, 'UNKNOWN_SHAPE_' + seed_name], [seed_name])]
    return helper.make_graph(nodes, "test", input_value_infos, [], initializer=initializer, value_info=value_info)
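For one non-initializer seed value 'x', the helper above prepends a Reshape fed by two UNDEFINED placeholder inputs, so the seed's size stays unknown to shape inference. A hedged sketch of the equivalent graph built directly with plain onnx helpers:

from onnx import helper, TensorProto

reshape = helper.make_node('Reshape', ['SEED_x', 'UNKNOWN_SHAPE_x'], ['x'])
add = helper.make_node('Add', ['x', 'x'], ['y'])
graph = helper.make_graph(
    [reshape, add], 'test',
    [helper.make_tensor_value_info('SEED_x', TensorProto.UNDEFINED, ()),
     helper.make_tensor_value_info('UNKNOWN_SHAPE_x', TensorProto.UNDEFINED, ())],
    [],
    value_info=[helper.make_tensor_value_info('y', TensorProto.FLOAT, None)])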
Example 6: _inferred
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def _inferred(self, graph):  # type: (GraphProto) -> ModelProto
    orig_model = helper.make_model(graph, producer_name='onnx-test')
    inferred_model = onnx.shape_inference.infer_shapes(orig_model)
    checker.check_model(inferred_model)
    return inferred_model
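A standalone sketch of the same pattern outside the test class: run shape inference on a tiny model and read back the inferred value_info entries.

from onnx import helper, checker, shape_inference, TensorProto

relu = helper.make_node('Relu', ['X'], ['H'])
iden = helper.make_node('Identity', ['H'], ['Y'])
graph = helper.make_graph(
    [relu, iden], 'infer-demo',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, (3, 4))],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, (3, 4))])
model = helper.make_model(graph, producer_name='onnx-test')
inferred = shape_inference.infer_shapes(model)
checker.check_model(inferred)
# Expect the intermediate tensor 'H' to appear in value_info with shape (3, 4).
print([(vi.name, [d.dim_value for d in vi.type.tensor_type.shape.dim])
       for vi in inferred.graph.value_info])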
Example 7: reserve_node_for_embedded_graph
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def reserve_node_for_embedded_graph(nodelist):
    # type: (onnx.GraphProto)->(onnx.GraphProto, frozenset)
    nodelist = _fix_unamed_node(nodelist)
    ginputs = []
    for nd_ in nodelist:
        for _, subgraph_ in OnnxGraphContext.get_attr_graph(nd_).items():
            inner_inputs = frozenset([i_.name for i_ in subgraph_.input])
            for sub_nd_ in subgraph_.node:
                ginputs.extend([i_ for i_ in sub_nd_.input if i_ not in inner_inputs])
    ginputs.extend(OnnxGraphContext.stopping_initializers)
    return nodelist, frozenset(ginputs)
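OnnxGraphContext.get_attr_graph is specific to this converter code base. An illustrative plain-onnx equivalent that collects a node's graph-valued attributes could look like this:

import onnx

def get_attr_graphs(node):  # type: (onnx.NodeProto) -> dict
    graphs = {}
    for attr in node.attribute:
        if attr.type == onnx.AttributeProto.GRAPH:
            graphs[attr.name] = attr.g
        elif attr.type == onnx.AttributeProto.GRAPHS:
            for i, g in enumerate(attr.graphs):
                graphs['%s_%d' % (attr.name, i)] = g
    return graphs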
Example 8: const_folding_optimizer
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def const_folding_optimizer(graph, outer_graph=None):
    # type: (onnx.GraphProto, onnx.GraphProto)->onnx.GraphProto
    nodelist, reserved_names = reserve_node_for_embedded_graph(graph.node)
    opt_graph = OnnxGraphContext(graph, nodelist)
    node_status = {}
    for ts_ in graph.output:
        _dfs_calc(opt_graph, opt_graph.tensor_to_node[ts_.name], reserved_names, node_status)

    graph.initializer.extend([numpy_helper.from_array(ts_, nm_) for nm_, ts_ in opt_graph.variables.items()])
    new_nodes = [nd_ for nd_ in nodelist if nd_.name in node_status]
    new_nodes = [nd_ for nd_ in new_nodes if nd_.output[0] not in opt_graph.variables]

    def node_key(nd_):
        return abs(node_status[nd_.name])

    new_nodes.sort(key=node_key)
    pruned_initilizers = _remove_unused_initializers(new_nodes, graph.initializer, reserved_names,
                                                     None if outer_graph is None else outer_graph.initializer)
    del graph.node[:]
    graph.node.extend(new_nodes)
    del graph.initializer[:]
    graph.initializer.extend(pruned_initilizers)

    for nd_ in graph.node:
        for aname_, subgraph_ in OnnxGraphContext.get_attr_graph(nd_).items():
            opt_inner_graph = const_folding_optimizer(subgraph_, graph)
            lst_attrs = list(nd_.attribute)
            del nd_.attribute[:]
            lst_attrs = [helper.make_attribute(aname_, opt_inner_graph) if
                         attr.name == aname_ else attr for attr in lst_attrs]
            nd_.attribute.extend(lst_attrs)
    return graph
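Assuming the optimizer above and its helpers are importable from the surrounding converter package, a typical invocation folds constants in place on a loaded model. The file names below are placeholders:

import onnx

model = onnx.load('model.onnx')        # placeholder path
const_folding_optimizer(model.graph)   # rewrites graph.node / graph.initializer in place
onnx.checker.check_model(model)
onnx.save(model, 'model_folded.onnx')  # placeholder path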
Example 9: from_onnx
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def from_onnx(graph, onnx_ir_version):  # type: (GraphProto, int) -> Graph
    input_tensors = {
        t.name: numpy_helper.to_array(t) for t in graph.initializer
    }
    nodes_ = []
    nodes_by_input = {}  # type: Dict[Text, List[Node]]
    nodes_by_output = {}
    for node in graph.node:
        node_ = Node.from_onnx(node)
        for input_ in node_.inputs:
            if input_ in input_tensors:
                node_.input_tensors[input_] = input_tensors[input_]
            else:
                if input_ in nodes_by_input:
                    input_nodes = nodes_by_input[input_]
                else:
                    input_nodes = []
                    nodes_by_input[input_] = input_nodes
                input_nodes.append(node_)
        for output_ in node_.outputs:
            nodes_by_output[output_] = node_
        nodes_.append(node_)

    inputs = []
    for i in graph.input:
        if i.name not in input_tensors:
            inputs.append(_input_from_onnx_input(i))

    outputs = []
    for o in graph.output:
        outputs.append(_input_from_onnx_input(o))

    for node_ in nodes_:
        for input_ in node_.inputs:
            if input_ in nodes_by_output:
                node_.parents.append(nodes_by_output[input_])
        for output_ in node_.outputs:
            if output_ in nodes_by_input:
                node_.children.extend(nodes_by_input[output_])

    # Dictionary to hold the "value_info" field from ONNX graph
    shape_dict = {}  # type: Dict[Text,Tuple[int,...]]

    def extract_value_info(shape_dict,  # type: Dict[Text,Tuple[int,...]]
                           value_info,  # type: ValueInfoProto[...]
                           ):
        # type: (...) -> None
        t = tuple([int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])
        if t:
            shape_dict[value_info.name] = t

    for value_info in graph.value_info:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.input:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.output:
        extract_value_info(shape_dict, value_info)

    return Graph(nodes_, inputs, outputs, shape_dict, onnx_ir_version)
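The Graph and Node classes here are project-specific (in the style of the coremltools/onnx-coreml converter). As an illustrative standalone sketch, the same producer-to-consumer linkage can be derived directly from a GraphProto:

import onnx

def node_parents(graph):  # type: (onnx.GraphProto) -> dict
    # Map each tensor name to the node that produces it, then look up
    # every node's inputs to find its parents (keyed by node name).
    produced_by = {out: n for n in graph.node for out in n.output}
    return {n.name: [produced_by[i].name for i in n.input if i in produced_by]
            for n in graph.node}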
Example 10: from_onnx
# Required import: import onnx [as alias]
# Or: from onnx import GraphProto [as alias]
def from_onnx(graph):  # type: (GraphProto) -> Graph
    input_tensors = {
        t.name: numpy_helper.to_array(t) for t in graph.initializer
    }
    nodes_ = []
    nodes_by_input = {}  # type: Dict[Text, List[Node]]
    nodes_by_output = {}
    for node in graph.node:
        node_ = Node.from_onnx(node)
        for input_ in node_.inputs:
            if input_ in input_tensors:
                node_.input_tensors[input_] = input_tensors[input_]
            else:
                if input_ in nodes_by_input:
                    input_nodes = nodes_by_input[input_]
                else:
                    input_nodes = []
                    nodes_by_input[input_] = input_nodes
                input_nodes.append(node_)
        for output_ in node_.outputs:
            nodes_by_output[output_] = node_
        nodes_.append(node_)

    inputs = []
    for i in graph.input:
        if i.name not in input_tensors:
            inputs.append(_input_from_onnx_input(i))

    outputs = []
    for o in graph.output:
        outputs.append(_input_from_onnx_input(o))

    for node_ in nodes_:
        for input_ in node_.inputs:
            if input_ in nodes_by_output:
                node_.parents.append(nodes_by_output[input_])
        for output_ in node_.outputs:
            if output_ in nodes_by_input:
                node_.children.extend(nodes_by_input[output_])

    # Dictionary to hold the "value_info" field from ONNX graph
    shape_dict = {}  # type: Dict[Text,Tuple[int,...]]

    def extract_value_info(shape_dict,  # type: Dict[Text,Tuple[int,...]]
                           value_info,  # type: ValueInfoProto[...]
                           ):
        # type: (...) -> None
        shape_dict[value_info.name] = tuple([int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])

    for value_info in graph.value_info:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.input:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.output:
        extract_value_info(shape_dict, value_info)

    return Graph(nodes_, inputs, outputs, shape_dict)
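This variant differs from Example 9 mainly in the shape extraction: it stores a tuple for every value_info, while Example 9 skips entries whose shape has no dimensions. Either way, symbolic dimensions come through as 0, which a small plain-onnx check (illustrative only) makes visible:

from onnx import helper, TensorProto

sym = helper.make_tensor_value_info('X', TensorProto.FLOAT, ('batch', 3))
scalar = helper.make_tensor_value_info('S', TensorProto.FLOAT, ())
print(tuple(int(d.dim_value) for d in sym.type.tensor_type.shape.dim))     # (0, 3)
print(tuple(int(d.dim_value) for d in scalar.type.tensor_type.shape.dim))  # ()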