

Python helper.make_tensor Code Examples

This article collects typical usage examples of the onnx.helper.make_tensor method in Python. If you are wondering what helper.make_tensor does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the containing module, onnx.helper.


The following section presents 15 code examples of helper.make_tensor, sorted by popularity by default.
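
Before walking through the examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the typical pattern: build an initializer with helper.make_tensor, declare value infos with helper.make_tensor_value_info, and assemble a graph. The names "W", "X", "Y", and "add_graph" are illustrative placeholders.

# Minimal sketch of helper.make_tensor usage (assumed names, not from the examples below)
import numpy as np
import onnx
from onnx import helper, TensorProto

weights = np.ones((2, 3), dtype=np.float32)

# make_tensor(name, data_type, dims, vals): vals can be a flat Python list,
# or raw bytes when raw=True (Examples 7 and 8 below use the raw form).
w_init = helper.make_tensor(
    name="W",
    data_type=TensorProto.FLOAT,
    dims=weights.shape,
    vals=weights.flatten().tolist(),
)

# Wrap the initializer in a small graph computing Y = X + W.
graph = helper.make_graph(
    [helper.make_node("Add", ["X", "W"], ["Y"])],
    "add_graph",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 3])],
    [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 3])],
    initializer=[w_init],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)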

Example 1: _create_param_tensors

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Author: aimuch, Project: iAI, Lines of code: 20, Source file: yolov3_to_onnx.py

Example 2: load_resize_scales

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def load_resize_scales(self, resize_params):
        """Returns the initializers with the value of the scale input
        tensor given by resize_params.

        Keyword argument:
        resize_params -- a ResizeParams object
        """
        initializer = list()
        inputs = list()
        name = resize_params.generate_param_name()
        shape = resize_params.value.shape
        data = resize_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Author: aimuch, Project: iAI, Lines of code: 21, Source file: yolov3_to_onnx.py

Example 3: add_const_proto_explicit

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def add_const_proto_explicit(self,
                               name,
                               value,
                               np_dtype=None,
                               tf_dtype=None,
                               onnx_dtype=None):
    onnx_dtype = any_dtype_to_onnx_dtype(
        np_dtype=np_dtype, tf_dtype=tf_dtype, onnx_dtype=onnx_dtype)

    const_dim = len(value.shape)

    if const_dim == 0:
      raw_values = [value.tolist()]
      values = [value]
    else:
      raw_values = value.flatten().tolist()
      values = value

    shape = np.array(values).shape
    const_proto = make_tensor(
        name=name, data_type=onnx_dtype, dims=shape, vals=raw_values)
    self._consts_proto.append(const_proto) 
Author: onnx, Project: onnx-tensorflow, Lines of code: 24, Source file: pb_wrapper.py

Example 4: test_attribute_wrapper

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_attribute_wrapper():
    def attribute_value_test(attribute_value):
        node = make_node('Abs', ['X'], [], name='test_node', test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute('test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == np.long
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == np.float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor

    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor 
Author: NervanaSystems, Project: ngraph-python, Lines of code: 24, Source file: test_model_wrappers.py

Example 5: make_node_test_model

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def make_node_test_model(node, inputs, use_weights=True):
    # HACK TODO: The output info is unknown here; not sure what the best solution is
    output_dtype = np.float32 # Dummy value only
    output_shape = [-99]      # Dummy value only
    graph_inputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(array.dtype), array.shape)
                    for name, array in zip(node.input, inputs)]
    graph_outputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(output_dtype), output_shape)
                     for name in node.output]
    if use_weights:
        # Add initializers for all inputs except the first
        initializers = [onnx_helper.make_tensor(
            name, np2onnx_dtype(array.dtype), array.shape, array.flatten().tolist())
                        for name, array in zip(node.input[1:], inputs[1:])]
    else:
        initializers = []
    graph = onnx_helper.make_graph(
           [node], "RunNodeGraph_" + node.op_type,
           graph_inputs, graph_outputs, initializer=initializers)
    model = onnx_helper.make_model(graph)
    return model 
Author: mlperf, Project: training_results_v0.6, Lines of code: 24, Source file: backend.py

Example 6: _make_fake_if_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _make_fake_if_op(self,
                         true_nodes,  # type: Sequence[NodeProto]
                         false_nodes,  # type: Sequence[NodeProto]
                         output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                         ):  # type: (...) -> List[NodeProto]
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
        false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
        if_inputs = ["condition"]
        if_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                             else_branch=false_graph)
        ]
        return retval_nodes

Author: mlperf, Project: training_results_v0.6, Lines of code: 20, Source file: optimizer_test.py

Example 7: test_eliminate_unused_initializer_input

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_eliminate_unused_initializer_input(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 0
        assert len(optimized_model.graph.input) == 2 
Author: mlperf, Project: training_results_v0.6, Lines of code: 19, Source file: optimizer_test.py

Example 8: test_eliminate_unused_initializer_no_eliminate_output

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_eliminate_unused_initializer_no_eliminate_output(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 1
        assert "Z" in [o.name for o in optimized_model.graph.output] 
Author: mlperf, Project: training_results_v0.6, Lines of code: 20, Source file: optimizer_test.py

Example 9: test_attr_repeated_tensor_proto

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_attr_repeated_tensor_proto(self):  # type: () -> None
        tensors = [
            helper.make_tensor(
                name='a',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            ),
            helper.make_tensor(
                name='b',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            )]
        attr = helper.make_attribute("tensors", tensors)
        self.assertEqual(attr.name, "tensors")
        self.assertEqual(list(attr.tensors), tensors)
        checker.check_attribute(attr) 
Author: mlperf, Project: training_results_v0.6, Lines of code: 20, Source file: helper_test.py

Example 10: emit_Constant

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Constant(self, IR_node):
        if IR_node.get_attr('value'):
            value = 'np.array({}, dtype=np.float32)'.format(IR_node.get_attr('value'))
            self.add_body(1, "{:15} = {}".format(
                IR_node.variable_name + '_value_array',
                value))
        else:
            self.add_body(1, "{:15} = __weights_dict['{}']['value']".format(
                IR_node.variable_name + '_value_array',
                IR_node.name))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}.flatten().astype(float)), name='{}')".format(
                          IR_node.variable_name,
                          IR_node.variable_name,
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Author: microsoft, Project: MMdnn, Lines of code: 20, Source file: onnx_emitter.py

Example 11: emit_Mul

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Mul(self, IR_node):
        inputs = ', '.join("'" + self.IR_graph.get_node(i).real_variable_name + "'" for i in IR_node.in_edges)
        
        if IR_node.name in self.weights_dict and 'weights' in self.weights_dict[IR_node.name]:
            self.add_body(1,"{:15} = np.array([__weights_dict['{}']['weights']])".format(
                IR_node.variable_name+'_weight_array',
                IR_node.name
            ))
            self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight'
                    ))
            inputs += ', '+''.join("'"+IR_node.variable_name +"_weight'")
            self.nodes.append(IR_node.variable_name+'_weight')

        self.add_body(1, "{:15} = helper.make_node('Mul', inputs=[{}], outputs=['{}'], broadcast=1, name='{}')".format(
            IR_node.variable_name,
            inputs,
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Author: microsoft, Project: MMdnn, Lines of code: 27, Source file: onnx_emitter.py

Example 12: emit_Reshape

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Reshape(self, IR_node):
        shape = [item if item != -1 else 1 for item in IR_node.get_attr('shape')]
        if len(shape) == 4:
            shape = [shape[i] for i in [0, 3, 1, 2]]
        shape_str = ', '.join('%s' % i for i in shape)
        self.add_body(1, "{:15} = np.array([{}], dtype=np.int64)".format(
            IR_node.variable_name + '_shape_array',
            shape_str
        ))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape'))
        self.add_body(1, "{:15} = helper.make_node('Reshape', inputs=['{}', '{}'], outputs=['{}'], name='{}')".format(
            IR_node.variable_name,
            self.parent_variable_name(IR_node),
            IR_node.variable_name + '_shape',
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name + '_shape')
        self.nodes.append(IR_node.variable_name) 
Author: microsoft, Project: MMdnn, Lines of code: 26, Source file: onnx_emitter.py

Example 13: load_upsample_scales

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def load_upsample_scales(self, upsample_params):
        """Returns the initializers with the value of the scale input
        tensor given by upsample_params.

        Keyword argument:
        upsample_params -- a UpsampleParams object
        """
        initializer = list()
        inputs = list()
        name = upsample_params.generate_param_name()
        shape = upsample_params.value.shape
        data = upsample_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Author: jkjung-avt, Project: tensorrt_demos, Lines of code: 21, Source file: yolov3_to_onnx.py

Example 14: _create_param_tensors

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Author: cv-core, Project: MIT-Driverless-CV-TrainingInfra, Lines of code: 18, Source file: yolo2onnx.py

Example 15: make_shape_compatible_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == exp_ishape, "Unexpected input shape for StreamingMaxPool."
        # implement tensor with correct shape
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[self.onnx_node.output[0]],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        ) 
Author: Xilinx, Project: finn, Lines of code: 20, Source file: streamingmaxpool_batch.py


Note: The onnx.helper.make_tensor examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.