

Python helper.make_tensor Method Code Examples

This article collects typical usage examples of the onnx.helper.make_tensor method in Python. If you are wondering how helper.make_tensor is used in practice, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples for the onnx.helper module that this method belongs to.


The following presents 15 code examples of the helper.make_tensor method, sorted by popularity by default.
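
Before the examples, a minimal sketch of the basic call may help orient the reader: helper.make_tensor builds a TensorProto from a name, an ONNX data type, a shape, and either a flat list of values or raw bytes (with raw=True). The tensor names here are illustrative and are not taken from any of the cited projects.

import numpy as np
from onnx import TensorProto, helper

# Build a 2x3 float tensor from a flat Python list.
values = np.arange(6, dtype=np.float32)
tensor = helper.make_tensor(
    name="example_tensor",              # illustrative name
    data_type=TensorProto.FLOAT,
    dims=(2, 3),
    vals=values.flatten().tolist(),
)

# The same data passed as raw little-endian bytes instead of a list.
raw_tensor = helper.make_tensor(
    name="example_tensor_raw",
    data_type=TensorProto.FLOAT,
    dims=(2, 3),
    vals=values.tobytes(),
    raw=True,
)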

Example 1: _create_param_tensors

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Developer ID: aimuch, Project: iAI, Lines of code: 20, Source: yolov3_to_onnx.py

Example 2: load_resize_scales

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def load_resize_scales(self, resize_params):
        """Returns the initializers with the value of the scale input
        tensor given by resize_params.

        Keyword argument:
        resize_params -- a ResizeParams object
        """
        initializer = list()
        inputs = list()
        name = resize_params.generate_param_name()
        shape = resize_params.value.shape
        data = resize_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Developer ID: aimuch, Project: iAI, Lines of code: 21, Source: yolov3_to_onnx.py

Example 3: add_const_proto_explicit

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def add_const_proto_explicit(self,
                               name,
                               value,
                               np_dtype=None,
                               tf_dtype=None,
                               onnx_dtype=None):
    onnx_dtype = any_dtype_to_onnx_dtype(
        np_dtype=np_dtype, tf_dtype=tf_dtype, onnx_dtype=onnx_dtype)

    const_dim = len(value.shape)

    if const_dim == 0:
      raw_values = [value.tolist()]
      values = [value]
    else:
      raw_values = value.flatten().tolist()
      values = value

    shape = np.array(values).shape
    const_proto = make_tensor(
        name=name, data_type=onnx_dtype, dims=shape, vals=raw_values)
    self._consts_proto.append(const_proto) 
Developer ID: onnx, Project: onnx-tensorflow, Lines of code: 24, Source: pb_wrapper.py

Example 4: test_attribute_wrapper

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_attribute_wrapper():
    def attribute_value_test(attribute_value):
        node = make_node('Abs', ['X'], [], name='test_node', test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute('test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == np.long
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == np.float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor

    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor 
Developer ID: NervanaSystems, Project: ngraph-python, Lines of code: 24, Source: test_model_wrappers.py

Example 5: make_node_test_model

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def make_node_test_model(node, inputs, use_weights=True):
    # HACK TODO: The output info is unknown here; not sure what the best solution is
    output_dtype = np.float32 # Dummy value only
    output_shape = [-99]      # Dummy value only
    graph_inputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(array.dtype), array.shape)
                    for name, array in zip(node.input, inputs)]
    graph_outputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(output_dtype), output_shape)
                     for name in node.output]
    if use_weights:
        # Add initializers for all inputs except the first
        initializers = [onnx_helper.make_tensor(
            name, np2onnx_dtype(array.dtype), array.shape, array.flatten().tolist())
                        for name, array in zip(node.input[1:], inputs[1:])]
    else:
        initializers = []
    graph = onnx_helper.make_graph(
           [node], "RunNodeGraph_" + node.op_type,
           graph_inputs, graph_outputs, initializer=initializers)
    model = onnx_helper.make_model(graph)
    return model 
Developer ID: mlperf, Project: training_results_v0.6, Lines of code: 24, Source: backend.py

Example 6: _make_fake_if_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _make_fake_if_op(self,
                         true_nodes,  # type: Sequence[NodeProto]
                         false_nodes,  # type: Sequence[NodeProto]
                         output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                         ):  # type: (...) -> List[NodeProto]
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
        false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
        if_inputs = ["condition"]
        if_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                             else_branch=false_graph)
        ]
        return retval_nodes

    # fn is a function that takes a single node as argument 
Developer ID: mlperf, Project: training_results_v0.6, Lines of code: 20, Source: optimizer_test.py

Example 7: test_eliminate_unused_initializer_input

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_eliminate_unused_initializer_input(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 0
        assert len(optimized_model.graph.input) == 2 
Developer ID: mlperf, Project: training_results_v0.6, Lines of code: 19, Source: optimizer_test.py

Example 8: test_eliminate_unused_initializer_no_eliminate_output

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_eliminate_unused_initializer_no_eliminate_output(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 1
        assert "Z" in [o.name for o in optimized_model.graph.output] 
Developer ID: mlperf, Project: training_results_v0.6, Lines of code: 20, Source: optimizer_test.py

Example 9: test_attr_repeated_tensor_proto

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def test_attr_repeated_tensor_proto(self):  # type: () -> None
        tensors = [
            helper.make_tensor(
                name='a',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            ),
            helper.make_tensor(
                name='b',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            )]
        attr = helper.make_attribute("tensors", tensors)
        self.assertEqual(attr.name, "tensors")
        self.assertEqual(list(attr.tensors), tensors)
        checker.check_attribute(attr) 
Developer ID: mlperf, Project: training_results_v0.6, Lines of code: 20, Source: helper_test.py

Example 10: emit_Constant

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Constant(self, IR_node):
        if IR_node.get_attr('value'):
            value = 'np.array({}, dtype=np.float32)'.format(IR_node.get_attr('value'))
            self.add_body(1, "{:15} = {}".format(
                IR_node.variable_name + '_value_array',
                value))
        else:
            self.add_body(1, "{:15} = __weights_dict['{}']['value']".format(
                IR_node.variable_name + '_value_array',
                IR_node.name))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}.flatten().astype(float)), name='{}')".format(
                          IR_node.variable_name,
                          IR_node.variable_name,
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Developer ID: microsoft, Project: MMdnn, Lines of code: 20, Source: onnx_emitter.py

Example 11: emit_Mul

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Mul(self, IR_node):
        inputs = ', '.join("'" + self.IR_graph.get_node(i).real_variable_name + "'" for i in IR_node.in_edges)
        
        if IR_node.name in self.weights_dict and 'weights' in self.weights_dict[IR_node.name]:
            self.add_body(1,"{:15} = np.array([__weights_dict['{}']['weights']])".format(
                IR_node.variable_name+'_weight_array',
                IR_node.name
            ))
            self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight'
                    ))
            inputs += ', '+''.join("'"+IR_node.variable_name +"_weight'")
            self.nodes.append(IR_node.variable_name+'_weight')

        self.add_body(1, "{:15} = helper.make_node('Mul', inputs=[{}], outputs=['{}'], broadcast=1, name='{}')".format(
            IR_node.variable_name,
            inputs,
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Developer ID: microsoft, Project: MMdnn, Lines of code: 27, Source: onnx_emitter.py

Example 12: emit_Reshape

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def emit_Reshape(self, IR_node):
        shape = [item if item != -1 else 1 for item in IR_node.get_attr('shape')]
        if len(shape) == 4:
            shape = [shape[i] for i in [0, 3, 1, 2]]
        shape_str = ', '.join('%s' % i for i in shape)
        self.add_body(1, "{:15} = np.array([{}], dtype=np.int64)".format(
            IR_node.variable_name + '_shape_array',
            shape_str
        ))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape'))
        self.add_body(1, "{:15} = helper.make_node('Reshape', inputs=['{}', '{}'], outputs=['{}'], name='{}')".format(
            IR_node.variable_name,
            self.parent_variable_name(IR_node),
            IR_node.variable_name + '_shape',
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name + '_shape')
        self.nodes.append(IR_node.variable_name) 
Developer ID: microsoft, Project: MMdnn, Lines of code: 26, Source: onnx_emitter.py
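
Examples 10 through 12 above emit Python source strings rather than calling make_tensor directly; the generated code relies on onnx.mapping.NP_TYPE_TO_TENSOR_TYPE to translate a NumPy dtype into the corresponding ONNX data type. A minimal sketch of the pattern those strings produce, with an illustrative array and node name:

import numpy as np
import onnx
from onnx import helper

value_array = np.array([1.0, 2.0, 3.0], dtype=np.float32)  # illustrative data
const_node = helper.make_node(
    'Constant',
    inputs=[],
    outputs=['const_out'],
    value=helper.make_tensor(
        name='const_tensor',
        # Map the NumPy dtype to the matching ONNX TensorProto data type.
        data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value_array.dtype],
        dims=value_array.shape,
        vals=value_array.flatten().astype(float),
    ),
    name='const_node',
)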

Example 13: load_upsample_scales

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def load_upsample_scales(self, upsample_params):
        """Returns the initializers with the value of the scale input
        tensor given by upsample_params.

        Keyword argument:
        upsample_params -- a UpsampleParams object
        """
        initializer = list()
        inputs = list()
        name = upsample_params.generate_param_name()
        shape = upsample_params.value.shape
        data = upsample_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Developer ID: jkjung-avt, Project: tensorrt_demos, Lines of code: 21, Source: yolov3_to_onnx.py

Example 14: _create_param_tensors

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Developer ID: cv-core, Project: MIT-Driverless-CV-TrainingInfra, Lines of code: 18, Source: yolo2onnx.py

Example 15: make_shape_compatible_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_tensor [as alias]
def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == exp_ishape, "Unexpected input shape for StreamingMaxPool."
        # implement tensor with correct shape
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[self.onnx_node.output[0]],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        ) 
Developer ID: Xilinx, Project: finn, Lines of code: 20, Source: streamingmaxpool_batch.py


Note: The onnx.helper.make_tensor examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.