

Python TensorProto.INT64 Attribute Code Examples

This article collects typical usage examples of the onnx.TensorProto.INT64 attribute in Python. If you are unsure what TensorProto.INT64 does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, onnx.TensorProto.


Fifteen code examples of the TensorProto.INT64 attribute are shown below, sorted by popularity by default.
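
Before the examples, here is a minimal sketch (not taken from the collected projects) of the two places TensorProto.INT64 shows up most often: as the element type of a graph input/output and as the data type of a constant initializer.

import numpy as np
from onnx import TensorProto, helper, numpy_helper

# Declare a rank-1 INT64 graph input (e.g. the K input of TopK).
k_info = helper.make_tensor_value_info("K", TensorProto.INT64, [1])

# Build an INT64 initializer from the enum directly...
shape_init = helper.make_tensor("shape", TensorProto.INT64, dims=[2], vals=[3, 8])

# ...or from a numpy array; int64 arrays map to TensorProto.INT64.
shape_init2 = numpy_helper.from_array(np.array([3, 8], dtype=np.int64), name="shape2")
assert shape_init2.data_type == TensorProto.INT64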

Example 1: _make_model_acos_exp_topk

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def _make_model_acos_exp_topk(): # type: (...) -> ModelProto
  '''
  make a very simple model for testing: input->acos->exp->topk->2 outputs
  '''
  inputs = [('input0', (10,), TensorProto.FLOAT), ('K', (1,), TensorProto.INT64)]
  outputs = [('output_values', (3,), TensorProto.FLOAT), ('output_indices', (3,), TensorProto.INT64)]
  acos = helper.make_node("Acos",
                          inputs=[inputs[0][0]],
                          outputs=['acos_out'])
  exp = helper.make_node("Exp",
                         inputs=[acos.output[0]],
                         outputs=['exp_out'])
  topk = helper.make_node("TopK",
                          inputs=[exp.output[0], inputs[1][0]],
                          outputs=[outputs[0][0], outputs[1][0]],
                          axis=0)
  return _onnx_create_model([acos, exp, topk], inputs, outputs)
Author: onnx, Project: onnx-coreml, Lines: 19, Source: custom_layers_test.py
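
As a hedged usage sketch (not part of the test file): assuming onnxruntime is installed, that _onnx_create_model (a helper defined elsewhere in the test module) yields a valid model, and that its opset is >= 10 so TopK takes K as a runtime input, the INT64 input would be fed like this:

import numpy as np
import onnxruntime as ort

model = _make_model_acos_exp_topk()
sess = ort.InferenceSession(model.SerializeToString(),
                            providers=["CPUExecutionProvider"])
values, indices = sess.run(None, {
    # Acos needs inputs in [-1, 1].
    "input0": np.random.uniform(-1.0, 1.0, size=(10,)).astype(np.float32),
    # K must be int64 to match the TensorProto.INT64 declaration.
    "K": np.array([3], dtype=np.int64),
})
assert indices.dtype == np.int64  # TopK's indices output is always INT64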

Example 2: test_gather_nd

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_gather_nd(self):
    if legacy_opset_pre_ver(11):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support GatherND.".format(
              defs.onnx_opset_version()))
    # valid positive and negative indices for elements
    data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    indices = np.array([[0, 0], [1, -3]], dtype=np.int64)
    ref_output = np.array([1, 4], dtype=np.int32)
    node_def = helper.make_node("GatherND", ["data", "indices"], ["outputs"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.INT32,
                                          [None, None]),
            helper.make_tensor_value_info("indices", TensorProto.INT64,
                                          [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("outputs", TensorProto.INT32, [None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"data": data, "indices": indices})
    np.testing.assert_almost_equal(output["outputs"], ref_output) 
Author: onnx, Project: onnx-tensorflow, Lines: 27, Source: test_dynamic_shape.py
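
The expected output can be reproduced in plain numpy; this sketch (an illustration, not part of the test) shows why the negative index -3 wraps to column 0 of a length-3 axis:

import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
indices = np.array([[0, 0], [1, -3]], dtype=np.int64)

# With these shapes, each row of `indices` is a full coordinate into
# `data`, and negative entries wrap like Python indexing (-3 % 3 == 0).
out = np.array([data[tuple(idx)] for idx in indices])
print(out)  # [1 4], matching ref_output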

Example 3: test_fuse_add_bias_into_conv_use_weight_shape_with_tile

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self):  # type: () -> None
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"])
        graph = helper.make_graph(
            [conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])

        assert len(list(optimized_model.graph.node)) == 3
        assert len(optimized_model.graph.value_info) == 1
        assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
        assert len(optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
        assert optimized_model.graph.node[0].op_type == 'Constant'
        assert optimized_model.graph.node[1].op_type == 'Tile'
        assert optimized_model.graph.node[2].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z' 
Author: mlperf, Project: training_results_v0.6, Lines: 23, Source: optimizer_test.py

Example 4: _make_model_acos_exp_topk

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def _make_model_acos_exp_topk():  # type: (...) -> ModelProto
    """
    make a very simple model for testing: input->acos->exp->topk->2 outputs
    """
    inputs = [("input0", (10,), TensorProto.FLOAT), ("K", (1,), TensorProto.INT64)]
    outputs = [
        ("output_values", (3,), TensorProto.FLOAT),
        ("output_indices", (3,), TensorProto.INT64),
    ]
    acos = helper.make_node("Acos", inputs=[inputs[0][0]], outputs=["acos_out"])
    exp = helper.make_node("Exp", inputs=[acos.output[0]], outputs=["exp_out"])
    topk = helper.make_node(
        "TopK",
        inputs=[exp.output[0], inputs[1][0]],
        outputs=[outputs[0][0], outputs[1][0]],
        axis=0,
    )
    return _onnx_create_model([acos, exp, topk], inputs, outputs) 
Author: apple, Project: coremltools, Lines: 20, Source: test_custom_layers.py

Example 5: _transform_coreml_dtypes

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def _transform_coreml_dtypes(builder,  # type: NeuralNetworkBuilder
                             inputs, # type: List[EdgeInfo]
                             outputs # type: List[EdgeInfo]
                             ):
    # type: (...) -> None

    ''' Make sure ONNX input/output data types are mapped to the equivalent CoreML types
    '''
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of of type FLOAT, DOUBLE, INT32 or INT64")

    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of of type FLOAT, DOUBLE, INT32 or INT64") 
Author: onnx, Project: onnx-coreml, Lines: 35, Source: converter.py

Example 6: _convert_cast

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def _convert_cast(builder, node, graph, err):
    '''
    Perform the Cast operation in CoreML:
        casting from float (assumed) to an integer type maps to a Floor layer;
        for other targets, a copy layer (linear activation) is added
    '''
    convert_to = node.attrs.get('to')
    convert_to_int = {TensorProto.UINT8, TensorProto.INT8, TensorProto.UINT16, TensorProto.INT32,
                      TensorProto.INT64, TensorProto.UINT32, TensorProto.UINT64}

    ## TODO: Add support for conversion from STRING TO FLOAT
    ## Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name,
            input_name=node.inputs[0],
            output_name=node.outputs[0]
        )
    else:
        load_input_constants(builder, node, graph, err)
        builder.add_activation(
            name=node.name,
            non_linearity='LINEAR',
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0]
        ) 
Author: onnx, Project: onnx-coreml, Lines: 29, Source: _operators_nd.py
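
One caveat on the Floor mapping above: Floor rounds toward negative infinity, while an exact numeric cast truncates toward zero, so the two disagree for negative non-integer inputs. A small numpy illustration (not part of the converter):

import numpy as np

x = np.array([1.7, -1.7], dtype=np.float32)
print(np.floor(x))         # [ 1. -2.]  <- what the Floor layer computes
print(x.astype(np.int64))  # [ 1 -1]    <- what an exact cast to INT64 gives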

Example 7: test_cast

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_cast(self):
    if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
      test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                    ("INT8", tf.int8),
                    ("UINT16", tf.uint16), ("INT16", tf.int16),
                    ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                    ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                    ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
    else:
      test_cases = [(TensorProto.FLOAT, tf.float32),
                    (TensorProto.UINT8, tf.uint8), (TensorProto.INT8, tf.int8),
                    (TensorProto.UINT16, tf.uint16),
                    (TensorProto.INT16, tf.int16),
                    (TensorProto.INT32, tf.int32),
                    (TensorProto.INT64, tf.int64), (TensorProto.BOOL, tf.bool),
                    (TensorProto.FLOAT16, tf.float16),
                    (TensorProto.DOUBLE, tf.float64),
                    (TensorProto.COMPLEX64, tf.complex64),
                    (TensorProto.COMPLEX128, tf.complex128)]
      if not legacy_opset_pre_ver(9):
        test_cases.append((TensorProto.STRING, tf.string))
    for ty, tf_type in test_cases:
      node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
      vector = [2, 3]
      output = run_node(node_def, [vector])
      np.testing.assert_equal(output["output"].dtype, tf_type)

    if not legacy_opset_pre_ver(9):
      test_cases2 = [(TensorProto.FLOAT, tf.float32),
                     (TensorProto.INT32, tf.int32),
                     (TensorProto.INT64, tf.int64),
                     (TensorProto.DOUBLE, tf.float64)]
      for ty, tf_type in test_cases2:
        node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
        vector = ['2', '3']
        output = run_node(node_def, [vector])
        np.testing.assert_equal(output["output"].dtype, tf_type) 
Author: onnx, Project: onnx-tensorflow, Lines: 39, Source: test_node.py

Example 8: test_scatter_nd

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_scatter_nd(self):
    if legacy_opset_pre_ver(11):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support ScatterND.".format(
              defs.onnx_opset_version()))
    # valid positive and negative indices for slices
    data = np.reshape(np.arange(1, 25, dtype=np.float32), [2, 3, 4])
    indices = np.array([[-1]], dtype=np.int64)
    updates = np.array([[[43, 44, 45, 46], [47, 48, 49, 50], [51, 52, 53, 54]]],
                       dtype=np.float32)
    ref_output = np.array(
        [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
         [[43, 44, 45, 46], [47, 48, 49, 50], [51, 52, 53, 54]]],
        dtype=np.float32)
    node_def = helper.make_node("ScatterND", ["data", "indices", "updates"],
                                ["outputs"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.FLOAT,
                                          [None, None, None]),
            helper.make_tensor_value_info("indices", TensorProto.INT64,
                                          [None, None]),
            helper.make_tensor_value_info("updates", TensorProto.FLOAT,
                                          [None, None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("outputs", TensorProto.FLOAT,
                                          [None, None, None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"data": data, "indices": indices, "updates": updates})
    np.testing.assert_almost_equal(output["outputs"], ref_output) 
Author: onnx, Project: onnx-tensorflow, Lines: 36, Source: test_dynamic_shape.py
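
Here too the reference output can be checked in numpy. In this sketch (an illustration, not part of the test) the single index [-1] addresses the last 3x4 slice of data, which updates[0] then replaces wholesale:

import numpy as np

data = np.reshape(np.arange(1, 25, dtype=np.float32), [2, 3, 4])
indices = np.array([[-1]], dtype=np.int64)
updates = np.arange(43, 55, dtype=np.float32).reshape(1, 3, 4)

out = data.copy()
for idx, upd in zip(indices, updates):
    out[tuple(idx)] = upd  # idx == [-1] -> overwrite the last slice
np.testing.assert_array_equal(out[0], data[0])     # first slice untouched
np.testing.assert_array_equal(out[1], updates[0])  # last slice replaced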

Example 9: test_reshape_static_shape

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_reshape_static_shape(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.UINT8, (2, 4, 3)),
             ('shape', TensorProto.INT64, (2,))],
            [make_node("Reshape", ['x', 'shape'], ['y'])],
            [],
            initializer=[make_tensor('shape', TensorProto.INT64, (2,), (3, 8))])
        self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (3, 8))]) 
Author: mlperf, Project: training_results_v0.6, Lines: 10, Source: shape_inference_test.py

Example 10: test_reshape_static_shape_inferred

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_reshape_static_shape_inferred(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.UINT8, (2, 4, 3)),
             ('shape', TensorProto.INT64, (3,))],
            [make_node("Reshape", ['x', 'shape'], ['y'])],
            [],
            initializer=[make_tensor('shape', TensorProto.INT64, (3,), (0, 3, -1))])
        self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (2, 3, 4))]) 
Author: mlperf, Project: training_results_v0.6, Lines: 10, Source: shape_inference_test.py
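
The inferred shapes in Examples 9 and 10 follow ONNX Reshape semantics: a 0 in the INT64 shape tensor copies the input dimension at the same position (the default behaviour; opset 14 adds an allowzero switch), and a single -1 is solved so the element count is preserved. A sketch of the rule (an illustration, not the actual shape-inference code):

import numpy as np

def infer_reshape(input_shape, target):
    # 0 copies the input dimension at the same position.
    out = [input_shape[i] if t == 0 else t for i, t in enumerate(target)]
    if -1 in out:
        # A single -1 absorbs whatever is left of the element count.
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(input_shape)) // known
    return tuple(out)

print(infer_reshape((2, 4, 3), (3, 8)))      # (3, 8), as in Example 9
print(infer_reshape((2, 4, 3), (0, 3, -1)))  # (2, 3, 4), as in Example 10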

Example 11: test_shape

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_shape(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.FLOAT, (2, 4, 3))],
            [make_node("Shape", ['x'], ['y'])],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (3,))]) 
Author: mlperf, Project: training_results_v0.6, Lines: 8, Source: shape_inference_test.py

Example 12: test_size

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_size(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.FLOAT, (2, 4, 3))],
            [make_node("Size", ['x'], ['y'])],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, ())]) 
Author: mlperf, Project: training_results_v0.6, Lines: 8, Source: shape_inference_test.py

Example 13: test_gather_into_scalar

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_gather_into_scalar(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.FLOAT, (3,)),
             ('i', TensorProto.INT64, ())],
            [make_node("Gather", ['x', 'i'], ['y'])],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ())]) 
Author: mlperf, Project: training_results_v0.6, Lines: 9, Source: shape_inference_test.py

Example 14: test_topk_default_axis

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_topk_default_axis(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.FLOAT, (3, 4, 5, 10))],
            [make_node('TopK', ['x'], ['y', 'z'], k=2)],
            [])
        self._assert_inferred(graph,
                              [make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 5, 2)),
                               make_tensor_value_info('z', TensorProto.INT64, (3, 4, 5, 2))]) 
Author: mlperf, Project: training_results_v0.6, Lines: 10, Source: shape_inference_test.py

Example 15: test_topk

# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import INT64 [as alias]
def test_topk(self):  # type: () -> None
        graph = self._make_graph(
            [('x', TensorProto.FLOAT, (3, 4, 5, 10))],
            [make_node('TopK', ['x'], ['y', 'z'], k=2, axis=2)],
            [])
        self._assert_inferred(graph,
                              [make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 2, 10)),
                               make_tensor_value_info('z', TensorProto.INT64, (3, 4, 2, 10))]) 
Author: mlperf, Project: training_results_v0.6, Lines: 10, Source: shape_inference_test.py
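
The two TopK tests reduce to one rule: both output shapes equal the input shape with the size along axis replaced by k (axis defaults to -1, the last dimension), and the indices output z is always TensorProto.INT64. A sketch of the rule (an illustration, not the actual shape-inference code):

def topk_output_shape(input_shape, k, axis=-1):
    axis %= len(input_shape)  # normalize a negative axis
    out = list(input_shape)
    out[axis] = k
    return tuple(out)

print(topk_output_shape((3, 4, 5, 10), k=2))          # (3, 4, 5, 2), Example 14
print(topk_output_shape((3, 4, 5, 10), k=2, axis=2))  # (3, 4, 2, 10), Example 15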


Note: The onnx.TensorProto.INT64 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's License. Do not reproduce without permission.