

Python backend.prepare Method Code Examples

This article collects typical usage examples of the Python method caffe2.python.onnx.backend.prepare. If you are wondering what backend.prepare does, or how to call it in practice, the curated code examples below should help. You can also explore other usage examples from the caffe2.python.onnx.backend module.


Below are 7 code examples of the backend.prepare method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
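As quick orientation before the examples: a minimal end-to-end sketch of the typical backend.prepare workflow (the model path and input shape below are placeholders, not taken from any specific example):

import numpy as np
import onnx
from caffe2.python.onnx import backend

# Load and validate an ONNX model, then prepare a Caffe2 backend for it.
model = onnx.load("model.onnx")             # placeholder path
onnx.checker.check_model(model)
rep = backend.prepare(model, device="CPU")  # or "CUDA:0" for GPU

# Run inference; a single-input network takes one ndarray.
x = np.random.randn(1, 3, 224, 224).astype(np.float32)  # placeholder shape
outputs = rep.run(x)
print(outputs[0].shape)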

Example 1: onnx_inference

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses onnx, numpy as np, and time.
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    print(onnx.helper.printable_graph(model.graph))  # print a human-readable graph

    print("model checked, preparing backend!")
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0" to run on GPU

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
Author: mlperf, Project: inference, Lines: 25, Source: convert_onnx.py
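The closing comment in Example 1 (pass a tuple for multi-input networks) translates to something like the following; the second input here is invented purely for illustration:

# Hypothetical: a two-input model takes a tuple of ndarrays, in graph-input order.
audio = np.random.randn(16, 1, 161, 129).astype(np.float32)
lengths = np.full((16,), 129, dtype=np.int32)  # assumed second input (e.g. sequence lengths)
outputs = rep.run((audio, lengths))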

Example 2: test

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses onnx, numpy as np, and onnx's helper/TensorProto; here tf is
# presumably onnx_tf.backend and c2 is caffe2.python.onnx.backend, each aliased.
def test(self):
    _model = onnx.load(self.MODEL_PATH + "shufflenet/model.onnx")
    node_count = len(_model.graph.node)
    more_outputs = []
    output_to_check = []
    for node in _model.graph.node:
      more_outputs.append(
          helper.make_tensor_value_info(node.output[0], TensorProto.FLOAT,
                                        (100, 100)))
      output_to_check.append(node.output[0])
    _model.graph.output.extend(more_outputs)

    tf_rep = tf.prepare(_model)
    cf_rep = c2.prepare(_model)

    sample = np.load(
        self.MODEL_PATH + "shufflenet/test_data_{}.npz".format(str(1)),
        encoding='bytes')
    inputs = list(sample['inputs'])
    outputs = list(sample['outputs'])

    my_out = tf_rep.run(inputs)
    cf_out = cf_rep.run(inputs)

    for op in output_to_check:
      try:
        np.savetxt(
            op.replace("/", "__") + ".cf", cf_out[op].flatten(), delimiter='\t')
        np.savetxt(
            op.replace("/", "__") + ".tf", my_out[op].flatten(), delimiter='\t')
        np.testing.assert_allclose(my_out[op], cf_out[op], rtol=1e-2)
        print(op, "results of this layer are correct within tolerance.")
      except Exception as e:
        np.set_printoptions(threshold=np.inf)
        mismatch_percent = (find_between(str(e), "(mismatch", "%)"))
        print(op, "mismatch with percentage {} %".format(mismatch_percent)) 
Author: onnx, Project: onnx-tensorflow, Lines: 38, Source: test_model_large_stepping.py
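find_between is not defined in the snippet; from its use on the assertion message, it extracts the text between two markers. A plausible implementation:

def find_between(s, first, last):
    # Return the substring of s between markers first and last ("" if absent).
    try:
        start = s.index(first) + len(first)
        return s[start:s.index(last, start)]
    except ValueError:
        return ""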

Example 3: run_embed_params

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses onnx and torch.autograd.Variable; c2 is
# caffe2.python.onnx.backend (aliased), and flatten is a project helper
# (a sketch follows the example).
def run_embed_params(proto, model, input, state_dict=None, use_gpu=True):
    """
    This is a debug-only helper so that we can also test the
    embed_params=False case on the PyTorch side. It should likely
    be removed from the release version of the code.
    """
    device = 'CPU'
    if use_gpu:
      device = 'CUDA'
    model_def = onnx.ModelProto.FromString(proto)
    onnx.checker.check_model(model_def)
    prepared = c2.prepare(model_def, device=device)

    if state_dict:
      parameters = []
      # The passed-in state_dict may have a different order.  Make
      # sure our order is consistent with the model's order.
      # TODO: even better, use keyword arguments!
      for k in model.state_dict():
        if k not in state_dict:
          # If the PyTorch module has since gained a parameter, an old
          # pre-trained state_dict will not contain it; fall back to the
          # model's own value.
          # TODO: please avoid exporting unnecessary parameters.
          parameters.append(model.state_dict()[k])
        else:
          parameters.append(state_dict[k])
    else:
      parameters = list(model.state_dict().values())

    W = {}
    for k, v in zip(model_def.graph.input, flatten((input, parameters))):
      if isinstance(v, Variable):
        W[k.name] = v.data.cpu().numpy()
      else:
        W[k.name] = v.cpu().numpy()

    caffe2_out = prepared.run(inputs=W)

    return caffe2_out 
Author: onnxbot, Project: onnx-fb-universe, Lines: 41, Source: debug_embed_params.py
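flatten is likewise a project helper; the call flatten((input, parameters)) suggests it flattens arbitrarily nested tuples and lists into a flat sequence. A minimal sketch of that assumed behavior:

def flatten(x):
    # Yield leaf elements of arbitrarily nested tuples/lists.
    if isinstance(x, (tuple, list)):
        for item in x:
            yield from flatten(item)
    else:
        yield x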

Example 4: run_generated_test

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses glob, os, onnx, and onnx.numpy_helper; c2 is
# caffe2.python.onnx.backend (aliased). load_tensor_as_numpy_array and
# assert_similar are project helpers (see the sketch after the example).
def run_generated_test(model_file, data_dir, device='CPU'):
    model = onnx.load(model_file)
    input_num = len(glob.glob(os.path.join(data_dir, "input_*.pb")))
    inputs = []
    for i in range(input_num):
        inputs.append(numpy_helper.to_array(load_tensor_as_numpy_array(
            os.path.join(data_dir, "input_{}.pb".format(i)))))
    output_num = len(glob.glob(os.path.join(data_dir, "output_*.pb")))
    outputs = []
    for i in range(output_num):
        outputs.append(numpy_helper.to_array(load_tensor_as_numpy_array(
            os.path.join(data_dir, "output_{}.pb".format(i)))))
    prepared = c2.prepare(model, device=device)
    c2_outputs = prepared.run(inputs)
    assert_similar(outputs, c2_outputs) 
Author: onnxbot, Project: onnx-fb-universe, Lines: 17, Source: test_caffe2_common.py
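Despite its name, load_tensor_as_numpy_array must return an onnx TensorProto, since its result is passed to numpy_helper.to_array. A plausible sketch of the assumed implementation:

import onnx

def load_tensor_as_numpy_array(path):
    # Parse a serialized TensorProto from a .pb file.
    tensor = onnx.TensorProto()
    with open(path, 'rb') as f:
        tensor.ParseFromString(f.read())
    return tensor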

Example 5: main

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses torch, onnx, numpy as np, and geffnet; onnx_caffe2 is
# caffe2.python.onnx.backend (aliased), and parser is an argparse parser
# defined elsewhere in the script.
def main():
    args = parser.parse_args()

    if not args.checkpoint:
        args.pretrained = True
    else:
        args.pretrained = False

    # create model
    geffnet.config.set_exportable(True)
    print("==> Creating PyTorch {} model".format(args.model))
    model = geffnet.create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=3,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint)

    model.eval()

    x = torch.randn((1, 3, args.img_size or 224, args.img_size or 224), requires_grad=True)
    model(x)  # run model once before export trace

    print("==> Exporting model to ONNX format at '{}'".format(args.output))
    input_names = ["input0"]
    output_names = ["output0"]
    optional_args = dict(keep_initializers_as_inputs=True)  # pytorch 1.3 needs this for export to succeed
    try:
        torch_out = torch.onnx._export(
            model, x, args.output, export_params=True, verbose=False,
            input_names=input_names, output_names=output_names, **optional_args)
    except TypeError:
        # fallback to no keep_initializers arg for pytorch < 1.3
        torch_out = torch.onnx._export(
            model, x, args.output, export_params=True, verbose=False,
            input_names=input_names, output_names=output_names)

    print("==> Loading and checking exported model from '{}'".format(args.output))
    onnx_model = onnx.load(args.output)
    onnx.checker.check_model(onnx_model)  # assuming throw on error
    print("==> Passed")

    print("==> Loading model into Caffe2 backend and comparing forward pass.")
    caffe2_backend = onnx_caffe2.prepare(onnx_model)
    B = {onnx_model.graph.input[0].name: x.data.numpy()}
    c2_out = caffe2_backend.run(B)[0]
    np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
    print("==> Passed") 
Author: rwightman, Project: gen-efficientnet-pytorch, Lines: 50, Source: onnx_export.py

Example 6: __init__

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses os, imp, yaml, numpy as np, torch, and onnx, plus the
# project's own modules (booger, Colorizer, and the dataset parser).
def __init__(self, path):
    # parameters
    self.path = path

    # config from path
    try:
      yaml_path = self.path + "/cfg.yaml"
      print("Opening config file %s" % yaml_path)
      self.CFG = yaml.safe_load(open(yaml_path, 'r'))
    except Exception as e:
      print(e)
      print("Error opening cfg.yaml file from trained model.")
      quit()

    # make a colorizer
    self.colorizer = Colorizer(self.CFG["dataset"]["color_map"])

    # get the data
    parserModule = imp.load_source("parserModule",
                                   booger.TRAIN_PATH + '/tasks/segmentation/dataset/' +
                                   self.CFG["dataset"]["name"] + '/parser.py')
    self.parser = parserModule.Parser(img_prop=self.CFG["dataset"]["img_prop"],
                                      img_means=self.CFG["dataset"]["img_means"],
                                      img_stds=self.CFG["dataset"]["img_stds"],
                                      classes=self.CFG["dataset"]["labels"],
                                      train=False)

    # some useful data
    self.data_h, self.data_w, self.data_d = self.parser.get_img_size()
    self.means, self.stds = self.parser.get_means_stds()
    self.means = np.array(self.means, dtype=np.float32)
    self.stds = np.array(self.stds, dtype=np.float32)
    self.nclasses = self.parser.get_n_classes()

    # architecture definition
    # get weights?
    try:
      self.onnx_path = os.path.join(self.path, "model.onnx")
      self.model = onnx.load(self.onnx_path)
      print("Successfully loaded ONNX weights from", self.onnx_path)
    except Exception as e:
      print("Couldn't load ONNX network. Error: ", e)
      quit()

    # prepare the Caffe2 model on the proper device
    if torch.cuda.is_available():
      self.device = "CUDA"
    else:
      self.device = "CPU"
    print("Building ONNX Caffe2 backend with device", self.device)
    self.engine = backend.prepare(self.model, device=self.device) 
Author: PRBonn, Project: bonnetal, Lines: 53, Source: userCaffe2.py
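For context, a prepared engine like the one built above would typically be consumed as follows. This is a hypothetical call (assuming numpy as np, as in the snippets above); the NCHW layout and normalization order are assumptions based on the means/stds stored in the constructor:

# Hypothetical usage, assuming `user` is an instance of the class above.
img = np.zeros((1, user.data_d, user.data_h, user.data_w), dtype=np.float32)  # placeholder input
img = (img - user.means.reshape(1, -1, 1, 1)) / user.stds.reshape(1, -1, 1, 1)
logits = user.engine.run(img)[0]  # the backend returns a sequence of output arrays
pred = logits.argmax(axis=1)      # per-pixel class prediction (segmentation head assumed)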

Example 7: __init__

# Required module import: from caffe2.python.onnx import backend [as alias]
# Or: from caffe2.python.onnx.backend import prepare [as alias]
# This snippet also uses os, imp, yaml, numpy as np, torch, and onnx, plus the
# project's own modules (booger and the dataset parser).
def __init__(self, path):
    # parameters
    self.path = path

    # config from path
    try:
      yaml_path = self.path + "/cfg.yaml"
      print("Opening config file %s" % yaml_path)
      self.CFG = yaml.safe_load(open(yaml_path, 'r'))
    except Exception as e:
      print(e)
      print("Error opening cfg.yaml file from trained model.")
      quit()

    # get the data
    parserModule = imp.load_source("parserModule",
                                   booger.TRAIN_PATH + '/tasks/classification/dataset/' +
                                   self.CFG["dataset"]["name"] + '/parser.py')
    self.parser = parserModule.Parser(img_prop=self.CFG["dataset"]["img_prop"],
                                      img_means=self.CFG["dataset"]["img_means"],
                                      img_stds=self.CFG["dataset"]["img_stds"],
                                      classes=self.CFG["dataset"]["labels"],
                                      train=False)

    # some useful data
    self.data_h, self.data_w, self.data_d = self.parser.get_img_size()
    self.means, self.stds = self.parser.get_means_stds()
    self.means = np.array(self.means, dtype=np.float32)
    self.stds = np.array(self.stds, dtype=np.float32)
    self.nclasses = self.parser.get_n_classes()

    # architecture definition
    # get weights?
    try:
      self.onnx_path = os.path.join(self.path, "model.onnx")
      self.model = onnx.load(self.onnx_path)
      print("Successfully loaded ONNX weights from", self.onnx_path)
    except Exception as e:
      print("Couldn't load ONNX network. Error: ", e)
      quit()

    # prepare the Caffe2 model on the proper device
    if torch.cuda.is_available():
      self.device = "CUDA"
    else:
      self.device = "CPU"
    print("Building ONNX Caffe2 backend with device", self.device)
    self.engine = backend.prepare(self.model, device=self.device) 
Author: PRBonn, Project: bonnetal, Lines: 50, Source: userCaffe2.py


Note: the caffe2.python.onnx.backend.prepare method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.