当前位置: 首页>>代码示例>>Python>>正文


Python Model.get_outputs方法代码示例

本文整理汇总了Python中neon.models.Model.get_outputs方法的典型用法代码示例。如果您正苦于以下问题:Python Model.get_outputs方法的具体用法?Python Model.get_outputs怎么用?Python Model.get_outputs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在neon.models.Model的用法示例。


在下文中一共展示了Model.get_outputs方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_model_get_outputs_rnn

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def test_model_get_outputs_rnn(backend_default, data):
    """Model.get_outputs on a recurrent net: check output shape and values."""
    dataset = PTB(50, path=data)
    train_iter = dataset.train_iter

    # constant weight init makes the untrained network's outputs predictable
    const_init = Constant(0.08)

    net = Model(layers=[
        Recurrent(150, const_init, activation=Logistic()),
        Affine(len(train_iter.vocab), const_init, bias=const_init,
               activation=Rectlin()),
    ])
    out = net.get_outputs(train_iter)

    assert out.shape == (train_iter.ndata, train_iter.seq_length,
                         train_iter.nclass)

    # since the init is constant and the model is un-trained, the values
    # along the feature dim should all be the same
    for step in (0, 1):
        assert allclose_with_out(out[0, step], out[0, step, 0], rtol=0, atol=1e-4)

    # along the time dim, the values should be increasing
    for later, earlier in ((2, 1), (1, 0)):
        assert np.alltrue(out[0, later] > out[0, earlier])
开发者ID:StevenLOL,项目名称:neon,代码行数:29,代码来源:test_model.py

示例2: train_eval

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def train_eval(
        train_set,
        valid_set,
        args,
        hidden_size=100,
        clip_gradients=True,
        gradient_limit=5):
    """Train a two-layer LSTM classifier and evaluate it on valid_set.

    Returns:
        tuple: (positive-class output column, misclassification rate).
    """
    # uniform weight initialization
    init = Uniform(low=-0.08, high=0.08)

    # two stacked LSTMs feeding a 2-way softmax classifier
    network_layers = [
        LSTM(hidden_size, init, Logistic(), Tanh()),
        LSTM(hidden_size, init, Logistic(), Tanh()),
        Affine(2, init, bias=init, activation=Softmax()),
    ]

    cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
    network = Model(layers=network_layers)
    optimizer = RMSProp(clip_gradients=clip_gradients,
                        gradient_limit=gradient_limit,
                        stochastic_round=args.rounding)

    # configure callbacks
    callbacks = Callbacks(network, train_set, progress_bar=args.progress_bar)

    # train model
    network.fit(train_set,
                optimizer=optimizer,
                num_epochs=args.epochs,
                cost=cost,
                callbacks=callbacks)

    predictions = network.get_outputs(valid_set)
    neg_rate = network.eval(valid_set, metric=Misclassification())
    return (predictions[:, 1], neg_rate)
开发者ID:wjiangcmu,项目名称:Driver_telematics,代码行数:37,代码来源:lstm.py

示例3: test_model_get_outputs

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def test_model_get_outputs(backend):
    """A manual fprop pass must match what Model.get_outputs returns."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator(X_train[:backend.bsz * 3])

    init_norm = Gaussian(loc=0.0, scale=0.1)

    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])

    # reference output: run fprop by hand over every batch
    chunks = []
    for x, _ in train_set:
        chunks.append(mlp.fprop(x).get().T.copy())
    ref_output = np.vstack(chunks)

    train_set.reset()
    assert np.allclose(mlp.get_outputs(train_set), ref_output)
开发者ID:sunclx,项目名称:neon,代码行数:20,代码来源:test_model.py

示例4: test_model_get_outputs_rnn

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def test_model_get_outputs_rnn(backend):
    """get_outputs on a recurrent model returns (ndata, seq_length, nclass)."""
    data_set = Text(time_steps=50, path=load_text('ptb-valid'))

    # constant weight initialization
    const = Constant(0.08)

    rnn = Model(layers=[
        Recurrent(150, const, Logistic()),
        Affine(len(data_set.vocab), const, bias=const, activation=Rectlin()),
    ])
    out = rnn.get_outputs(data_set)

    assert out.shape == (data_set.ndata, data_set.seq_length, data_set.nclass)
开发者ID:sunclx,项目名称:neon,代码行数:21,代码来源:test_model.py

示例5: run_once

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def run_once(web_input):
    """
    Run a forward pass of the saved model for a single input vector.

    Args:
        web_input (np.ndarray): flat feature vector received from the web
            form; the first ``num_feat - 1`` entries are standardized with
            the training-set mean/std before inference.

    Returns:
        predicted price, rescaled back to the original units.
    """
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    num_feat = 4

    # load the standardization statistics saved at preprocessing time;
    # column 0 holds the target's stats, columns 1..num_feat the features'
    npzfile = np.load('./model/homeapp_preproc.npz')
    mean = npzfile['mean']
    std = npzfile['std']
    mean = np.reshape(mean, (1, mean.shape[0]))
    std = np.reshape(std, (1, std.shape[0]))

    # Reloading saved model
    mlp = Model("./model/homeapp_model.prm")

    # Horrible terrible hack that should never be needed :-(
    NervanaObject.be.bsz = 1

    # BUG FIX: the incoming web_input used to be unconditionally overwritten
    # here by a hard-coded debug vector, so the web form's input was ignored.
    # The debug vectors are kept below (commented out) for manual testing.
    # Actual: 275,000 Predicted: 362,177
    #web_input = np.array([51.2246169879,-1.48577399748,223.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 185,000 Predicted: 244,526
    #web_input = np.array([51.4395375168,-1.07174234072,5.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 231,500 Predicted 281,053
    #web_input = np.array([52.2010084131,-2.18181259148,218.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    web_input = np.reshape(web_input, (1, web_input.shape[0]))

    # standardize the continuous features (skip the target stats in column 0)
    web_input[:, :num_feat-1] -= mean[:, 1:num_feat]
    web_input[:, :num_feat-1] /= std[:, 1:num_feat]

    web_test_set = ArrayIterator(X=web_input, make_onehot=False)

    web_output = mlp.get_outputs(web_test_set)

    # Rescale the output back to price units
    web_output *= std[:, 0]
    web_output += mean[:, 0]

    return web_output[0]
开发者ID:ankitvb,项目名称:homeprice,代码行数:45,代码来源:run_mlp.py

示例6: __init__

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
class MostCommonWordSense:
    """Small MLP classifier used for most-common word-sense scoring."""

    def __init__(self, rounding, callback_args, epochs):
        # weight initialization function
        self.init = Gaussian(loc=0.0, scale=0.01)
        # SGD with momentum, optionally using stochastic rounding
        self.optimizer = GradientDescentMomentum(learning_rate=0.1,
                                                 momentum_coef=0.9,
                                                 stochastic_round=rounding)
        # sum-of-squares cost
        self.cost = GeneralizedCost(costfunc=SumSquared())
        self.epochs = epochs
        self.model = None
        self.callback_args = callback_args

    def build(self):
        """Assemble the two-layer MLP and store it on ``self.model``."""
        hidden = Affine(nout=100, init=self.init, bias=self.init,
                        activation=Rectlin())
        output = Affine(nout=2, init=self.init, bias=self.init,
                        activation=Softmax())
        self.model = Model(layers=[hidden, output])

    def fit(self, valid_set, train_set):
        """Train on ``train_set`` while evaluating callbacks on ``valid_set``."""
        callbacks = Callbacks(self.model, eval_set=valid_set,
                              **self.callback_args)
        self.model.fit(train_set, optimizer=self.optimizer,
                       num_epochs=self.epochs, cost=self.cost,
                       callbacks=callbacks)

    def save(self, save_path):
        """Persist the model parameters to ``save_path``."""
        self.model.save_params(save_path)

    def load(self, model_path):
        """Load a previously saved model from ``model_path``."""
        self.model = Model(model_path)

    def eval(self, valid_set):
        """Return the misclassification rate on ``valid_set``."""
        return self.model.eval(valid_set, metric=Misclassification())

    def get_outputs(self, valid_set):
        """Return the raw model outputs for ``valid_set``."""
        return self.model.get_outputs(valid_set)
开发者ID:cdj0311,项目名称:nlp-architect,代码行数:42,代码来源:most_common_word_sense.py

示例7: test_model_get_outputs

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def test_model_get_outputs(backend_default, data):
    """Manual fprop over the iterator must match Model.get_outputs."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)
    train_set = ArrayIterator(X_train[:backend_default.bsz * 3])

    init_norm = Gaussian(loc=0.0, scale=0.1)

    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])
    mlp.initialize(train_set)

    # reference output from a hand-rolled forward pass
    batches = []
    for x, _ in train_set:
        batches.append(mlp.fprop(x).get().T.copy())
    ref_output = np.vstack(batches)

    train_set.reset()
    assert np.allclose(mlp.get_outputs(train_set), ref_output)

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
开发者ID:AdrienAtallah,项目名称:neon,代码行数:24,代码来源:test_model.py

示例8: test_model_get_outputs

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def test_model_get_outputs(backend_default, data):
    """Manual fprop over the MNIST iterator must match Model.get_outputs."""
    mnist = MNIST(path=data)
    train_set = mnist.train_iter

    init_norm = Gaussian(loc=0.0, scale=0.1)

    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])
    mlp.initialize(train_set)

    batches = []
    for x, _ in train_set:
        batches.append(mlp.fprop(x).get().T.copy())
    ref_output = np.vstack(batches)

    train_set.reset()
    out = mlp.get_outputs(train_set)
    # get_outputs may drop a padded final batch, so compare only its prefix
    assert allclose_with_out(out, ref_output[:out.shape[0], :])

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
开发者ID:StevenLOL,项目名称:neon,代码行数:24,代码来源:test_model.py

示例9: NeonArgparser

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
# Use the demo config file only when it actually exists on disk.
config_files = [demo_config] if os.path.exists(demo_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--input_video', help='video file')
parser.add_argument('--output_video', help='Video file with overlayed inference hypotheses')
args = parser.parse_args()

assert args.model_file is not None, "need a model file for testing"
model = Model(args.model_file)

# Build an id -> label map from the two-column categories CSV.
assert 'categories' in args.manifest, "Missing categories file"
category_map = {t[0]: t[1] for t in np.genfromtxt(args.manifest['categories'],
                                                  dtype=None, delimiter=',')}

# Make a temporary directory and clean up afterwards
outdir = mkdtemp()
atexit.register(shutil.rmtree, outdir)
caption_file = os.path.join(outdir, 'caption.txt')

# Split the input video into clips and produce an inference manifest.
manifest = segment_video(args.input_video, outdir)

test = make_inference_loader(manifest, model.be)
# Average the per-clip class probabilities over the video, then take the
# five most likely classes (argsort is ascending, hence the [-5:] slice).
clip_pred = model.get_outputs(test)
tot_prob = clip_pred[:test.ndata, :].mean(axis=0)
top_5 = np.argsort(tot_prob)[-5:]

# One "probability label" line per hypothesis, highest probability first.
hyps = ["{:0.5f} {}".format(tot_prob[i], category_map[i]) for i in reversed(top_5)]
np.savetxt(caption_file, hyps, fmt='%s')

# Overlay the hypotheses onto the output video as captions.
caption_video(args.input_video, caption_file, args.output_video)
开发者ID:rlugojr,项目名称:neon,代码行数:32,代码来源:demo.py

示例10: Uniform

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
# uniform weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization: pick the recurrent layer type from the CLI flag
if args.rlayer_type == 'lstm':
    rlayer = LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh())
else:
    rlayer = GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic())

layers = [rlayer,
          Affine(len(train_set.vocab), init, bias=init, activation=Softmax())]

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))

model = Model(layers=layers)

optimizer = RMSProp(gradient_clip_value=gradient_clip_value, stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, train_set, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

# get predictions: argmax over the class axis, regrouped to (batch, nbatches, time)
ypred = model.get_outputs(valid_set)
prediction = ypred.argmax(2).reshape((valid_set.nbatches,
                                      args.batch_size,
                                      time_steps)).transpose(1, 0, 2)
fraction_correct = (prediction == valid_set.y).mean()
# FIX: parenthesized print is valid under both Python 2 and Python 3;
# the bare print statement was a SyntaxError on Python 3
print('Misclassification error = %.1f%%' % ((1-fraction_correct)*100))
开发者ID:ferenckulcsar,项目名称:neon,代码行数:32,代码来源:char_lstm.py

示例11: Model

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
    model = Model(layers=layers)
    cost = GeneralizedCost(MeanSquared())
    optimizer = RMSProp(stochastic_round=args.rounding)

    callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

    # fit model
    model.fit(train_set,
              optimizer=optimizer,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)

    # =======visualize how the model does on validation set==============
    # run the trained model on train and valid dataset and see how the outputs match
    train_output = model.get_outputs(train_set).reshape(-1, train_set.nfeatures)
    valid_output = model.get_outputs(valid_set).reshape(-1, valid_set.nfeatures)
    train_target = train_set.y_series
    valid_target = valid_set.y_series

    # calculate accuracy
    terr = err(train_output, train_target)
    verr = err(valid_output, valid_target)

    # FIX: parenthesized print is valid under both Python 2 and Python 3;
    # the bare print statement was a SyntaxError on Python 3
    print('terr = %g, verr = %g' % (terr, verr))

    if do_plots:
        plt.figure()
        plt.plot(train_output[:, 0], train_output[:, 1], 'bo', label='prediction')
        plt.plot(train_target[:, 0], train_target[:, 1], 'r.', label='target')
        plt.legend()
开发者ID:Jicheng-Yan,项目名称:neon,代码行数:33,代码来源:timeseries_lstm.py

示例12: arguments

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
from neon.util.argparser import NeonArgparser
from neon.layers import Pooling
from neon.models import Model
from neon.data import ImageLoader
from neon.util.persist import save_obj, load_obj

# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# evaluate the saved model at several input scales and dump the softmaxes
scales = [112, 128, 160, 240]
for scale in scales:
    # FIX: print as a function is valid under both Python 2 and Python 3;
    # the bare print statement was a SyntaxError on Python 3
    print(scale)
    test = ImageLoader(set_name='validation', shuffle=False, do_transforms=False, inner_size=scale,
                       scale_range=scale, repo_dir=args.data_dir)

    # insert a global average-pooling layer just before the classifier head
    model_desc = load_obj(args.model_file)
    model_desc['model']['config']['layers'].insert(-1, Pooling('all', op='avg').get_description())
    model = Model(model_desc, test, inference=True)
    softmaxes = model.get_outputs(test)
    save_obj(softmaxes, "bigfeat_dropout_SM_{}.pkl".format(scale))

示例13: model

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
class NpSemanticSegClassifier:
    """
    NP Semantic Segmentation classifier model (based on Neon framework).

    Args:
        num_epochs(int): number of epochs to train the model
        **callback_args (dict): callback args keyword arguments to init a Callback for the model
        cost: the model's cost function. Default is 'neon.transforms.CrossEntropyBinary' cost
        optimizer (:obj:`neon.optimizers`): the model's optimizer. Default is
        'neon.optimizers.GradientDescentMomentum(0.07, momentum_coef=0.9)'
    """

    def __init__(self, num_epochs, callback_args, optimizer=None):
        """

        Args:
            num_epochs(int): number of epochs to train the model
            **callback_args (dict): callback args keyword arguments to init Callback for the model
            cost: the model's cost function. Default is 'neon.transforms.CrossEntropyBinary' cost
            optimizer (:obj:`neon.optimizers`): the model's optimizer. Default is
            `neon.optimizers.GradientDescentMomentum(0.07, momentum_coef=0.9)`
        """
        self.model = None
        self.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
        # FIX: the optimizer used to be instantiated in the default argument,
        # which evaluates once at class-definition time and shares a single
        # (stateful) optimizer object across every instance; build a fresh
        # default per instance instead.
        if optimizer is None:
            optimizer = GradientDescentMomentum(0.07, momentum_coef=0.9)
        self.optimizer = optimizer
        self.epochs = num_epochs
        self.callback_args = callback_args

    def build(self):
        """
        Build the model's layers
        """
        # setup weight initialization function
        init_norm = Gaussian(scale=0.01)
        # two 64-unit ReLU hidden layers feeding a 2-way logistic output
        hidden_sizes = (64, 64)
        layers = [Affine(nout=n, init=init_norm, activation=Rectlin())
                  for n in hidden_sizes]
        layers.append(Affine(nout=2, init=init_norm,
                             activation=Logistic(shortcut=True)))

        # initialize model object
        self.model = Model(layers=layers)

    def fit(self, test_set, train_set):
        """
        Train and fit the model on the datasets

        Args:
            test_set (:obj:`neon.data.ArrayIterators`): The test set
            train_set (:obj:`neon.data.ArrayIterators`): The train set
        """
        # evaluation callbacks run against the held-out test set
        callbacks = Callbacks(self.model, eval_set=test_set,
                              **self.callback_args)
        self.model.fit(train_set,
                       optimizer=self.optimizer,
                       num_epochs=self.epochs,
                       cost=self.cost,
                       callbacks=callbacks)

    def save(self, model_path):
        """
        Persist the trained model's parameter (.prm) file to disk.

        Args:
            model_path(str): local path where the model will be written
        """
        self.model.save_params(model_path)

    def load(self, model_path):
        """
        Restore a pre-trained model from its .prm file.

        Args:
            model_path(str): local path of the saved model
        """
        self.model = Model(model_path)

    def eval(self, test_set):
        """
        Evaluate the model's test_set on error_rate, test_accuracy_rate and precision_recall_rate

        Args:
            test_set (ArrayIterator): The test set

        Returns:
            tuple(int): error_rate, test_accuracy_rate and precision_recall_rate
        """
        # evaluate once per metric, in the documented return order
        metrics = (Misclassification(), Accuracy(), PrecisionRecall(2))
        return tuple(self.model.eval(test_set, metric=m) for m in metrics)

    def get_outputs(self, test_set):
        """
        Classify the dataset on the model

#.........这里部分代码省略.........
开发者ID:cdj0311,项目名称:nlp-architect,代码行数:103,代码来源:np_semantic_segmentation.py

示例14: main

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
def main():
    """Train (or benchmark) the SegNet model and pickle its validation outputs."""
    # larger batch sizes may not fit on GPU
    parser = NeonArgparser(__doc__, default_overrides={'batch_size': 4})
    parser.add_argument("--bench", action="store_true", help="run benchmark instead of training")
    parser.add_argument("--num_classes", type=int, default=12, help="number of classes in the annotation")
    parser.add_argument("--height", type=int, default=256, help="image height")
    parser.add_argument("--width", type=int, default=512, help="image width")

    args = parser.parse_args(gen_be=False)

    # check that image dimensions are powers of 2 (required by the
    # down/up-sampling layer stack)
    if((args.height & (args.height - 1)) != 0):
        raise TypeError("Height must be a power of 2.")
    if((args.width & (args.width - 1)) != 0):
        raise TypeError("Width must be a power of 2.")

    (c, h, w) = (args.num_classes, args.height, args.width)

    # need to use the backend with the new upsampling layer implementation
    be = NervanaGPU_Upsample(rng_seed=args.rng_seed,
                             device_id=args.device_id)
    # set batch size
    be.bsz = args.batch_size

    # couple backend to global neon object
    NervanaObject.be = be

    # image loaders: center-crop to the model size, no flips/augmentation
    shape = dict(channel_count=3, height=h, width=w, subtract_mean=False)
    train_params = ImageParams(center=True, flip=False,
                               scale_min=min(h, w), scale_max=min(h, w),
                               aspect_ratio=0, **shape)
    test_params = ImageParams(center=True, flip=False,
                              scale_min=min(h, w), scale_max=min(h, w),
                              aspect_ratio=0, **shape)
    common = dict(target_size=h*w, target_conversion='read_contents',
                  onehot=False, target_dtype=np.uint8, nclasses=args.num_classes)

    train_set = PixelWiseImageLoader(set_name='train', repo_dir=args.data_dir,
                                     media_params=train_params,
                                     shuffle=False, subset_percent=100,
                                     index_file=os.path.join(args.data_dir, 'train_images.csv'),
                                     **common)
    val_set = PixelWiseImageLoader(set_name='val', repo_dir=args.data_dir,
                                   media_params=test_params,
                                   index_file=os.path.join(args.data_dir, 'val_images.csv'),
                                   **common)

    # initialize model object
    layers = gen_model(c, h, w)
    segnet_model = Model(layers=layers)

    # configure callbacks
    callbacks = Callbacks(segnet_model, eval_set=val_set, **args.callback_args)

    # separate learning-rate setups for weights, biases and batch-norm params
    opt_gdm = GradientDescentMomentum(1.0e-6, 0.9, wdecay=0.0005, schedule=Schedule())
    opt_biases = GradientDescentMomentum(2.0e-6, 0.9, schedule=Schedule())
    opt_bn = GradientDescentMomentum(1.0e-6, 0.9, schedule=Schedule())
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases, 'BatchNorm': opt_bn})

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    if args.bench:
        segnet_model.initialize(train_set, cost=cost)
        segnet_model.benchmark(train_set, cost=cost, optimizer=opt)
        sys.exit(0)
    else:
        segnet_model.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    # get the trained segnet model outputs for the validation set
    outs_val = segnet_model.get_outputs(val_set)

    # FIX: pickle protocol -1 is a binary protocol, so the file must be
    # opened in binary mode; text mode ('w') breaks under Python 3 and can
    # corrupt the stream on Windows under Python 2.
    with open('outputs.pkl', 'wb') as fid:
        pickle.dump(outs_val, fid, -1)

示例15: GeneralizedCost

# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import get_outputs [as 别名]
    # (tail of a layer-construction loop whose header lies above this excerpt)
    layers.append(Pooling(2, strides=2))
    nchan *= 2
layers.append(DropoutBinary(keep=0.2))
layers.append(Affine(nout=447, init=init, activation=Softmax()))

# multiclass cross-entropy over the 447-way softmax output
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)
callbacks = Callbacks(mlp, train, **args.callback_args)
mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost,
        callbacks=callbacks)
train.exit_batch_provider()

# run inference on the validation images without augmentation
test = ClassifierLoader(repo_dir=args.test_data_dir, inner_size=imwidth,
                        set_name='validation', do_transforms=False)
test.init_batch_provider()
probs = mlp.get_outputs(test)
test.exit_batch_provider()

# recover the validation file names so submission rows line up with probs
filcsv = np.loadtxt(os.path.join(args.test_data_dir, 'val_file.csv'),
                    delimiter=',', skiprows=1, dtype=str)
files = [os.path.basename(row[0]) for row in filcsv]
datadir = os.path.dirname(args.data_dir)

# reuse the header line from the provided sample submission
with open(os.path.join(datadir, 'sample_submission.csv'), 'r') as fd:
    header = fd.readline()

# write the gzipped submission: one row of class probabilities per file
# (the loop body continues beyond this excerpt)
with gzip.open('subm.csv.gz', 'wb') as fd:
    fd.write(header)
    for i in range(probs.shape[0]):
        fd.write('{},'.format(files[i]))
        row = probs[i].tolist()


注:本文中的neon.models.Model.get_outputs方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。