

Python NeonArgparser.add_argument Method Code Examples

This article collects typical usage examples of the Python method neon.util.argparser.NeonArgparser.add_argument. If you are wondering what NeonArgparser.add_argument does, how to use it, or where to find real-world examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, neon.util.argparser.NeonArgparser.


The following presents 15 code examples of the NeonArgparser.add_argument method, sorted by popularity by default.
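All of the examples share the same basic pattern, sketched below in a minimal, self-contained form (the option name '--my_option' and its default are hypothetical, not taken from any example): NeonArgparser extends the standard argparse interface with neon's built-in options such as --data_dir, --batch_size, and --epochs, so add_argument registers extra script-specific options exactly as in argparse, and parse_args can additionally generate the compute backend via its gen_be flag.

"""Minimal NeonArgparser.add_argument sketch (illustrative only)."""
from neon.util.argparser import NeonArgparser

# NeonArgparser pre-registers neon's standard options (--data_dir,
# --batch_size, --epochs, ...); add_argument adds script-specific ones
parser = NeonArgparser(__doc__)
parser.add_argument('--my_option', type=int, default=10,
                    help='a hypothetical script-specific option')

# passing gen_be=True asks parse_args to also generate the neon backend,
# as in Example 1; gen_be=False defers backend creation (Examples 5, 7, ...)
args = parser.parse_args(gen_be=True)
print(args.my_option)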

Example 1: main

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve \
                        preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)

    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # load trained model
    model_dict = load_obj(args.model_file)

    # Vocabulary expansion trick needs to pass the correct vocab set to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)

        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path, use_recur_last=True)
        vocab = w2v_vocab
    else:
        # otherwise stick with original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))

    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer)
Developer: NervanaSystems, Project: neon, Lines: 51, Source: eval_sick.py
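A side note on Example 1's index bookkeeping: indices 0 and 1 are reserved for padding and out-of-vocabulary tokens, so every dataset index is shifted by index_from and the embedding layer must be sized to cover the shifted range. A tiny hypothetical sketch of that convention (the toy vocabulary is made up):

# hypothetical illustration of the index_from = 2 convention in Example 1
vocab = {'the': 0, 'cat': 1, 'sat': 2}      # raw dataset indices
index_from = 2                              # 0: padding, 1: out-of-vocabulary
token_ids = {w: i + index_from for w, i in vocab.items()}
vocab_size_layer = len(vocab) + index_from  # layer must cover the shifted range
print(token_ids)           # {'the': 2, 'cat': 3, 'sat': 4}
print(vocab_size_layer)    # 5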

Example 2: arguments

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
# ----------------------------------------------------------------------------
import os
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.transforms import Misclassification
from neon.callbacks.callbacks import Callbacks, BatchNormTuneCallback
from neon.util.argparser import NeonArgparser

from network import create_network
from data import make_train_loader, make_validation_loader, make_tuning_loader

# parse the command line arguments (generates the backend)
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--depth', type=int, default=2,
                    help='depth of each stage (network depth will be 9n+2)')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()
random_seed = args.rng_seed if args.rng_seed else 0

# Check that the proper manifest sets have been supplied
assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing validation manifest"

model, cost = create_network(args.depth)

# setup data provider
train = make_train_loader(args.manifest['train'], args.manifest_root, model.be, args.subset_pct,
                          random_seed)
test = make_validation_loader(args.manifest['val'], args.manifest_root, model.be, args.subset_pct)
Developer: rlugojr, Project: neon, Lines: 34, Source: train.py
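Example 2 (and Example 7 below) passes default_config_files so that a train.cfg sitting next to the script can pre-populate options without command-line flags. A hypothetical train.cfg in the key = value form NeonArgparser accepts (the values are invented; the depth and subset_pct names come from the add_argument calls above):

# hypothetical train.cfg, consumed via default_config_files
epochs = 10
batch_size = 64
depth = 2
subset_pct = 100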

Example 3: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
import os

from neon import logger as neon_logger
from neon.util.argparser import NeonArgparser
from util import create_frcn_model

do_plots = True
try:
    import matplotlib.pyplot as plt
    plt.switch_backend('agg')
except ImportError:
    neon_logger.display('matplotlib needs to be installed manually to generate plots needed '
                        'for this example.  Skipping plot generation')
    do_plots = False

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--img_prefix', type=str,
                    help='prefix for the saved image file names. If None, use '
                         'the model file name')
args = parser.parse_args(gen_be=True)
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"

if args.img_prefix is None:
    args.img_prefix = os.path.splitext(os.path.basename(args.model_file))[0]

output_dir = os.path.join(args.data_dir, 'frcn_output')
if not os.path.isdir(output_dir):
    os.mkdir(output_dir)

# hyperparameters
args.batch_size = 1
n_mb = 40
img_per_batch = args.batch_size
Developer: JediKoder, Project: neon, Lines: 33, Source: demo.py

Example 4: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
from neon.data import ArrayIterator
from neon.initializers import Uniform, GlorotUniform
from neon.layers import GeneralizedCost, Affine, Dropout, LookupTable, LSTM, RecurrentSum
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti, Accuracy
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.callbacks.callbacks import Callbacks
from neon.data.text_preprocessing import get_paddedXY, get_google_word2vec_W
import h5py
import cPickle

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('-f', '--review_file',
                    default='labeledTrainData.tsv',
                    help='input movie review file')
parser.add_argument('--vocab_file',
                    default='labeledTrainData.tsv.vocab',
                    help='output file to save the processed vocabulary')
parser.add_argument('--use_w2v', action='store_true',
                    help='use downloaded Google Word2Vec')
parser.add_argument('--w2v',
                    default='GoogleNews-vectors-negative300.bin',
                    help='the pre-built Word2Vec')
args = parser.parse_args()


# hyperparameters
hidden_size = 128
embedding_dim = 128
Developer: EnriqueSMarquez, Project: CNNs_RelatedProjects, Lines: 33, Source: train.py

Example 5: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
"""

from neon.backends import gen_backend
from neon.data import Text, load_text
from neon.initializers import Uniform
from neon.layers import GeneralizedCost, LSTM, Affine, GRU, LookupTable
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--rlayer_type', default='lstm', choices=['gru', 'lstm'],
                    help='type of recurrent layer to use (gru or lstm)')
args = parser.parse_args(gen_be=False)

# hyperparameters from the reference
args.batch_size = 20
time_steps = 20
hidden_size = 200
gradient_clip_norm = 5

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# download penn treebank
train_path = load_text('ptb-train', path=args.data_dir)
valid_path = load_text('ptb-valid', path=args.data_dir)
Developer: qdao, Project: neon, Lines: 32, Source: word_lstm.py

Example 6: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
"""

import os

from neon.util.argparser import NeonArgparser
from neon.layers import Conv, Pooling, MergeBroadcast, BranchNode, Affine, Tree, Dropout
from neon.layers import GeneralizedCost, Multicost
from neon.initializers import Constant, Xavier
from neon.backends import gen_backend
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImageLoader

parser = NeonArgparser(__doc__)
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
parser.add_argument('--test_only', action='store_true',
                    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir, inner_size=224,
                       dtype=args.datatype, subset_pct=args.subset_pct)
test = ImageLoader(set_name='validation', scale_range=(256, 256),
                   do_transforms=False, **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
relu = Rectlin()
Developer: BwRy, Project: NervanaModelZoo, Lines: 33, Source: googlenet_neon.py

Example 7: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, StepSchedule
from neon.callbacks.callbacks import Callbacks
from neon.util.persist import save_obj, get_data_cache_dir
from objectlocalization import PASCALVOC
from neon.transforms import CrossEntropyMulti, SmoothL1Loss
from neon.layers import Multicost, GeneralizedCostMask
import util
import faster_rcnn
import os

train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pascalvoc.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

# parse the command line arguments
parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--width', type=int, default=1000, help='Width of input image')
parser.add_argument('--height', type=int, default=1000, help='Height of input image')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# hyperparameters
assert args.batch_size == 1, "Faster-RCNN only supports batch size 1"
assert 'train' in args.manifest

rpn_rois_per_img = 256  # number of rois to sample to train rpn
frcn_rois_per_img = 128  # number of rois to sample to train frcn

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
be.enable_winograd = 4  # default to winograd 4 for fast autotune
Developer: StevenLOL, Project: neon, Lines: 33, Source: train.py

Example 8: err

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
            self.batch_index += 1

            yield self.X_dev, self.y_dev


# replicate neon's mse error metric
def err(y, t):
    feature_axis = 1
    return (0.5 * np.square(y - t).mean(axis=feature_axis).mean())

if __name__ == '__main__':

    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--curvetype', default='Lissajous1', choices=['Lissajous1', 'Lissajous2'],
                        help='type of input curve data to use (Lissajous1 or Lissajous2)')
    args = parser.parse_args(gen_be=False)

    # network hyperparameters
    hidden = 32
    args.batch_size = 1

    # The following flag will switch between 2 training strategies:
    # 1. return_sequence True:
    #       Inputs are sequences, and target outputs will be sequences.
    #       The RNN layer's output at EVERY step will be used for errors and optimized.
    #       The RNN model contains a RNN layer and an Affine layer
    #       The data iterator will format the data accordingly, and will stride along the
    #           whole series with no overlap
    # 2. return_sequence False:
    #       Inputs are sequences, and target output will be a single step.
Developer: Jicheng-Yan, Project: neon, Lines: 33, Source: timeseries_lstm.py
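Example 8's comment block contrasts two ways to frame the targets, controlled by the return_sequence flag. A standalone numpy sketch of the shapes each strategy yields (the toy series, window length, and overlap choice are hypothetical, not taken from timeseries_lstm.py):

import numpy as np

# toy stand-in for one coordinate of a Lissajous curve
series = np.sin(np.linspace(0, 8 * np.pi, 100))
seq_len = 10

# strategy 1 (return_sequence=True): stride non-overlapping windows along the
# series; the target is the input shifted one step, so the RNN is penalized
# at EVERY time step
n = (len(series) - 1) // seq_len * seq_len
X_seq = series[:n].reshape(-1, seq_len)
y_seq = series[1:n + 1].reshape(-1, seq_len)

# strategy 2 (return_sequence=False): the target is only the single step that
# follows each window; only the RNN's final output would be penalized
X_last = np.stack([series[i:i + seq_len] for i in range(len(series) - seq_len)])
y_last = series[seq_len:]

print(X_seq.shape, y_seq.shape)    # (9, 10) (9, 10)
print(X_last.shape, y_last.shape)  # (90, 10) (90,)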

Example 9: import

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
from neon.data.dataloaders import load_imdb
from neon.data.dataiterator import ArrayIterator
from neon.data.text_preprocessing import pad_data
from neon.initializers import Uniform, GlorotUniform
from neon.layers import (GeneralizedCost, LSTM, Affine, Dropout, LookupTable,
                         RecurrentSum, Recurrent, DeepBiLSTM, DeepBiRNN)
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti, Accuracy
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--rlayer_type', default='lstm',
                    choices=['bilstm', 'lstm', 'birnn', 'bibnrnn', 'rnn'],
                    help='type of recurrent layer to use (lstm, bilstm, rnn, birnn, bibnrnn)')

args = parser.parse_args(gen_be=False)

# hyperparameters from the reference
args.batch_size = 128
gradient_clip_value = 15
vocab_size = 20000
sentence_length = 128
embedding_dim = 128
hidden_size = 128
reset_cells = True

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
Developer: JediKoder, Project: neon, Lines: 33, Source: imdb_lstm.py

Example 10: arguments

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
and no partial minibatches, dropout is turned off for reproducibility on gpu
and the learning rate is scaled to handle the reduced dropout percentage.

"""
from neon.util.argparser import NeonArgparser
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImageLoader
from neon.callbacks.callbacks import Callbacks

# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
parser.add_argument('--direct', action='store_true',
                    help='do not initialize layers, deserialize directly')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       subset_pct=0.09990891117239205)
train = ImageLoader(set_name='train', scale_range=(256, 256), shuffle=False,
                    do_transforms=False, **img_set_options)
test = ImageLoader(set_name='validation', scale_range=(256, 384), shuffle=False,
                   do_transforms=False, **img_set_options)

layers = [Conv((11, 11, 64), init=Gaussian(scale=0.01), bias=Constant(0),
               activation=Rectlin(), padding=3, strides=4),
          Pooling(3, strides=2),
          Conv((5, 5, 192), init=Gaussian(scale=0.01), bias=Constant(1),
Developer: Jokeren, Project: neon, Lines: 34, Source: alexnet.py

Example 11: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
from neon.backends import gen_backend  # noqa
from neon.data import ArrayIterator  # noqa
from neon.initializers import Uniform, GlorotUniform, Array  # noqa
from neon.layers import GeneralizedCost, Affine, Dropout, LookupTable, LSTM, RecurrentSum  # noqa
from neon.models import Model  # noqa
from neon.optimizers import Adagrad  # noqa
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti, Accuracy  # noqa
from neon.util.argparser import NeonArgparser, extract_valid_args  # noqa
from neon.util.compat import pickle  # noqa
from neon.callbacks.callbacks import Callbacks  # noqa
from neon.data.text_preprocessing import get_paddedXY, get_google_word2vec_W  # noqa
import h5py  # noqa

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("-f", "--review_file", default="labeledTrainData.tsv", help="input movie review file")
parser.add_argument(
    "--vocab_file", default="labeledTrainData.tsv.vocab", help="output file to save the processed vocabulary"
)
parser.add_argument("--use_w2v", action="store_true", help="use downloaded Google Word2Vec")
parser.add_argument("--w2v", default="GoogleNews-vectors-negative300.bin", help="the pre-built Word2Vec")
args = parser.parse_args()


# hyperparameters
hidden_size = 128
embedding_dim = 128
vocab_size = 20000
sentence_length = 128
batch_size = 32
gradient_limit = 5
Developer: Jokeren, Project: neon, Lines: 33, Source: train.py

Example 12: open

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
        predictions:
            the model's predictions
    """
    results_list = predictions.tolist()
    with open(output, 'w', encoding='utf-8') as out_file:
        writer = csv.writer(out_file, delimiter=',', quotechar='"')
        for result in results_list:
            writer.writerow([result])
    print("Results of inference saved in {0}".format(output))


if __name__ == "__main__":
    # parse the command line arguments
    parser = NeonArgparser()
    parser.set_defaults(epochs=200)
    parser.add_argument('--data', help='prepared data CSV file path',
                        type=validate_existing_filepath)
    parser.add_argument('--model', help='path to the trained model file',
                        type=validate_existing_filepath)
    parser.add_argument('--print_stats', action='store_true', default=False,
                        help='print evaluation stats for the model predictions - if '
                        'your data has tagging')
    parser.add_argument('--output', help='path to location for inference output file',
                        type=validate_parent_exists)
    args = parser.parse_args()
    data_path = absolute_path(args.data)
    model_path = absolute_path(args.model)
    print_stats = args.print_stats
    output_path = absolute_path(args.output)
    # generate backend
    be = gen_backend(batch_size=10)
    data_set = NpSemanticSegData(data_path, train_to_test_ratio=1)
Developer: cdj0311, Project: nlp-architect, Lines: 34, Source: inference.py

Example 13: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
    Striving for Simplicity: the All Convolutional Net `[Springenberg2015]`_
..  _[Springenberg2015]: http://arxiv.org/pdf/1412.6806.pdf
"""

from neon.initializers import Gaussian
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.layers import Conv, Dropout, Activation, Pooling, GeneralizedCost
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Misclassification
from neon.models import Model
from neon.data import ArrayIterator, load_cifar10
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("--learning_rate", default=0.05, help="initial learning rate")
parser.add_argument("--weight_decay", default=0.001, help="weight decay")
parser.add_argument("--deconv", action="store_true", help="save visualization data from deconvolution")
args = parser.parse_args()

# hyperparameters
num_epochs = args.epochs

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(
    path=args.data_dir, normalize=False, contrast_normalize=True, whiten=True
)

# really 10 classes, pad to nearest power of 2 to match conv output
train_set = ArrayIterator(X_train, y_train, nclass=16, lshape=(3, 32, 32))
valid_set = ArrayIterator(X_test, y_test, nclass=16, lshape=(3, 32, 32))
Developer: dongjoon-hyun, Project: neon, Lines: 32, Source: cifar10_allcnn.py

Example 14: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
Usage:
    use -t to specify which bAbI task to run
    python examples/babi/demo.py -t 1 --rlayer_type gru --model_weights babi.p
"""
import numpy as np

from util import create_model, babi_handler
from neon.backends import gen_backend
from neon.data import BABI, QA
from neon.data.text import Text
from neon.util.argparser import NeonArgparser, extract_valid_args

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('-t', '--task', type=int, default=1, choices=range(1, 21),
                    help='the task ID to train/test on from bAbI dataset (1-20)')
parser.add_argument('--rlayer_type', default='gru', choices=['gru', 'lstm'],
                    help='type of recurrent layer to use (gru or lstm)')
parser.add_argument('--model_weights',
                    help='pickle file of trained weights')
args = parser.parse_args(gen_be=False)

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
be.bsz = 1

# load the bAbI dataset
babi = babi_handler(args.data_dir, args.task)
valid_set = QA(*babi.test)

# create model
Developer: bin2000, Project: neon, Lines: 33, Source: demo.py

Example 15: NeonArgparser

# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import add_argument [as alias]
from neon.util.persist import load_obj
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.initializers import Uniform, Orthonormal, Constant
from neon.layers import GeneralizedCostMask, Multicost, GRU, SkipThought
from neon.models import Model
from neon.transforms import CrossEntropyMulti, Logistic, Tanh
from neon.callbacks.callbacks import Callbacks, MetricCallback
from neon.optimizers import Adam
from neon import logger as neon_logger

from data_loader import load_data
from data_iterator import SentenceHomogenous

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--output_dir', default='output/',
                    help='choose the directory to save the files')
parser.add_argument('--max_vocab_size', default=20000,
                    help='number of (most frequent) words to use from vocabulary')
parser.add_argument('--max_len_w', default=30,
                    help='maximum sentence length for training')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# hyperparameters from the reference
args.batch_size = 64
embed_dim = 620

valid_split = None
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
Developer: NervanaSystems, Project: neon, Lines: 34, Source: train.py


Note: The neon.util.argparser.NeonArgparser.add_argument examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.