This page collects typical usage examples of the Python method neon.util.argparser.NeonArgparser.parse_args. If you have been wondering what NeonArgparser.parse_args does, how to use it, or want to see it in context, the curated code examples below should help. You can also look further into the containing class, neon.util.argparser.NeonArgparser.
Below are 15 code examples of NeonArgparser.parse_args, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
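Before the examples, here is a minimal sketch of the pattern almost all of them share: build a NeonArgparser from the script's docstring, then call parse_args() to get a namespace pre-populated with neon's standard command-line options (data_dir, epochs, backend settings, callback_args, and so on). The attributes printed here are the standard options used throughout the examples below; what their defaults resolve to on your machine is an assumption.
from neon.util.argparser import NeonArgparser

# parse_args() returns an argparse-style namespace with neon's standard
# options filled in; by default it also generates the compute backend.
parser = NeonArgparser(__doc__)
args = parser.parse_args()

print(args.data_dir)  # dataset cache directory from --data_dir
print(args.epochs)    # number of training epochs from --epochs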
Example 1: test_iterator
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def test_iterator():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = DataIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = DataIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
Example 2: test_iterator
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def test_iterator():
    print('Testing iterator based data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
Example 3: get_data
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def get_data():
    """
    Download bilingual text dataset for Machine translation example.
    """
    # vocab_size and time_steps are hard coded here
    vocab_size = 16384
    time_steps = 20

    # download dataset
    url = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/'
    filename = 'bitexts.tgz'
    size = 1313280000

    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)
    data_dir = os.path.join(args.data_dir, 'nmt')

    _, filepath = Dataset._valid_path_append(data_dir, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    # extract selected datasets
    datafiles = dict()
    datafiles['un2000'] = ('un2000_pc34.en.gz', 'un2000_pc34.fr.gz')
    datafiles['europarl7'] = ('ep7_pc45.en.gz', 'ep7_pc45.fr.gz')

    extractpath = os.path.join(data_dir, 'bitexts.selected')
    with tarfile.open(filepath, 'r') as tar_ref:
        for dset, files in datafiles.items():
            datasetpath = os.path.join(data_dir, dset)
            # extract the files for this dataset, if not already there
            for zipped in files:
                fname = '.'.join(zipped.split('.')[:-1])
                fpath = os.path.join(datasetpath, fname)
                if not os.path.exists(fpath):
                    gzpath = os.path.join(extractpath, zipped)
                    if not os.path.exists(gzpath):
                        select = [ti for ti in tar_ref if os.path.split(ti.name)[1] == zipped]
                        tar_ref.extractall(path=data_dir, members=select)
                    # get contents of gz files (binary mode so this also works on Python 3)
                    if not os.path.exists(datasetpath):
                        os.makedirs(datasetpath)
                    with gzip.open(gzpath, 'rb') as fin, open(fpath, 'wb') as fout:
                        fout.write(fin.read())
                    os.remove(gzpath)

    if os.path.exists(extractpath):
        os.rmdir(extractpath)

    # process data and save to h5 file
    # loop through all datasets and get train and valid splits
    for dataset in datafiles.keys():
        s_vocab, t_vocab = create_h5py(data_dir, dataset, 'train',
                                       vocab_size=vocab_size, time_steps=time_steps)
        create_h5py(data_dir, dataset, 'valid', s_vocab=s_vocab, t_vocab=t_vocab,
                    time_steps=time_steps)
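A note on parse_args(gen_be=False), used above and again in Examples 10 and 15: the gen_be flag controls whether parse_args also generates the compute backend. Passing False is the usual choice when a script only prepares data, or wants to configure the backend itself. A minimal sketch of the manual route, mirroring what Example 15 does:
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser, extract_valid_args

parser = NeonArgparser(__doc__)
args = parser.parse_args(gen_be=False)  # skip automatic backend generation
be = gen_backend(**extract_valid_args(args, gen_backend))  # build it manually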
Example 4: test_loader
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def test_loader():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    train_dir = os.path.join(args.data_dir, 'macrotrain')
    test_dir = os.path.join(args.data_dir, 'macrotest')
    write_batches(args, train_dir, trainimgs, 0)
    write_batches(args, test_dir, testimgs, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        repo_dir=train_dir)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       repo_dir=test_dir)
    err = run(args, train, test)
    return err
Example 5: main
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    train = ImgMaster(repo_dir="spectroDataTmp", set_name="train", inner_size=400,
                      subset_pct=100)
    val = ImgMaster(repo_dir="spectroDataTmp", set_name="validation", inner_size=400,
                    subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir="spectroTestDataTmp", set_name="validation", inner_size=400,
                     subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    model = constuct_network()  # helper defined elsewhere in the source file
    model.load_weights(args.model_file)

    # Optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    print(args.epochs)
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print("Validation set metrics:")
    print("LogLoss: %.2f, Accuracy: %.1f%% (Top-1), %.1f%% (Top-5)" % (
        mets[0],
        (1.0 - mets[1]) * 100,
        (1.0 - mets[2]) * 100,
    ))
    test.exit_batch_provider()
    train.exit_batch_provider()
Example 6: main
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training images
    print("Setting up data batch loaders...")
    train = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp', set_name='train', inner_size=120, subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()
    # model.load_weights(args.model_file)

    # drop weights LR by 1/250**(1/3) at epochs (23, 45, 66), drop bias LR by 1/10 at epoch 45
    weight_sched = Schedule([22, 44, 65, 90, 97], (1 / 250.)**(1 / 3.))
    opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.005, schedule=weight_sched)
    opt_biases = GradientDescentMomentum(0.04, 1.0, schedule=Schedule([130], .1))
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f%% (Top-1), %.1f%% (Top-5)' % (mets[0],
                                                                       (1.0 - mets[1]) * 100,
                                                                       (1.0 - mets[2]) * 100))
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
Example 7: main
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve '
                             'preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)

    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # load trained model
    model_dict = load_obj(args.model_file)

    # Vocabulary expansion trick needs to pass the correct vocab set to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)
        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path,
                                  use_recur_last=True)
        vocab = w2v_vocab
    else:
        # otherwise stick with original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))

    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer)
Example 8: test_loader
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def test_loader():
    print('Testing image loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    train_archive = os.path.join(args.data_dir, traindir + '-ingested')
    test_archive = os.path.join(args.data_dir, testdir + '-ingested')
    write_batches(args, train_archive, traindir, 0)
    write_batches(args, test_archive, testdir, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        scale_range=0, repo_dir=train_archive)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       scale_range=0, repo_dir=test_archive)
    err = run(args, train, test)
    return err
Example 9: test_loader
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def test_loader():
    print('Testing generic data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    train_path = os.path.join(args.data_dir, traindir + '-ingested')
    test_path = os.path.join(args.data_dir, testdir + '-ingested')
    params = ImageParams(channel_count=3, height=32, width=32)
    common = dict(media_params=params, target_size=1, nclasses=10)
    # point the loaders at the ingested archives computed above
    train = DataLoader('train', repo_dir=train_path, **common)
    test = DataLoader('test', repo_dir=test_path, **common)
    err = run(args, train, test)
    return err
Example 10: get_args_and_hyperparameters
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def get_args_and_hyperparameters():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)

    # Override save path if None
    if args.save_path is None:
        args.save_path = 'frcn_alexnet.pickle'

    if args.callback_args['save_path'] is None:
        args.callback_args['save_path'] = args.save_path

    if args.callback_args['serialize'] is None:
        args.callback_args['serialize'] = min(args.epochs, 10)

    # hyperparameters
    args.batch_size = 64

    hyper_params = lambda: None  # cheap attribute container; any object would do
    hyper_params.use_pre_trained_weights = True  # If true, load pre-trained weights to the model
    hyper_params.max_train_imgs = 5000  # Make this smaller in small trial runs to save time
    hyper_params.max_test_imgs = 5000  # Make this smaller in small trial runs to save time
    hyper_params.num_epochs = args.epochs
    hyper_params.samples_per_batch = args.batch_size  # The mini-batch size
    # The number of multi-scale samples to make for each input image. These
    # samples are then fed into the network in multiple minibatches.
    hyper_params.samples_per_img = hyper_params.samples_per_batch * 7
    hyper_params.frcn_fine_tune = False
    hyper_params.shuffle = True
    if hyper_params.use_pre_trained_weights:
        # This will typically train in 10-15 epochs. Use a small learning rate
        # and quickly reduce every 5-10 epochs. Use a high momentum since we
        # are close to the minima.
        s = 1e-4
        hyper_params.learning_rate_scale = s
        hyper_params.learning_rate_sched = Schedule(step_config=[15, 20],
                                                    change=[0.1 * s, 0.01 * s])
        hyper_params.momentum = 0.9
    else:  # need to be less aggressive with reducing learning rate if the model is not pre-trained
        s = 1e-2
        hyper_params.learning_rate_scale = s
        hyper_params.learning_rate_sched = Schedule(step_config=[8, 14, 18, 20],
                                                    change=[0.5 * s, 0.1 * s, 0.05 * s, 0.01 * s])
        hyper_params.momentum = 0.1
    hyper_params.class_score_threshold = 0.000001
    hyper_params.score_exponent = 5
    hyper_params.shuffle = True
    return args, hyper_params
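The `hyper_params = lambda: None` line above is a trick: a lambda is simply a convenient object to hang attributes on. A minimal sketch of the more idiomatic standard-library alternative (my substitution, not part of the original example):
from types import SimpleNamespace

# equivalent attribute container without the lambda trick
hyper_params = SimpleNamespace(
    use_pre_trained_weights=True,
    max_train_imgs=5000,
    max_test_imgs=5000,
    shuffle=True,
)
hyper_params.momentum = 0.9  # attributes can still be added afterwards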
Example 11: run_once
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def run_once(web_input):
    """
    Run forward pass for a single input. Receives input vector from the web form.
    """
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    num_feat = 4

    # load preprocessing statistics saved at training time
    npzfile = np.load('./model/homeapp_preproc.npz')
    mean = npzfile['mean']
    std = npzfile['std']
    mean = np.reshape(mean, (1, mean.shape[0]))
    std = np.reshape(std, (1, std.shape[0]))

    # Reloading saved model
    mlp = Model("./model/homeapp_model.prm")

    # Horrible terrible hack that should never be needed :-(
    NervanaObject.be.bsz = 1

    # Actual: 275,000 Predicted: 362,177
    # web_input = np.array([51.2246169879,-1.48577399748,223.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 185,000 Predicted: 244,526
    # web_input = np.array([51.4395375168,-1.07174234072,5.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 231,500 Predicted 281,053
    web_input = np.array([52.2010084131,-2.18181259148,218.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    web_input = np.reshape(web_input, (1, web_input.shape[0]))
    web_input[:, :num_feat - 1] -= mean[:, 1:num_feat]
    web_input[:, :num_feat - 1] /= std[:, 1:num_feat]

    web_test_set = ArrayIterator(X=web_input, make_onehot=False)
    web_output = mlp.get_outputs(web_test_set)

    # Rescale the output
    web_output *= std[:, 0]
    web_output += mean[:, 0]
    return web_output[0]
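The `NervanaObject.be.bsz = 1` line above (the author's self-described hack) forces the already-generated backend down to batch size 1 for single-sample inference. Assuming you control backend creation yourself, a less intrusive sketch is to build the backend with the right batch size up front:
from neon.backends import gen_backend

# build a single-sample backend instead of patching bsz after the fact
be = gen_backend(backend='cpu', batch_size=1)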
Example 12: main
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training images
    train = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240,
                      subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240,
                    subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp_optFlow_BW', set_name='train', inner_size=240,
                     subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()

    # Optimizer for model
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=test, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f%% (Top-1), %.1f%% (Top-5)' % (mets[0],
                                                                       (1.0 - mets[1]) * 100,
                                                                       (1.0 - mets[2]) * 100))
    return
Example 13: NeonArgparser
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
from neon.backends import gen_backend
from neon.initializers import GlorotUniform
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.layers import Conv, Dropout, Activation, Pooling, GeneralizedCost, DataTransform
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Normalizer
from neon.models import Model
from neon.callbacks.callbacks import Callbacks
from neon.data import ImgMaster, ImageLoader
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--deconv', action='store_true',
                    help='save visualization data from deconvolution')
parser.add_argument('--loader_version', default='old', choices=['old', 'new'],
                    help='whether to use old dataloader (ImgMaster) or new (ImageLoader)')
args = parser.parse_args()

# hyperparameters
batch_size = 64

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 datatype=args.datatype)

# setup data provider
img_provider = ImgMaster if args.loader_version == 'old' else ImageLoader
img_set_options = dict(repo_dir=args.data_dir,
Example 14: NeonArgparser
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
import os
from neon import logger as neon_logger  # assumed import for neon_logger used below
from neon.util.argparser import NeonArgparser

do_plots = True
try:
    import matplotlib.pyplot as plt
    plt.switch_backend('agg')
except ImportError:
    neon_logger.display('matplotlib needs to be installed manually to generate plots needed '
                        'for this example. Skipping plot generation')
    do_plots = False

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--img_prefix', type=str,
                    help='prefix for the saved image file names. If None, use '
                         'the model file name')
args = parser.parse_args(gen_be=True)
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"

if args.img_prefix is None:
    args.img_prefix = os.path.splitext(os.path.basename(args.model_file))[0]

output_dir = os.path.join(args.data_dir, 'frcn_output')
if not os.path.isdir(output_dir):
    os.mkdir(output_dir)

# hyperparameters
args.batch_size = 1
n_mb = 40
img_per_batch = args.batch_size
rois_per_img = 5403
Example 15: NeonArgparser
# Required import: from neon.util.argparser import NeonArgparser [as alias]
# Or: from neon.util.argparser.NeonArgparser import parse_args [as alias]
from neon.backends import gen_backend
from neon.data import Text, load_text
from neon.initializers import Uniform
from neon.layers import GeneralizedCost, LSTM, Affine, GRU, LookupTable
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--rlayer_type', default='lstm', choices=['gru', 'lstm'],
                    help='type of recurrent layer to use (gru or lstm)')
args = parser.parse_args(gen_be=False)

# hyperparameters from the reference
args.batch_size = 20
time_steps = 20
hidden_size = 200
gradient_clip_norm = 5

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# download penn treebank
train_path = load_text('ptb-train', path=args.data_dir)
valid_path = load_text('ptb-valid', path=args.data_dir)