This page collects typical usage examples of the Python class neon.util.argparser.NeonArgparser. If you are wondering what the NeonArgparser class is for, or how it is used in practice, the curated examples below should help.
The following shows 15 code examples of the NeonArgparser class, sorted by popularity by default.
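Before the individual examples, here is a minimal sketch of the pattern that nearly all of them share (a sketch only, assuming neon is installed; the option names mentioned in the comments are neon's standard command-line flags):

from neon.util.argparser import NeonArgparser

# The calling script's docstring becomes the --help description.
parser = NeonArgparser(__doc__)

# parse_args() reads neon's standard flags (--epochs, --batch_size, --data_dir,
# --save_path, --model_file, ...) and, unless called with gen_be=False, also
# creates the compute backend.
args = parser.parse_args()

# Parsed values are plain attributes; callback-related options are pre-grouped
# in args.callback_args so they can be forwarded as Callbacks(**args.callback_args).
print(args.data_dir, args.epochs)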
Example 1: test_iterator
def test_iterator():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = DataIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = DataIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
Example 2: test_iterator
def test_iterator():
    print('Testing iterator based data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
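Examples 1 and 2 are essentially the same test written against different neon releases: DataIterator is the older name of the class that later neon versions expose as ArrayIterator.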
Example 3: get_data
def get_data():
"""
Download bilingual text dataset for Machine translation example.
"""
# vocab_size and time_steps are hard coded here
vocab_size = 16384
time_steps = 20
# download dataset
url = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/'
filename = 'bitexts.tgz'
size = 1313280000
parser = NeonArgparser(__doc__)
args = parser.parse_args(gen_be=False)
data_dir = os.path.join(args.data_dir, 'nmt')
_, filepath = Dataset._valid_path_append(data_dir, '', filename)
if not os.path.exists(filepath):
Dataset.fetch_dataset(url, filename, filepath, size)
# extract selected datasets
datafiles = dict()
datafiles['un2000'] = ('un2000_pc34.en.gz', 'un2000_pc34.fr.gz')
datafiles['europarl7'] = ('ep7_pc45.en.gz', 'ep7_pc45.fr.gz')
extractpath = os.path.join(data_dir, 'bitexts.selected')
with tarfile.open(filepath, 'r') as tar_ref:
for dset, files in datafiles.items():
datasetpath = os.path.join(data_dir, dset)
# extract the files for dataset, if not already there
for zipped in files:
fname = '.'.join(zipped.split('.')[:-1])
fpath = os.path.join(datasetpath, fname)
if not os.path.exists(fpath):
gzpath = os.path.join(extractpath, zipped)
if not os.path.exists(gzpath):
select = [ti for ti in tar_ref if os.path.split(ti.name)[1] == zipped]
tar_ref.extractall(path=data_dir, members=select)
# get contents of gz files
if not os.path.exists(datasetpath):
os.makedirs(datasetpath)
with gzip.open(gzpath, 'r') as fin, open(fpath, 'w') as fout:
fout.write(fin.read())
os.remove(gzpath)
if os.path.exists(extractpath):
os.rmdir(extractpath)
# process data and save to h5 file
# loop through all datasets and get train and valid splits
for dataset in datafiles.keys():
s_vocab, t_vocab = create_h5py(data_dir, dataset, 'train',
vocab_size=vocab_size, time_steps=time_steps)
create_h5py(data_dir, dataset, 'valid', s_vocab=s_vocab, t_vocab=t_vocab,
time_steps=time_steps)
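Note the call to parser.parse_args(gen_be=False) in this example: a pure data-preparation script never needs the compute backend, so backend generation is skipped and only the parsed paths (args.data_dir) are used.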
Example 4: test_loader
def test_loader():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_dir = os.path.join(args.data_dir, 'macrotrain')
    test_dir = os.path.join(args.data_dir, 'macrotest')
    write_batches(args, train_dir, trainimgs, 0)
    write_batches(args, test_dir, testimgs, 1)

    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        repo_dir=train_dir)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       repo_dir=test_dir)
    err = run(args, train, test)
    return err
Example 5: main
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    print("Setting up data batch loaders...")
    train = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()
    # model.load_weights(args.model_file)

    # drop weights LR by 1/250**(1/3) at epochs (23, 45, 66), drop bias LR by 1/10 at epoch 45
    weight_sched = Schedule([22, 44, 65, 90, 97], (1/250.)**(1/3.))
    opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.005, schedule=weight_sched)
    opt_biases = GradientDescentMomentum(0.04, 1.0, schedule=Schedule([130], .1))
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)'
          % (mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))

    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
Example 6: main
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    train = ImgMaster(repo_dir="spectroDataTmp", set_name="train", inner_size=400, subset_pct=100)
    val = ImgMaster(
        repo_dir="spectroDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )
    test = ImgMaster(
        repo_dir="spectroTestDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )

    train.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    model = constuct_network()
    model.load_weights(args.model_file)

    # Optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    print(args.epochs)
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print("Validation set metrics:")
    print("LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)"
          % (mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))

    test.exit_batch_provider()
    train.exit_batch_provider()
Example 7: test_loader
def test_loader():
    print('Testing image loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_archive = os.path.join(args.data_dir, traindir + '-ingested')
    test_archive = os.path.join(args.data_dir, testdir + '-ingested')
    write_batches(args, train_archive, traindir, 0)
    write_batches(args, test_archive, testdir, 1)

    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        scale_range=0, repo_dir=train_archive)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       scale_range=0, repo_dir=test_archive)
    err = run(args, train, test)
    return err
Example 8: test_loader
def test_loader():
    print('Testing generic data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_path = os.path.join(args.data_dir, traindir + '-ingested')
    test_path = os.path.join(args.data_dir, testdir + '-ingested')

    params = ImageParams(channel_count=3, height=32, width=32)
    common = dict(media_params=params, target_size=1, nclasses=10)
    train = DataLoader('train', repo_dir=os.path.join(args.data_dir, 'train'),
                       **common)
    test = DataLoader('test', repo_dir=os.path.join(args.data_dir, 'test'),
                      **common)
    err = run(args, train, test)
    return err
Example 9: get_args_and_hyperparameters
def get_args_and_hyperparameters():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)

    # Override save path if None
    if args.save_path is None:
        args.save_path = 'frcn_alexnet.pickle'

    if args.callback_args['save_path'] is None:
        args.callback_args['save_path'] = args.save_path

    if args.callback_args['serialize'] is None:
        args.callback_args['serialize'] = min(args.epochs, 10)

    # hyperparameters
    args.batch_size = 64

    hyper_params = lambda: None
    hyper_params.use_pre_trained_weights = True  # If true, load pre-trained weights to the model
    hyper_params.max_train_imgs = 5000  # Make this smaller in small trial runs to save time
    hyper_params.max_test_imgs = 5000   # Make this smaller in small trial runs to save time
    hyper_params.num_epochs = args.epochs
    hyper_params.samples_per_batch = args.batch_size  # The mini-batch size
    # The number of multi-scale samples to make for each input image. These
    # samples are then fed into the network in multiple minibatches.
    hyper_params.samples_per_img = hyper_params.samples_per_batch * 7
    hyper_params.frcn_fine_tune = False
    hyper_params.shuffle = True

    if hyper_params.use_pre_trained_weights:
        # This will typically train in 10-15 epochs. Use a small learning rate
        # and quickly reduce every 5-10 epochs. Use a high momentum since we
        # are close to the minima.
        s = 1e-4
        hyper_params.learning_rate_scale = s
        hyper_params.learning_rate_sched = Schedule(step_config=[15, 20],
                                                    change=[0.1 * s, 0.01 * s])
        hyper_params.momentum = 0.9
    else:  # need to be less aggressive with reducing learning rate if the model is not pre-trained
        s = 1e-2
        hyper_params.learning_rate_scale = 1e-2
        hyper_params.learning_rate_sched = Schedule(step_config=[8, 14, 18, 20],
                                                    change=[0.5 * s, 0.1 * s, 0.05 * s, 0.01 * s])
        hyper_params.momentum = 0.1

    hyper_params.class_score_threshold = 0.000001
    hyper_params.score_exponent = 5
    hyper_params.shuffle = True
    return args, hyper_params
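A hedged sketch of how a caller might consume the returned pair; the gen_backend / extract_valid_args combination is the one shown in Examples 13 and 15, while the caller itself is hypothetical:

from neon.backends import gen_backend
from neon.util.argparser import extract_valid_args

args, hyper_params = get_args_and_hyperparameters()

# parse_args(gen_be=False) above skipped backend creation, so the caller builds
# the backend explicitly, forwarding only the parsed options that gen_backend()
# actually accepts (backend type, batch size, device id, ...).
be = gen_backend(**extract_valid_args(args, gen_backend))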
Example 10: run_once
def run_once(web_input):
"""
Run forward pass for a single input. Receives input vector from the web form.
"""
parser = NeonArgparser(__doc__)
args = parser.parse_args()
num_feat = 4
npzfile = np.load('./model/homeapp_preproc.npz')
mean = npzfile['mean']
std = npzfile['std']
mean = np.reshape(mean, (1,mean.shape[0]))
std = np.reshape(std, (1,std.shape[0]))
# Reloading saved model
mlp=Model("./model/homeapp_model.prm")
# Horrible terrible hack that should never be needed :-(
NervanaObject.be.bsz = 1
# Actual: 275,000 Predicted: 362,177
#web_input = np.array([51.2246169879,-1.48577399748,223.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
# Actual 185,000 Predicted: 244,526
#web_input = np.array([51.4395375168,-1.07174234072,5.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0])
# Actual 231,500 Predicted 281,053
web_input = np.array([52.2010084131,-2.18181259148,218.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
web_input = np.reshape(web_input, (1,web_input.shape[0]))
web_input[:,:num_feat-1] -= mean[:,1:num_feat]
web_input[:,:num_feat-1] /= std[:,1:num_feat]
web_test_set = ArrayIterator(X=web_input, make_onehot=False)
web_output = mlp.get_outputs(web_test_set)
#Rescale the output
web_output *= std[:,0]
web_output += mean[:,0]
return web_output[0]
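In this example parse_args() has already created the backend, so the "horrible terrible hack" simply resets the backend batch size (NervanaObject.be.bsz) to 1 so that a single input vector can be pushed through the saved model.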
Example 11: main
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    train = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()

    # Optimizer for model
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=test, metric=valmetric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    mets = model.eval(test, metric=valmetric)
    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)'
          % (mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))
    return
Example 12: caption_video
    return manifest_file


def caption_video(infile, caption, outfile):
    cmd = '''ffmpeg -i {0} -an \
              -vf drawtext="textfile={1}: fontcolor=white: fontsize=16: box=1: boxcolor=black@0.5" \
              -y {2}'''
    proc = subprocess.Popen(cmd.format(infile, caption, outfile), shell=True)
    proc.communicate()

# parse the command line arguments
demo_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test.cfg')
config_files = [demo_config] if os.path.exists(demo_config) else []
parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--input_video', help='video file')
parser.add_argument('--output_video', help='Video file with overlaid inference hypotheses')
args = parser.parse_args()
assert args.model_file is not None, "need a model file for testing"
model = Model(args.model_file)
assert 'categories' in args.manifest, "Missing categories file"
category_map = {t[0]: t[1] for t in np.genfromtxt(args.manifest['categories'],
                                                  dtype=None, delimiter=',')}
# Make a temporary directory and clean up afterwards
outdir = mkdtemp()
atexit.register(shutil.rmtree, outdir)
caption_file = os.path.join(outdir, 'caption.txt')
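Two NeonArgparser features used here are worth calling out: default_config_files lets an optional config file such as test.cfg pre-populate argument values when it exists, and parser.add_argument() registers script-specific flags that end up on the same args namespace as neon's built-in options. Example 13 below uses both features the same way.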
Example 13: NeonArgparser
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, StepSchedule
from neon.callbacks.callbacks import Callbacks
from neon.util.persist import save_obj, get_data_cache_dir
from objectlocalization import PASCALVOC
from neon.transforms import CrossEntropyMulti, SmoothL1Loss
from neon.layers import Multicost, GeneralizedCostMask
import util
import faster_rcnn
import os

train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pascalvoc.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

# parse the command line arguments
parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--width', type=int, default=1000, help='Width of input image')
parser.add_argument('--height', type=int, default=1000, help='Height of input image')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# hyperparameters
assert args.batch_size == 1, "Faster-RCNN only supports batch size 1"
assert 'train' in args.manifest

rpn_rois_per_img = 256   # number of rois to sample to train rpn
frcn_rois_per_img = 128  # number of rois to sample to train frcn

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
Example 14: train_mlp
def train_mlp():
"""
Train data and save scaling and network weights and biases to file
to be used by forward prop phase on test data
"""
parser = NeonArgparser(__doc__)
args = parser.parse_args()
logger = logging.getLogger()
logger.setLevel(args.log_thresh)
# hyperparameters
num_epochs = args.epochs
#preprocessor
std_scale = preprocessing.StandardScaler(with_mean=True,with_std=True)
#std_scale = feature_scaler(type='Standardizer',with_mean=True,with_std=True)
#number of non one-hot encoded features, including ground truth
num_feat = 4
# load up the mnist data set
# split into train and tests sets
#load data from csv-files and rescale
#training
traindf = pd.DataFrame.from_csv('data/train.csv')
ncols = traindf.shape[1]
#tmpmat=std_scale.fit_transform(traindf.as_matrix())
#print std_scale.scale_
#print std_scale.mean_
tmpmat = traindf.as_matrix()
#print tmpmat[:,1:num_feat]
tmpmat[:,:num_feat] = std_scale.fit_transform(tmpmat[:,:num_feat])
X_train = tmpmat[:,1:]
y_train = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
#validation
validdf = pd.DataFrame.from_csv('data/validate.csv')
ncols = validdf.shape[1]
tmpmat = validdf.as_matrix()
tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
X_valid = tmpmat[:,1:]
y_valid = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
#test
testdf = pd.DataFrame.from_csv('data/test.csv')
ncols = testdf.shape[1]
tmpmat = testdf.as_matrix()
tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
X_test = tmpmat[:,1:]
y_test = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
# setup a training set iterator
train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
# setup a validation data set iterator
valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
# setup a validation data set iterator
test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)
# setup weight initialization function
init_norm = Xavier()
# setup model layers
layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
Dropout(keep=0.5),
Affine(nout=X_train.shape[1]/2, init=init_norm, activation=Rectlin()),
Linear(nout=1, init=init_norm)]
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=SmoothL1Loss())
# setup optimizer
#schedule
#schedule = ExpSchedule(decay=0.3)
#optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)
# initialize model object
mlp = Model(layers=layers)
# configure callbacks
if args.callback_args['eval_freq'] is None:
args.callback_args['eval_freq'] = 1
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
callbacks.add_early_stop_callback(stop_func)
callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))
# run fit
mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
#evaluate model
print('Evaluation Error = %.4f'%(mlp.eval(valid_set, metric=SmoothL1Metric())))
print('Test set error = %.4f'%(mlp.eval(test_set, metric=SmoothL1Metric())))
#.........这里部分代码省略.........
Example 15: NeonArgparser
Reference:
https://github.com/karpathy/neuraltalk
"""
from neon.backends import gen_backend
from neon.data import load_flickr8k, ImageCaption, ImageCaptionTest
from neon.initializers import Uniform, Constant
from neon.layers import GeneralizedCostMask, LSTM, Affine, Dropout, Sequential, MergeMultistream
from neon.models import Model
from neon.optimizers import RMSProp
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args(gen_be=False)
# hyperparameters
hidden_size = 512
num_epochs = args.epochs
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
# download dataset
data_path = load_flickr8k(path=args.data_dir) # Other setnames are flickr30k and coco
# load data
train_set = ImageCaption(path=data_path, max_images=-1)