This article collects typical usage examples of the options function from the Python module stanza.research.config. If you have been asking yourself how options is used, what it does, or where to find examples of it, the hand-picked snippets below should help.
The following shows 15 code examples of the options function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
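Most of the examples share one pattern: each module registers its command-line flags on a shared parser at import time, then calls config.options() to get the parsed values (analysis scripts pass read=True to also pick up the saved config of an existing run directory). Here is a minimal sketch of that pattern, assuming the get_options_parser() registration helper this config module provides; the --num_distractors flag is made up for illustration:

from stanza.research import config

# Register a flag at import time; options() picks it up when called.
parser = config.get_options_parser()
parser.add_argument('--num_distractors', type=int, default=4,
                    help='Hypothetical flag: number of distractors per game.')

def main():
    options = config.options()  # parses the command-line arguments
    print(options.num_distractors)

if __name__ == '__main__':
    main()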
Example 1: main
def main():
    options = config.options(read=True)
    app = wx.App()  # NOQA: wx needs an App even if we're only showing a few modal dialogs

    this_output = html_report.get_output(options.run_dir, options.split)
    this_insts = get_trial_data(this_output, options.test_size, options.run_dir)
    if options.compare_dir:
        compare_output = html_report.get_output(options.compare_dir, options.split)
        compare_insts = get_trial_data(compare_output, options.test_size, options.run_dir)
    else:
        compare_insts = []
    all_insts = this_insts + compare_insts
    random.shuffle(all_insts)

    human = HumanListener()
    human.train(all_insts)

    m = [metrics.squared_error]
    test_results = evaluate.evaluate(human, this_insts, split_id='human_eval', metrics=m)
    output.output_results(test_results, options.run_dir)
    if compare_insts:
        test_results = evaluate.evaluate(human, compare_insts,
                                         split_id='human_eval_compare', metrics=m)
        output.output_results(test_results, options.compare_dir)
Example 2: evaluate_l1_eval
def evaluate_l1_eval():
    options = config.options(read=True)

    grids_path = os.path.join(options.run_dir, 's0_grids.0.jsons.gz')
    with gzip.open(grids_path, 'rb') as infile:
        grids = [json.loads(line.strip()) for line in infile]
    data_path = os.path.join(options.run_dir, 'data.eval.jsons')
    with open(data_path, 'r') as infile:
        insts = [instance.Instance(**json.loads(line.strip()))
                 for line in infile]
    assert len(grids) == len(insts), '{} != {}'.format(len(grids), len(insts))

    gold_outputs = np.array([inst.output for inst in insts])
    s0 = np.array([[np.array(ss['S0']).T for ss in grid['sets']]
                   for grid in grids])
    l1 = compute_l1(s0, alpha=options.alpha)
    l1_scores = l1[np.arange(l1.shape[0]), gold_outputs].tolist()
    l1_preds = np.argmax(l1, axis=1).tolist()

    m = [metrics.log_likelihood,
         metrics.log_likelihood_bits,
         metrics.perplexity,
         metrics.accuracy]
    learner = DummyLearner(l1_preds, l1_scores)
    results = evaluate.evaluate(learner, insts, metrics=m, split_id='l1_eval',
                                write_data=False)
    output.output_results(results, 'l1_eval')
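compute_l1 itself is not shown in this snippet. In the RSA (rational speech acts) family, such L1 scores typically come from renormalizing the S0 speaker's log-probabilities over candidate referents under a temperature alpha. Here is a minimal numpy sketch of that computation, assuming s0_log is already a (num_instances, num_referents) array of log-probs; the real compute_l1 also handles the nested 'sets' structure above and may differ:

import numpy as np

def compute_l1_sketch(s0_log, alpha=1.0):
    # L1(referent | utterance): log-softmax of alpha * log S0 across
    # referents, i.e. Bayes' rule with a uniform referent prior.
    weighted = alpha * s0_log
    log_norm = np.logaddexp.reduce(weighted, axis=1, keepdims=True)
    return weighted - log_norm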
Example 3: run_listener
def run_listener(self, listener_class=ListenerLearner, cell='LSTM', tensorboard=True):
    sys.argv = []
    options = config.options()
    options.train_iters = 2
    options.train_epochs = 3
    options.listener_cell = cell
    options.listener = True

    mo = MockOpen(TEST_DIR)
    mgfp = mock_get_file_path(TEST_DIR)
    with mock.patch('stanza.monitoring.summary.open', mo), \
            mock.patch('stanza.monitoring.summary.SummaryWriter', MockSummaryWriter), \
            mock.patch('stanza.research.config.open', mo), \
            mock.patch('stanza.research.config.get_file_path', mgfp):
        listener = listener_class()
        train_data = [instance.Instance('green', (0, 255, 0))]
        listener.train(train_data)
        predictions, scores = listener.predict_and_score(train_data)

        # predictions = [(123, 45, 67)]
        self.assertIsInstance(predictions, list)
        self.assertEqual(len(predictions), 1)
        self.assertEqual(len(predictions[0]), 3)
        self.assertIsInstance(predictions[0][0], Number)

        # scores = [123.456]
        self.assertIsInstance(scores, list)
        self.assertEqual(len(scores), 1)
        self.assertIsInstance(scores[0], float)

        if tensorboard:
            self.check_tensorboard(mo, mgfp, images=True)
Example 4: run_speaker
def run_speaker(self, speaker_class, cell='LSTM', color_repr='buckets',
                tensorboard=True, images=False):
    sys.argv = []
    options = config.options()
    options.train_iters = 2
    options.train_epochs = 3
    options.speaker_cell = cell
    options.speaker_color_repr = color_repr
    options.listener = False

    mo = MockOpen(TEST_DIR)
    mgfp = mock_get_file_path(TEST_DIR)
    with mock.patch('stanza.monitoring.summary.open', mo), \
            mock.patch('stanza.monitoring.summary.SummaryWriter', MockSummaryWriter), \
            mock.patch('stanza.research.config.open', mo), \
            mock.patch('stanza.research.config.get_file_path', mgfp):
        speaker = speaker_class()
        train_data = [instance.Instance((0, 255, 0), 'green')]
        speaker.train(train_data)
        predictions, scores = speaker.predict_and_score(train_data)

        # predictions = ['somestring']
        self.assertIsInstance(predictions, list)
        self.assertEqual(len(predictions), 1)
        self.assertIsInstance(predictions[0], basestring)  # Python 2: str or unicode

        # scores = [123.456]
        self.assertIsInstance(scores, list)
        self.assertEqual(len(scores), 1)
        self.assertIsInstance(scores[0], float)

        if tensorboard:
            self.check_tensorboard(mo, mgfp, images=images)
Example 5: __init__
def __init__(self):
    options = config.options()
    self.game_config = cards_config.new(options.game_config)

    self.viewer = None
    self.verbosity = 4

    # One action for each player
    player = spaces.Discrete(len(ACTIONS))
    # should this be spaces.Tuple((player, player)) for 2 players?
    self.action_space = spaces.Tuple([player for _ in range(MAX_BATCH_SIZE)])
    # One board for walls, one for card observations, one for player location
    board = spaces.Box(np.zeros(MAX_BOARD_SIZE), np.ones(MAX_BOARD_SIZE))
    language_player = spaces.Box(np.array(0.), np.array(1.))
    language = spaces.Tuple([language_player for _ in range(self.game_config.num_players - 1)])
    hand = spaces.Box(np.zeros((3, len(RANKS), len(SUITS))),
                      np.ones((3, len(RANKS), len(SUITS))))
    floor = spaces.Box(np.zeros((len(RANKS), len(SUITS))),
                       np.ones((len(RANKS), len(SUITS))))
    all_obs = (board, board, board, hand, floor, language)
    self.observation_space = spaces.Tuple([e
                                           for _ in range(MAX_BATCH_SIZE)
                                           for e in all_obs])

    self.clear_boards()

    import world
    self.default_world = world.CardsWorld(all_transcripts()[0])

    self._seed()
Example 6: write_metrics
def write_metrics():
    options = config.options(read=True)
    for split in options.splits:
        output = html_report.get_output(options.run_dir, split)
        for m in options.metrics:
            write_metric_for_split(output, options.run_dir, split, m)
Example 7: output_sample
def output_sample(model):
    options = config.options()
    insts = model.sample_joint_smooth(num_samples=options.num_samples)
    if not options.listener:
        insts = [inst.inverted() for inst in insts]
    html = rsa_fit_data.get_html(insts, title='Agent samples (smoothed prior)')
    config.dump([inst.__dict__ for inst in insts], 'data.sample.jsons', lines=True)
    with config.open('report.sample.html', 'w') as outfile:
        outfile.write(html)
Example 8: train
def train(self, training_instances, validation_instances='ignored', metrics='ignored'):
    options = config.options()
    for inst in training_instances:
        inp, out = inst.input, inst.output
        if options.listener:
            out = self.vectorize(out)
        else:
            inp = self.vectorize(inp)
        self.counters[inp][out] += 1
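A counter table trained this way is usually consumed by returning the most frequent output seen for each input. A hypothetical predict method to pair with the train above (not part of the source) might look like:

def predict(self, eval_instances):
    # Hypothetical counterpart (not in the source): return the most
    # frequently seen output for each (vectorized) input.
    options = config.options()
    predictions = []
    for inst in eval_instances:
        inp = inst.input if options.listener else self.vectorize(inst.input)
        counter = self.counters[inp]
        predictions.append(counter.most_common(1)[0][0] if counter else None)
    return predictions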
Example 9: evaluate_ak_blending
def evaluate_ak_blending():
    options = config.options(read=True)

    grids_path = os.path.join(options.run_dir, 's0_grids.0.jsons.gz')
    with gzip.open(grids_path, 'rb') as infile:
        grids = [json.loads(line.strip()) for line in infile]
    data_path = os.path.join(options.run_dir, 'data.eval.jsons')
    with open(data_path, 'r') as infile:
        insts = [instance.Instance(**json.loads(line.strip()))
                 for line in infile]
    assert len(grids) == len(insts), '{} != {}'.format(len(grids), len(insts))

    gold_outputs = np.array([inst.output for inst in insts])
    l0 = np.array([[np.array(ss['L0']).T for ss in grid['sets']]
                   for grid in grids])
    s0 = np.array([[np.array(ss['S0']).T for ss in grid['sets']]
                   for grid in grids])

    if options.additive:
        ak = compute_additive(l0, s0,
                              bw=options.base_weight,
                              sw=options.speaker_weight,
                              alpha_s1=options.alpha,
                              alpha_l1=options.alpha_l1)
    else:
        ak = compute_ak(l0, s0,
                        bw=options.base_weight,
                        sw=options.speaker_weight,
                        alpha=options.alpha,
                        gamma=options.gamma)
    ak_scores = ak[np.arange(ak.shape[0]), gold_outputs].tolist()
    ak_preds = np.argmax(ak, axis=1).tolist()

    m = [metrics.log_likelihood,
         metrics.log_likelihood_bits,
         metrics.perplexity,
         metrics.accuracy]
    learner = DummyLearner(ak_preds, ak_scores, params={
        'base_weight': options.base_weight,
        'speaker_weight': options.speaker_weight,
        'alpha': options.alpha,
        'alpha_l1': options.alpha_l1,
        'gamma': options.gamma,
        'additive': options.additive,
    })
    split_id = '{}_eval'.format(options.blend_name)
    results = evaluate.evaluate(learner, insts, metrics=m,
                                split_id=split_id,
                                write_data=False)
    output.output_results(results, split_id)

    options_dump = vars(options)
    del options_dump['overwrite']
    del options_dump['config']
    config.dump_pretty(options_dump, split_id + '_config.json')
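Neither compute_ak nor compute_additive appears in this snippet. As a hedged sketch of what an additive blend of a base listener L0 with an S0-derived pragmatic listener could look like in log space (ignoring the alpha and gamma temperatures, and assuming (num_instances, num_referents) log-prob arrays; the real functions may well differ):

import numpy as np

def compute_additive_sketch(l0_log, s0_log, bw=1.0, sw=1.0):
    # Renormalize S0 over referents to get a pragmatic listener, then mix
    # the two listeners' log-probs with weights bw and sw and renormalize.
    l1_log = s0_log - np.logaddexp.reduce(s0_log, axis=1, keepdims=True)
    blend = bw * l0_log + sw * l1_log
    return blend - np.logaddexp.reduce(blend, axis=1, keepdims=True)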
Example 10: __init__
def __init__(self):
    import learners
    import cards_env
    options = config.options()

    if options.verbosity >= 4:
        print('Loading speaker')
    self.speaker = learners.new(options.p2_learner)
    self.speaker.load(options.p2_load)

    self.utterances = [None for _ in range(cards_env.MAX_BATCH_SIZE)]
    self.ace_locs = [None for _ in range(cards_env.MAX_BATCH_SIZE)]
Example 11: tune_queue
def tune_queue(main_fn):
    config.redirect_output()
    options = config.options()
    if any('tune' not in s for s in options.data_source):
        warnings.warn('expected all --data_source\'s to contain "tune", instead got "{}". '
                      'Are you polluting your dev/test set?'.format(options.data_source))
    if 'gpu' in options.device or 'cuda' in options.device:
        warnings.warn('device is "{}". Have you checked that all processes will fit '
                      'on one GPU? (Random GPU assignment has not been implemented '
                      'yet.)'.format(options.device))

    with open(options.tune_config, 'r') as infile:
        tune_options = config.HoconConfigFileParser().parse(infile)

    reg = ProcessRegistry(main_fn, tune_options, options.tune_maximize)

    remaining_random = options.tune_random
    remaining_local = options.tune_local
    if options.tune_local <= 0:
        remaining_local = None

    try:
        reg.start_default()
        while remaining_random > 0 and reg.running_processes < options.tune_max_processes:
            reg.start_random()
            remaining_random -= 1
        while remaining_local > 0 and reg.running_processes < options.tune_max_processes:
            reg.start_local()
            # the source decremented remaining_random here, which looks like
            # a typo: it would never use up the local-search budget
            remaining_local -= 1
        while reg.running_processes > 0:
            name, objective = reg.get()
            print('\nTUNE: {:10.3f} {}\n'.format(objective, name[:70]))
            while remaining_random > 0 and reg.running_processes < options.tune_max_processes:
                reg.start_random()
                remaining_random -= 1
            while (remaining_local is None or remaining_local > 0) and \
                    reg.running_processes < options.tune_max_processes:
                try:
                    reg.start_local()
                    if remaining_local is not None:
                        remaining_local -= 1
                except StopIteration:
                    print('no new local search candidates')
                    break
    except KeyboardInterrupt:
        reg.terminate()

    print('')
    print('best result:')
    print('{:10.3f} {}'.format(reg.best_objective, str(reg.best_name)[:70]))
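tune_queue expects main_fn to behave like test_main in Example 13 below: run one configuration and return a pair whose summary dict contains the tuning objective. A typical entry point would then just be:

if __name__ == '__main__':
    tune_queue(test_main)  # test_main as in Example 13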
Example 12: generate_html_reports
def generate_html_reports(run_dir=None, compare_dir=None):
    options = config.options(read=True)
    run_dir = run_dir or options.run_dir
    compare_dir = compare_dir or options.compare_dir

    # pass the resolved compare_dir (the source passed options.compare_dir,
    # which silently ignored the compare_dir argument)
    for output, compare, out_path in get_all_outputs(run_dir, compare_dir):
        with open(out_path, 'w') as outfile:
            outfile.write(html_report(output, compare, per_token=options.per_token_prob,
                                      only_differing=options.only_differing_preds,
                                      show_all=options.show_all,
                                      show_tokens=options.show_tokens))
Example 13: test_main
def test_main():
    options = config.options()
    import sys
    print('stdout')
    sys.stderr.write('stderr\n')
    return {}, {
        'eval.perplexity.gmean': (options.speaker_learning_rate +
                                  options.speaker_cell_size +
                                  len(options.speaker_optimizer))
    }
Example 14: reference_game
def reference_game(insts, gen_func, listener=False):
    options = config.options()
    for i in range(len(insts)):
        color = insts[i].output if listener else insts[i].input
        distractors = [gen_func(color) for _ in range(options.num_distractors)]
        answer = rng.randint(0, len(distractors) + 1)
        context = distractors[:answer] + [color] + distractors[answer:]
        ref_inst = (Instance(insts[i].input, answer, alt_outputs=context)
                    if listener else
                    Instance(answer, insts[i].output, alt_inputs=context))
        insts[i] = ref_inst
    return insts
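A hypothetical call to the function above, with a made-up gen_func that samples uniformly random RGB distractors (assuming rng uses numpy's exclusive upper bound, as the answer sampling above suggests):

def uniform_color(_target):
    # Made-up distractor generator: ignore the target, sample any color.
    return tuple(rng.randint(0, 256) for _ in range(3))

ref_insts = reference_game(insts, uniform_color, listener=False)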
Example 15: __init__
def __init__(self):
    options = config.options()
    self.counters = defaultdict(Counter)
    if options.listener:
        res = options.listener_color_resolution
        hsv = options.listener_hsv
    else:
        res = options.speaker_color_resolution
        hsv = options.speaker_hsv
    self.res = res
    self.hsv = hsv
    self.init_vectorizer()
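init_vectorizer and vectorize are not shown. Given the resolution and HSV flags read above, a plausible (purely illustrative) vectorizer would discretize each channel of a 0-255 color into res buckets, optionally after converting RGB to HSV:

import colorsys

def vectorize(self, color):
    # Illustrative sketch only; the real vectorizer may bucket differently
    # (e.g. a per-channel resolution tuple rather than a single res).
    if self.hsv:
        h, s, v = colorsys.rgb_to_hsv(*[c / 255.0 for c in color])
        color = tuple(int(c * 255) for c in (h, s, v))
    return tuple(min(c * self.res // 256, self.res - 1) for c in color)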