This article collects typical usage examples of the base.Model method in Python. If you have been wondering how exactly to use base.Model, or what it is good for, the curated code examples below may help. You can also explore further usage examples from base, the module this method belongs to.
The following shows 5 code examples of the base.Model method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required module: import base [as alias]
# Or: from base import Model [as alias]
def __init__(self,
             char_vocab_size,
             glove_vocab_size,
             word_vocab_size,
             hidden_size,
             embed_size,
             dropout,
             num_heads,
             max_ans_len=7,
             elmo=False,
             max_pool=False,
             num_layers=1,
             glove_cpu=False,
             metric='ip',
             **kwargs):
    super(Model, self).__init__()
    self.embedding = Embedding(char_vocab_size, glove_vocab_size, word_vocab_size, embed_size, dropout,
                               elmo=elmo, glove_cpu=glove_cpu)
    self.context_embedding = self.embedding
    self.question_embedding = self.embedding
    word_size = self.embedding.output_size
    context_input_size = word_size
    question_input_size = word_size
    self.context_start = ContextBoundary(context_input_size, hidden_size, dropout, num_heads, num_layers=num_layers)
    self.context_end = ContextBoundary(context_input_size, hidden_size, dropout, num_heads, num_layers=num_layers)
    self.question_start = QuestionBoundary(question_input_size, hidden_size, dropout, num_heads, max_pool=max_pool)
    self.question_end = QuestionBoundary(question_input_size, hidden_size, dropout, num_heads, max_pool=max_pool)
    self.softmax = nn.Softmax(dim=1)
    self.max_ans_len = max_ans_len
    self.linear = nn.Linear(word_size, 1)
    self.metric = metric
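For context, a minimal instantiation sketch. The hyperparameter values below are hypothetical, and it is assumed that the class containing this __init__ is exported as Model and that Embedding, ContextBoundary, and QuestionBoundary come from the same codebase:

model = Model(char_vocab_size=260,
              glove_vocab_size=400000,
              word_vocab_size=50000,
              hidden_size=128,
              embed_size=200,
              dropout=0.2,
              num_heads=2,
              max_ans_len=7,
              metric='ip')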
Example 2: __init__
# Required module: import base [as alias]
# Or: from base import Model [as alias]
def __init__(self, sess, reader, dataset="ptb",
             decay_rate=0.96, decay_step=10000, embed_dim=500,
             h_dim=50, learning_rate=0.001, max_iter=450000,
             checkpoint_dir="checkpoint"):
    """Initialize Neural Variational Document Model.

    params:
      sess: TensorFlow Session object.
      reader: TextReader object for training and test.
      dataset: The name of dataset to use.
      h_dim: The dimension of document representations (h). [50, 200]
    """
    self.sess = sess
    self.reader = reader

    self.h_dim = h_dim
    self.embed_dim = embed_dim

    self.max_iter = max_iter
    self.decay_rate = decay_rate
    self.decay_step = decay_step
    self.checkpoint_dir = checkpoint_dir
    self.step = tf.Variable(0, trainable=False)
    # use the decay_step argument rather than a hard-coded 10000
    self.lr = tf.train.exponential_decay(
        learning_rate, self.step, decay_step, decay_rate, staircase=True, name="lr")
    _ = tf.scalar_summary("learning rate", self.lr)

    self.dataset = dataset
    self._attrs = ["h_dim", "embed_dim", "max_iter", "dataset",
                   "learning_rate", "decay_rate", "decay_step"]

    self.build_model()
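A hedged construction sketch (NVDM-style; tf refers to the TensorFlow 0.x API used above, and the TextReader constructor arguments are assumptions):

import tensorflow as tf

with tf.Session() as sess:
    reader = TextReader("./data/ptb")  # hypothetical constructor call
    model = Model(sess, reader, dataset="ptb", h_dim=50, embed_dim=500)
    # build_model() has already run inside __init__ at this point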
Example 3: __init__
# Required module: import base [as alias]
# Or: from base import Model [as alias]
def __init__(self, sess, reader, dataset="ptb",
             batch_size=20, num_steps=3, embed_dim=500,
             h_dim=50, learning_rate=0.01, epoch=50,
             checkpoint_dir="checkpoint"):
    """Initialize Neural Variational Document Model.

    params:
      sess: TensorFlow Session object.
      reader: TextReader object for training and test.
      dataset: The name of dataset to use.
      h_dim: The dimension of document representations (h). [50, 200]
    """
    self.sess = sess
    self.reader = reader

    self.h_dim = h_dim
    self.embed_dim = embed_dim

    self.epoch = epoch
    self.batch_size = batch_size
    self.learning_rate = learning_rate
    self.checkpoint_dir = checkpoint_dir

    self.dataset = dataset  # was hard-coded to "ptb", ignoring the argument
    self._attrs = ["batch_size", "num_steps", "embed_dim", "h_dim", "learning_rate"]

    raise Exception(" [!] Work in progress")
    self.build_model()  # unreachable until the guard above is removed
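Because of the work-in-progress guard, constructing this variant currently fails; a call sketch (same assumed class and reader as in the previous example):

try:
    model = Model(sess, reader, dataset="ptb", batch_size=20)
except Exception as e:
    print(e)  # " [!] Work in progress"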
Example 4: test
# Required module: import base [as alias]
# Or: from base import Model [as alias]
def test(args):
    device = torch.device('cuda' if args.cuda else 'cpu')
    pprint(args.__dict__)

    interface = FileInterface(**args.__dict__)
    # use cache for metadata
    if args.cache:
        out = interface.cache(preprocess, args)
        processor = out['processor']
        processed_metadata = out['processed_metadata']
    else:
        processor = Processor(**args.__dict__)
        metadata = interface.load_metadata()
        processed_metadata = processor.process_metadata(metadata)

    model = Model(**args.__dict__).to(device)
    model.init(processed_metadata)
    interface.bind(processor, model)

    interface.load(args.iteration, session=args.load_dir)

    test_examples = interface.load_test()
    test_dataset = tuple(processor.preprocess(example) for example in test_examples)

    test_sampler = Sampler(test_dataset, 'test', **args.__dict__)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, sampler=test_sampler,
                             collate_fn=processor.collate)

    print('Inferencing')
    with torch.no_grad():
        model.eval()
        pred = {}
        for batch_idx, (test_batch, _) in enumerate(zip(test_loader, range(args.eval_steps))):
            test_batch = {key: val.to(device) for key, val in test_batch.items()}
            model_output = model(**test_batch)
            results = processor.postprocess_batch(test_dataset, test_batch, model_output)
            if batch_idx % args.dump_period == 0:
                dump = processor.get_dump(test_dataset, test_batch, model_output, results)
                interface.dump(batch_idx, dump)
            for result in results:
                pred[result['id']] = result['pred']
            print('[%d/%d]' % (batch_idx + 1, len(test_loader)))

        interface.pred(pred)
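test expects an argparse-style namespace; a hypothetical minimal invocation (attribute names taken from the body above, values purely illustrative):

from argparse import Namespace

args = Namespace(cuda=True, cache=False, batch_size=64,
                 iteration='best', load_dir='save', eval_steps=100,
                 dump_period=20)
test(args)

Note that args.__dict__ is also splatted into FileInterface, Processor, Model, and Sampler, so a real namespace must additionally carry whatever keyword arguments those constructors expect.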
Example 5: embed
# Required module: import base [as alias]
# Or: from base import Model [as alias]
def embed(args):
    device = torch.device('cuda' if args.cuda else 'cpu')
    pprint(args.__dict__)

    interface = FileInterface(**args.__dict__)
    # use cache for metadata
    if args.cache:
        out = interface.cache(preprocess, args)
        processor = out['processor']
        processed_metadata = out['processed_metadata']
    else:
        processor = Processor(**args.__dict__)
        metadata = interface.load_metadata()
        processed_metadata = processor.process_metadata(metadata)

    model = Model(**args.__dict__).to(device)
    model.init(processed_metadata)
    interface.bind(processor, model)

    interface.load(args.iteration, session=args.load_dir)

    test_examples = interface.load_test()
    test_dataset = tuple(processor.preprocess(example) for example in test_examples)

    test_sampler = Sampler(test_dataset, 'test', **args.__dict__)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, sampler=test_sampler,
                             collate_fn=processor.collate)

    print('Saving embeddings')
    with torch.no_grad():
        model.eval()
        for batch_idx, (test_batch, _) in enumerate(zip(test_loader, range(args.eval_steps))):
            test_batch = {key: val.to(device) for key, val in test_batch.items()}

            if args.mode == 'embed' or args.mode == 'embed_context':
                context_output = model.get_context(**test_batch)
                context_results = processor.postprocess_context_batch(test_dataset, test_batch, context_output)

                for id_, phrases, matrix, metadata in context_results:
                    if not args.metadata:
                        metadata = None
                    interface.context_emb(id_, phrases, matrix, metadata=metadata, emb_type=args.emb_type)

            if args.mode == 'embed' or args.mode == 'embed_question':
                question_output = model.get_question(**test_batch)
                question_results = processor.postprocess_question_batch(test_dataset, test_batch, question_output)

                for id_, emb in question_results:
                    interface.question_emb(id_, emb, emb_type=args.emb_type)

            print('[%d/%d]' % (batch_idx + 1, len(test_loader)))

    if args.archive:
        print('Archiving')
        interface.archive()
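embed reads the same namespace as test plus the embedding-specific flags used above (mode, metadata, emb_type, archive); a hypothetical call, with the emb_type value purely illustrative:

args = Namespace(cuda=True, cache=False, batch_size=64,
                 iteration='best', load_dir='save', eval_steps=100,
                 mode='embed', metadata=False, emb_type='dense',
                 archive=True)
embed(args)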