This article collects typical usage examples of the Python method model.Model.build. If you are wrestling with questions such as: What exactly does Model.build do? How is Model.build used? What do real examples of Model.build look like? Then the curated code examples here may help. You can also explore further usage examples of the enclosing class, model.Model.
Six code examples of Model.build are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: keypoint_detection
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
import scipy.io as sio
import sklearn.utils

import layers  # project-local layer definitions
import load    # project-local CSV-to-mat converter
from model import Model

def keypoint_detection():
    try:
        data = sio.loadmat('data.mat')
    except IOError:
        # regenerate data.mat from the raw CSV files on first run
        load.csv()
        data = sio.loadmat('data.mat')
    train_x = data['train_x']
    train_y = data['train_y']
    test_x = data['test_x']
    # data normalization: pixels to [0, 1), keypoint coordinates to roughly [-1, 1]
    train_x = train_x / 256.0
    train_y = (train_y - 48) / 48.0
    test_x = test_x / 256.0
    # shuffle returns shuffled copies, so the result must be assigned back
    train_x, train_y = sklearn.utils.shuffle(train_x, train_y, random_state=0)
    train_x, valid_x = train_x[:-400], train_x[-400:]
    train_y, valid_y = train_y[:-400], train_y[-400:]
    model = Model(0.01, 0.9, 0.0005, 100, 10000)
    model.add_layer(layers.FullConnectedLayer(9216, 256, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(256, 100, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(100, 30))
    model.set_loss_function(layers.EuclideanLoss)
    model.build()
    print 'build model complete'
    model.train_model(train_x, train_y, valid_x, valid_y)
    model.save_test_result(test_x)
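The 9216 input units correspond to flattened 96 x 96 grayscale images, and (train_y - 48) / 48.0 maps keypoint coordinates from pixel space into roughly [-1, 1]. Predictions therefore need the inverse transform before they can be read as pixel positions. A minimal sketch of that inverse (the helper name is illustrative, not part of the original code):

import numpy as np

def denormalize_keypoints(pred):
    """Map network outputs from [-1, 1] back to pixel coordinates in [0, 96]."""
    return np.asarray(pred) * 48.0 + 48.0

assert denormalize_keypoints(0.25) == 60.0  # 0.25 * 48 + 48 = 60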
Example 2: keypoint_detection
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
import scipy.io as sio
import sklearn.utils

import layers  # project-local layer definitions
import load    # project-local CSV-to-mat converter
from model import Model

def keypoint_detection():
    try:
        data = sio.loadmat('data.mat')
    except IOError:
        # regenerate data.mat from the raw CSV files on first run
        load.csv()
        data = sio.loadmat('data.mat')
    train_x = data['train_x']
    train_y = data['train_y']
    test_x = data['test_x']
    # data normalization: pixels to [0, 1), keypoint coordinates to roughly [-1, 1]
    train_x = train_x / 256.0
    train_y = (train_y - 48) / 48.0
    test_x = test_x / 256.0
    # shuffle returns shuffled copies, so the result must be assigned back
    train_x, train_y = sklearn.utils.shuffle(train_x, train_y, random_state=0)
    train_x, valid_x = train_x[:-400], train_x[-400:]
    train_y, valid_y = train_y[:-400], train_y[-400:]
    model = Model(0.01, 0.9, 0.0005, 100, 1000)
    model.add_layer(layers.ReshapeLayer(1, 96, 96))
    model.add_layer(layers.ConvolutionLayer((3, 3), 8, 1, 1, layers.rectify))
    model.add_layer(layers.PoolingLayer((2, 2)))  # 47 * 47 * 8
    model.add_layer(layers.ConvolutionLayer((2, 2), 16, 8, 1, layers.rectify))
    model.add_layer(layers.PoolingLayer((2, 2)))  # 23 * 23 * 16
    model.add_layer(layers.ConvolutionLayer((2, 2), 32, 16, 1, layers.rectify))
    model.add_layer(layers.PoolingLayer((2, 2)))  # 11 * 11 * 32
    model.add_layer(layers.ConvolutionLayer((2, 2), 64, 32, 1, layers.rectify))
    model.add_layer(layers.PoolingLayer((2, 2)))  # 5 * 5 * 64
    model.add_layer(layers.ConvolutionLayer((2, 2), 128, 64, 1, layers.rectify))
    model.add_layer(layers.PoolingLayer((2, 2)))  # 2 * 2 * 128
    model.add_layer(layers.FullConnectedLayer(512, 512, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(512, 512, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(512, 30))
    model.set_loss_function(layers.EuclideanLoss)
    model.build()
    print 'build model complete'
    model.train_model(train_x, train_y, valid_x, valid_y)
    model.save_test_result(test_x)
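The shape comments above follow from 'valid' convolutions at stride 1 followed by 2 x 2 pooling, assuming that is what ConvolutionLayer and PoolingLayer implement here (consistent with the stride argument of 1 passed above). A quick sketch verifying the chain from the 96 x 96 input down to the 512 units expected by the first FullConnectedLayer:

def stage_output_size(n, k, pool=2):
    """Spatial size after a valid k x k convolution and pool x pool pooling."""
    return (n - k + 1) // pool

n = 96
for k in (3, 2, 2, 2, 2):
    n = stage_output_size(n, k)
    print n  # 47, 23, 11, 5, 2 -- matching the inline comments

assert 2 * 2 * 128 == 512  # the final feature map flattens to the FC input size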
Example 3: making_models
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
def making_models(list_methods=['item_similarity_recommender', 'factorization_recommender',
                                'ranking_factorization_recommender']):
    '''
    INPUT: list of recommender method names to build
    DESCRIPTION: builds and saves a model for each method (deprecated)
    OUTPUT: None
    '''
    model = Model()
    model.save('item_similarity_recommender')
    for method in list_methods:
        model.model = model.build(method)
        model.save(method)
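A minimal usage sketch, assuming Model.build accepts a method name and Model.save derives a file path from it, as the loop above suggests:

# build and persist only the factorization-based recommenders
making_models(list_methods=['factorization_recommender',
                            'ranking_factorization_recommender'])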
Example 4: len
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
'''
1: read from the gazetteers file with the format: <gazetteer> <list of categories>
2: once we read the gazetteers, we create a one-hot-encoding gazetteer vector
   for every word in the sentence. The length of the vector is equal to the
   number of categories, and we add the gazetteer feature vector to every word.
'''
gazetteers_dataset, gaz_tags = loader.load_gazetteers(parameters['gaz_path'])
parameters['gaz_dim'] = len(gaz_tags)
loader.add_gazetteers(train_data, gazetteers_dataset, id_to_word, gaz_tags)
loader.add_gazetteers(dev_data, gazetteers_dataset, id_to_word, gaz_tags)
loader.add_gazetteers(test_data, gazetteers_dataset, id_to_word, gaz_tags)

# Save the mappings to disk
print 'Saving the mappings to disk...'
model.save_mappings(id_to_word, id_to_char, id_to_tag)

# Build the model
f_train, f_eval = model.build(**parameters)

# Reload previous model values
if opts.reload:
    print 'Reloading previous model...'
    model.reload()

#
# Train network
#
singletons = set([word_to_id[k] for k, v
                  in dico_words_train.items() if v == 1])
n_epochs = 100  # number of epochs over the training set
freq_eval = 1000  # evaluate on dev every freq_eval steps
best_dev = -np.inf
best_test = -np.inf
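The docstring at the top of this example describes the gazetteer feature: every word receives a vector of length len(gaz_tags) with a 1 for each category its gazetteer entry lists. A self-contained sketch of that encoding (the dict-based data layout is an assumption for illustration; the real loader.add_gazetteers works on the project's own structures):

import numpy as np

def gazetteer_vector(word, gazetteers, gaz_tags):
    """Multi-hot vector with a 1 for every category the word appears under."""
    vec = np.zeros(len(gaz_tags))
    for category in gazetteers.get(word, []):
        vec[gaz_tags[category]] = 1.0
    return vec

gaz_tags = {'LOC': 0, 'PER': 1, 'ORG': 2}
gazetteers = {'paris': ['LOC'], 'jordan': ['PER', 'LOC']}
print gazetteer_vector('jordan', gazetteers, gaz_tags)  # 1s at LOC and PER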
Example 5: len
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
# fill in any vocabulary/grammar sizes not given on the command line
if not args.source_vocab_size:
    args.source_vocab_size = train_data.annot_vocab.size
if not args.target_vocab_size:
    args.target_vocab_size = train_data.terminal_vocab.size
if not args.rule_num:
    args.rule_num = len(train_data.grammar.rules)
if not args.node_num:
    args.node_num = len(train_data.grammar.node_type_to_id)

# copy the command-line arguments into the config module
config_module = sys.modules['config']
for name, value in vars(args).iteritems():
    setattr(config_module, name, value)

# build the model
model = Model()
model.build()
model.load(args.model)

def decode_query(query):
    """Decode a given natural language query; return a list of generated candidates."""
    query, str_map = canonicalize_query(query)
    vocab = train_data.annot_vocab
    query_tokens = query.split(' ')
    query_tokens_data = [query_to_data(query, vocab)]
    example = namedtuple('example', ['query', 'data'])(query=query_tokens, data=query_tokens_data)
    cand_list = model.decode(example, train_data.grammar, train_data.terminal_vocab,
                             beam_size=args.beam_size, max_time_step=args.decode_max_time_step,
                             log=True)
    return cand_list
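A usage sketch with a hypothetical query, assuming the surrounding script has already populated args and train_data as above:

candidates = decode_query('sort my_list in descending order')
for cand in candidates[:3]:
    print cand  # inspect the top-ranked generated candidates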
Example 6: Model
# Required import: from model import Model [as alias]
# Alternatively: from model.Model import build [as alias]
assert os.path.isdir(opts.model)
assert os.path.isfile(opts.input)

# Load existing model
print "Loading model..."
model = Model(model_path=opts.model)
parameters = model.parameters

# Load reverse mappings
word_to_id, char_to_id, tag_to_id = [
    {v: k for k, v in x.items()}
    for x in [model.id_to_word, model.id_to_char, model.id_to_tag]
]

# Load the model
_, f_eval = model.build(training=False, **parameters)
model.reload()

f_output = codecs.open(opts.output, 'w', 'utf-8')
start = time.time()
print 'Tagging...'
with codecs.open(opts.input, 'r', 'utf-8') as f_input:
    count = 0
    for line in f_input:
        words_ini = line.rstrip().split()
        if line:
            # Lowercase sentence
            if parameters['lower']:
                line = line.lower()
            # Replace all digits with zeros