This page collects typical usage examples of the Python attribute model.config.cfg.ANCHOR_RATIOS. If you have been asking yourself what exactly cfg.ANCHOR_RATIOS does and how to use it, the curated attribute examples below may help. You can also explore further usage examples of model.config.cfg, the object this attribute belongs to.
The following presents 4 code examples of the cfg.ANCHOR_RATIOS attribute, ordered by popularity by default.
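For context, in tf-faster-rcnn-style projects the anchor settings live in model/config.py as plain list attributes on an EasyDict config object. The sketch below is an assumption based on that common setup; the default values shown are not taken from this page:

from easydict import EasyDict as edict

__C = edict()
cfg = __C  # consumers do `from model.config import cfg`

# Aspect ratios (h/w) and scales used to enumerate RPN anchors;
# 3 ratios x 3 scales gives 9 anchors per feature-map location.
__C.ANCHOR_RATIOS = [0.5, 1, 2]
__C.ANCHOR_SCALES = [8, 16, 32]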
Example 1: construct_graph
# Required module import: from model.config import cfg [as alias]
# Alternatively: from model.config.cfg import ANCHOR_RATIOS [as alias]
def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default',
                                 anchor_scales=cfg.ANCHOR_SCALES,
                                 anchor_ratios=cfg.ANCHOR_RATIOS)
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                # DOUBLE_BIAS is 0/1, so biases train at double lr when it is
                # set; the `and ... or` idiom applies weight decay to biases
                # only when BIAS_DECAY is set.
                params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value], 'lr': lr,
                            'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    self.valwriter = tb.writer.FileWriter(self.tbvaldir)
    return lr, self.optimizer
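Both PyTorch examples on this page pass cfg.ANCHOR_RATIOS straight into create_architecture, where it drives RPN anchor enumeration. As a self-contained illustration of what the ratios control, here is a minimal sketch of the classic Faster R-CNN anchor construction; make_anchors is a hypothetical helper written for this page, not part of model.config:

import numpy as np

def make_anchors(base_size=16, ratios=(0.5, 1, 2), scales=(8, 16, 32)):
    """Hypothetical helper: enumerate reference anchors (x1, y1, x2, y2)
    around one base cell, in the spirit of the classic scheme."""
    anchors = []
    base_area = float(base_size * base_size)
    ctr = (base_size - 1) / 2.0
    for r in ratios:
        # Change the aspect ratio while keeping the base area constant.
        w = np.round(np.sqrt(base_area / r))
        h = np.round(w * r)
        for s in scales:
            ws, hs = w * s, h * s
            anchors.append([ctr - (ws - 1) / 2.0, ctr - (hs - 1) / 2.0,
                            ctr + (ws - 1) / 2.0, ctr + (hs - 1) / 2.0])
    return np.array(anchors)

print(make_anchors().shape)  # (9, 4): 3 ratios x 3 scales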
Example 2: construct_graph
# Required module import: from model.config import cfg [as alias]
# Alternatively: from model.config.cfg import ANCHOR_RATIOS [as alias]
def construct_graph(self, sess):
    with sess.graph.as_default():
        # Set the random seed for tensorflow
        tf.set_random_seed(cfg.RNG_SEED)
        # Build the main computation graph
        layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default',
                                              anchor_scales=cfg.ANCHOR_SCALES,
                                              anchor_ratios=cfg.ANCHOR_RATIOS)
        # Define the loss
        loss = layers['total_loss']
        # Set learning rate and momentum
        lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
        self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)
        # Compute the gradients with regard to the loss
        gvs = self.optimizer.compute_gradients(loss)
        # Double the gradient of the bias if set
        if cfg.TRAIN.DOUBLE_BIAS:
            final_gvs = []
            with tf.variable_scope('Gradient_Mult') as scope:
                for grad, var in gvs:
                    scale = 1.
                    if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
                        scale *= 2.
                    if not np.allclose(scale, 1.0):
                        grad = tf.multiply(grad, scale)
                    final_gvs.append((grad, var))
            train_op = self.optimizer.apply_gradients(final_gvs)
        else:
            train_op = self.optimizer.apply_gradients(gvs)
        # We will handle the snapshots ourselves
        self.saver = tf.train.Saver(max_to_keep=100000)
        # Write the train and validation information to tensorboard
        self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
        self.valwriter = tf.summary.FileWriter(self.tbvaldir)
    return lr, train_op
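The compute_gradients/apply_gradients pair above is the decomposed form of Optimizer.minimize; splitting the two steps is what makes the per-variable bias-gradient doubling possible. A minimal standalone sketch of that pattern under the same TF1 API, assuming an existing optimizer opt and a scalar loss:

import tensorflow as tf

def apply_with_bias_doubling(opt, loss, bias_suffix='/biases:'):
    # minimize() split into compute -> rescale -> apply (sketch only).
    gvs = opt.compute_gradients(loss)
    scaled = [(tf.multiply(g, 2.0) if bias_suffix in v.name else g, v)
              for g, v in gvs if g is not None]
    return opt.apply_gradients(scaled)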
Example 3: convert_from_depre
# Required module import: from model.config import cfg [as alias]
# Alternatively: from model.config.cfg import ANCHOR_RATIOS [as alias]
def convert_from_depre(net, imdb, input_dir, output_dir, snapshot, max_iters):
    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    num_classes = imdb.num_classes
    with sess.graph.as_default():
        tf.set_random_seed(cfg.RNG_SEED)
        layers = net.create_architecture(sess, 'TRAIN', num_classes, tag='default',
                                         anchor_scales=cfg.ANCHOR_SCALES,
                                         anchor_ratios=cfg.ANCHOR_RATIOS)
        loss = layers['total_loss']
        # Learning rate should be reduced already
        lr = tf.Variable(cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA, trainable=False)
        momentum = cfg.TRAIN.MOMENTUM
        optimizer = tf.train.MomentumOptimizer(lr, momentum)
        gvs = optimizer.compute_gradients(loss)
        if cfg.TRAIN.DOUBLE_BIAS:
            final_gvs = []
            with tf.variable_scope('Gradient_Mult') as scope:
                for grad, var in gvs:
                    scale = 1.
                    if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
                        scale *= 2.
                    if not np.allclose(scale, 1.0):
                        grad = tf.multiply(grad, scale)
                    final_gvs.append((grad, var))
            train_op = optimizer.apply_gradients(final_gvs)
        else:
            train_op = optimizer.apply_gradients(gvs)
        # Map the deprecated checkpoint's variable names onto the new graph
        checkpoint = osp.join(input_dir, snapshot + '.ckpt')
        variables = tf.global_variables()
        name2var = {convert_names(v.name): v for v in variables}
        target_names = get_variables_in_checkpoint_file(checkpoint)
        restorer = tf.train.Saver(name2var)
        saver = tf.train.Saver()
    print('Importing...')
    restorer.restore(sess, checkpoint)
    checkpoint = osp.join(output_dir, snapshot + '.ckpt')
    print('Exporting...')
    saver.save(sess, checkpoint)
    # also copy the pkl file
    index = osp.join(input_dir, snapshot + '.pkl')
    outdex = osp.join(output_dir, snapshot + '.pkl')
    shutil.copy(index, outdex)
    sess.close()
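convert_names and get_variables_in_checkpoint_file are helpers defined elsewhere in the tool. For reference, a minimal sketch of the checkpoint-listing helper as it is commonly written against the TF1 checkpoint reader; this is an assumption, not code from this page:

from tensorflow.python import pywrap_tensorflow

def get_variables_in_checkpoint_file(file_name):
    # Map each variable name stored in the checkpoint to its shape.
    reader = pywrap_tensorflow.NewCheckpointReader(file_name)
    return reader.get_variable_to_shape_map()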
Example 4: construct_graph
# Required module import: from model.config import cfg [as alias]
# Alternatively: from model.config.cfg import ANCHOR_RATIOS [as alias]
def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default',
                                 anchor_scales=cfg.ANCHOR_SCALES,
                                 anchor_ratios=cfg.ANCHOR_RATIOS)
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
        if value.requires_grad:
            # 'mask' and 'lightrcnn' parameters train at 10x the base lr.
            if 'mask' in key:
                if 'bias' in key:
                    params += [{'params': [value], 'lr': 10 * lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                                'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
                else:
                    params += [{'params': [value], 'lr': 10 * lr,
                                'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
            elif 'lightrcnn' in key:
                if 'bias' in key:
                    params += [{'params': [value], 'lr': 10 * lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                                'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
                else:
                    params += [{'params': [value], 'lr': 10 * lr,
                                'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
            else:
                if 'bias' in key:
                    params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                                'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
                else:
                    params += [{'params': [value], 'lr': lr,
                                'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
            # if 'features' in key or 'classifier' in key or 'net' in key:
            #     if 'bias' in key:
            #         params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
            #                     'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            #     else:
            #         params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
            # elif 'dec_channel' in key or 'global' in key:
            #     if 'bias' in key:
            #         params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1) * 10,
            #                     'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            #     else:
            #         params += [{'params': [value], 'lr': lr * 10, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    self.valwriter = tb.writer.FileWriter(self.tbvaldir)
    return lr, self.optimizer
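Because each parameter lands in its own group, the 10x boost for the 'mask' and 'lightrcnn' parameters is easy to verify after construction. A short hypothetical inspection snippet (the trainer variable name is assumed):

# e.g. after: lr, optimizer = trainer.construct_graph()
for i, group in enumerate(optimizer.param_groups):
    # Print each group's learning rate, weight decay, and tensor count.
    print(i, group['lr'], group['weight_decay'], len(group['params']))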