This article collects typical usage examples of the Python attribute caffe.proto.caffe_pb2.TRAIN. If you are wondering what caffe_pb2.TRAIN is, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore other usages of the module that provides this attribute, caffe.proto.caffe_pb2.
The listing below contains 14 code examples of caffe_pb2.TRAIN, sorted by popularity by default.
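Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them rely on. It only assumes the standard caffe.proto definitions (NetParameter, LayerParameter, and the Phase enum with its TRAIN/TEST values); the layer names and the script itself are illustrative and not taken from any example below.

from caffe.proto import caffe_pb2
from google.protobuf import text_format

# Build a tiny NetParameter with one layer restricted to the TRAIN phase.
net = caffe_pb2.NetParameter()
net.name = "train_only_example"

layer = net.layer.add()
layer.name = "data"
layer.type = "Data"
layer.top.append("data")
layer.top.append("label")
# caffe_pb2.TRAIN is a value of the Phase enum; attaching an include rule
# with phase = TRAIN keeps this layer out of the TEST-phase network.
layer.include.add().phase = caffe_pb2.TRAIN

# Serialize to the human-readable prototxt format.
print(text_format.MessageToString(net))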
Example 1: orth_loss_v2
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def orth_loss_v2(self, bottom_name):
    # self.Python('orth_loss', 'orthLossLayer', loss_weight=1, bottom=[bottom_name], top=[name], name=name)
    # , bottom=[bottom+'_MVN']
    # save bottom
    mainpath = self.bottom
    bottom = bottom_name  # 'NormLayer',
    # self.MVN(bottom=[bottom])
    layer = "TransposeLayer"
    layername = bottom_name + '_' + layer
    outputs = [layername]
    self.Python(layer, layer, top=outputs, bottom=[bottom], name=layername, phase='TRAIN')
    self.Matmul()
    outputs = [self.this.name]
    self.EuclideanLoss(name=bottom_name + '_euclidean', bottom=outputs, loss_weight=1e-1, phase='TRAIN')
    # restore bottom
    self.cur = mainpath
Example 2: resnet
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def resnet(n=3, num_output=16):
    """6n+2, n=3 9 18 corresponds to 20 56 110 layers"""
    net_name = "resnet-"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, num_output=num_output)
    builder.write(folder=pt_folder)
Example 3: resnet_orth_v2
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def resnet_orth_v2(n=3):
    """6n+2, n=3 9 18 corresponds to 20 56 110 layers"""
    net_name = "resnet-orth-v2"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, orth=True, v2=True)
    builder.write(folder=pt_folder)
Example 4: include
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def include(self, phase='TRAIN'):
    if phase is not None:
        includes = self.this.include.add()
        if phase == 'TRAIN':
            includes.phase = caffe_pb2.TRAIN
        elif phase == 'TEST':
            includes.phase = caffe_pb2.TEST
        else:
            raise NotImplementedError
#************************** inplace **************************
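The include helpers in Example 4 (above) and Example 11 map the strings 'TRAIN'/'TEST' to the protobuf enum by hand. For reference, the generated caffe_pb2 module also exposes the Phase enum wrapper itself, so the mapping can be done generically; the standalone sketch below is illustrative only and is not part of the examples' builder class.

from caffe.proto import caffe_pb2

# In the standard caffe.proto, Phase is a top-level enum with TRAIN = 0 and TEST = 1,
# so caffe_pb2.TRAIN and caffe_pb2.TEST are plain integers.
print(caffe_pb2.TRAIN)                        # 0
print(caffe_pb2.TEST)                         # 1
# The enum wrapper converts between names and values.
print(caffe_pb2.Phase.Name(caffe_pb2.TRAIN))  # TRAIN
print(caffe_pb2.Phase.Value('TEST'))          # 1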
Example 5: MVN
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def MVN(self, name=None, bottom=[], normalize_variance=True, across_channels=False, phase='TRAIN'):
    if across_channels:
        raise NotImplementedError
    if not normalize_variance:
        raise NotImplementedError
    self.setup(self.suffix('MVN', name), bottom=bottom, layer_type='MVN')
    if phase != 'TRAIN':
        raise NotImplementedError
    self.include()
Example 6: plain_func
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def plain_func(self, name, num_output, up=False, **kwargs):
    self.conv_bn_relu(name + '_conv0', num_output=num_output, stride=1 + int(up), **kwargs)
    self.conv_bn_relu(name + '_conv1', num_output=num_output, **kwargs)
# def orth_loss(self, bottom_name):
# # self.Python('orth_loss', 'orthLossLayer', loss_weight=1, bottom=[bottom_name], top=[name], name=name)
# # , bottom=[bottom+'_MVN']
# # save bottom
# mainpath = self.bottom
# bottom = bottom_name #'NormLayer',
# # self.MVN(bottom=[bottom])
# layer = "TransposeLayer"
# layername = bottom_name+'_' + layer
# outputs = [layername]#, bottom_name+'_zerolike']
# self.Python(layer, layer, top=outputs, bottom=[bottom], name=layername, phase='TRAIN')
# self.Matmul()
# # layer="diagLayer"
# # layername = bottom_name+'_' + layer
# # self.Python(layer, layer, top=[layername], name=layername, phase='TRAIN')
# outputs = [self.this.name]#, bottom_name+'_zerolike']
# self.EuclideanLoss(name=bottom_name+'_euclidean', bottom=outputs, loss_weight=1e-3, phase='TRAIN')
# # restore bottom
# self.cur = mainpath
Example 7: plain
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def plain(n=3):
    """6n+2, n=3 9 18 corresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output=16)
    builder.write(folder=pt_folder)
Example 8: plain_orth
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def plain_orth(n=3):
    """6n+2, n=3 5 7 9 18 corresponds to 20 32 44 56 110 layers"""
    net_name = "plain-orth"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, orth=True)
    builder.write(folder=pt_folder)
Example 9: plain_orth_v1
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def plain_orth_v1(n=3):
    """6n+2, n=3 5 7 9 18 corresponds to 20 32 44 56 110 layers"""
    net_name = "plain-orth-v1-"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, orth=True, inplace=False, num_output=16)
    builder.write(folder=pt_folder)
Example 10: acc
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def acc(n=3):
    """6n+2, n=3 9 18 corresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name + str(6 * n + 2))
    name = net_name + str(6 * n + 2) + '-cifar10'
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver
    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output=16, inplace=False)
    builder.write(folder=pt_folder)
Example 11: include
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def include(self, phase='TRAIN'):
    if phase is not None:
        includes = self.this.include.add()
        if phase == 'TRAIN':
            includes.phase = caffe_pb2.TRAIN
        elif phase == 'TEST':
            includes.phase = caffe_pb2.TEST
        else:
            raise NotImplementedError
#************************** inplace **************************
Example 12: solver_and_prototxt
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def solver_and_prototxt(compress_layer, compress_rate, compress_block):
    layers = ['2a', '2b', '2c', '3a', '3b', '3c', '3d',
              '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c']
    pt_folder = layers[compress_layer] + '_' + str(compress_block)
    if not os.path.exists(pt_folder):
        os.mkdir(pt_folder)
    name = 'resnet-' + layers[compress_layer] + str(compress_block) + '-ImageNet'
    solver = Solver(folder=pt_folder, b=compress_layer, compress_block=compress_block)
    solver.write()
    builder = Net(name)
    builder.Data('/opt/luojh/Dataset/ImageNet/lmdb/ilsvrc12_train_lmdb', backend='LMDB', phase='TRAIN', mirror=True,
                 crop_size=224, batch_size=32)
    builder.Data('/opt/luojh/Dataset/ImageNet/lmdb/ilsvrc12_val_lmdb', backend='LMDB', phase='TEST', mirror=False,
                 crop_size=224, batch_size=10)
    builder.resnet_50(layers, compress_layer, compress_rate, compress_block)
    builder.write(name='trainval.prototxt', folder=pt_folder)
    if compress_block == 0:
        compress_block = 1
        compress_layer -= 1
    else:
        compress_block = 0
    builder = Net(name + '-old')
    builder.setup('data', 'Data', top=['data'])
    builder.resnet_50(layers, compress_layer, compress_rate, compress_block, deploy=True)
    builder.write(name='deploy.prototxt', folder=pt_folder, deploy=True)
    print "Finished net prototxt generation!"
Example 13: ssd_loss
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def ssd_loss(self):
    layer = self.net.layer.add()
    layer.name = "mbox_loss"
    layer.type = "MultiBoxLoss"
    layer.bottom.append("mbox_loc")
    layer.bottom.append("mbox_conf")
    layer.bottom.append("mbox_priorbox")
    layer.bottom.append("label")
    layer.top.append("mbox_loss")
    layer.include.add().phase = caffe_pb2.TRAIN
    layer.propagate_down.append(True)
    layer.propagate_down.append(True)
    layer.propagate_down.append(False)
    layer.propagate_down.append(False)
    layer.loss_param.normalization = caffe_pb2.LossParameter.VALID
    layer.multibox_loss_param.loc_loss_type = caffe_pb2.MultiBoxLossParameter.SMOOTH_L1
    layer.multibox_loss_param.conf_loss_type = caffe_pb2.MultiBoxLossParameter.LOGISTIC
    layer.multibox_loss_param.loc_weight = 1.0
    layer.multibox_loss_param.num_classes = self.class_num
    layer.multibox_loss_param.share_location = True
    layer.multibox_loss_param.match_type = caffe_pb2.MultiBoxLossParameter.PER_PREDICTION
    layer.multibox_loss_param.overlap_threshold = 0.5
    layer.multibox_loss_param.use_difficult_gt = True
    layer.multibox_loss_param.neg_pos_ratio = 3.0
    layer.multibox_loss_param.neg_overlap = 0.5
    layer.multibox_loss_param.code_type = caffe_pb2.PriorBoxParameter.CENTER_SIZE
    layer.multibox_loss_param.ignore_cross_boundary_bbox = False
    layer.multibox_loss_param.mining_type = caffe_pb2.MultiBoxLossParameter.MAX_NEGATIVE
Example 14: data_train_ssd
# Required import: from caffe.proto import caffe_pb2 [as alias]
# Or: from caffe.proto.caffe_pb2 import TRAIN [as alias]
def data_train_ssd(self):
    layer = self.net.layer.add()
    layer.name = "data"
    layer.type = "AnnotatedData"
    layer.top.append("data")
    layer.top.append("label")
    layer.include.add().phase = caffe_pb2.TRAIN
    layer.transform_param.scale = 0.007843
    layer.transform_param.mirror = True
    layer.transform_param.mean_value.append(127.5)
    layer.transform_param.mean_value.append(127.5)
    layer.transform_param.mean_value.append(127.5)
    layer.transform_param.resize_param.prob = 1.0
    layer.transform_param.resize_param.resize_mode = caffe_pb2.ResizeParameter.WARP
    layer.transform_param.resize_param.height = self.input_size
    layer.transform_param.resize_param.width = self.input_size
    layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.LINEAR)
    layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.AREA)
    layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.NEAREST)
    layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.CUBIC)
    layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.LANCZOS4)
    layer.transform_param.emit_constraint.emit_type = caffe_pb2.EmitConstraint.CENTER
    layer.transform_param.distort_param.brightness_prob = 0.5
    layer.transform_param.distort_param.brightness_delta = 32.0
    layer.transform_param.distort_param.contrast_lower = 0.5
    layer.transform_param.distort_param.contrast_upper = 1.5
    layer.transform_param.distort_param.hue_prob = 0.5
    layer.transform_param.distort_param.hue_delta = 18.0
    layer.transform_param.distort_param.saturation_prob = 0.5
    layer.transform_param.distort_param.saturation_lower = 0.5
    layer.transform_param.distort_param.saturation_upper = 1.5
    layer.transform_param.distort_param.random_order_prob = 0.0
    layer.transform_param.expand_param.prob = 0.5
    layer.transform_param.expand_param.max_expand_ratio = 4.0
    layer.data_param.source = self.lmdb
    layer.data_param.batch_size = 64
    layer.data_param.backend = caffe_pb2.DataParameter.LMDB
    sampler = layer.annotated_data_param.batch_sampler.add()
    sampler.max_sample = 1
    sampler.max_trials = 1
    for overlap in [0.1, 0.3, 0.5, 0.7, 0.9, 1.0]:
        sampler = layer.annotated_data_param.batch_sampler.add()
        sampler.sampler.min_scale = 0.3
        sampler.sampler.max_scale = 1.0
        sampler.sampler.min_aspect_ratio = 0.5
        sampler.sampler.max_aspect_ratio = 2.0
        sampler.sample_constraint.min_jaccard_overlap = overlap
        sampler.max_sample = 1
        sampler.max_trials = 50
    layer.annotated_data_param.label_map_file = self.label_map
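The last two examples only populate self.net (a caffe_pb2.NetParameter); they do not show how the finished message is written out. Below is a minimal sketch of the usual serialization step; the helper name net_to_prototxt and the assumption that the builder exposes its message as builder.net are hypothetical and not taken from the examples.

from google.protobuf import text_format

def net_to_prototxt(net, path):
    # Write a caffe_pb2.NetParameter to a human-readable .prototxt file.
    with open(path, 'w') as f:
        f.write(text_format.MessageToString(net))

# Hypothetical usage after building the SSD training net:
# builder.data_train_ssd()
# builder.ssd_loss()
# net_to_prototxt(builder.net, 'trainval.prototxt')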