This page collects typical usage examples of the Python method caffe2.python.brew.relu. If you are unsure what brew.relu does or how to call it, the curated examples below may help; you can also explore the other methods of the caffe2.python.brew module.
15 code examples of brew.relu are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
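Before the examples, here is a minimal, self-contained sketch of brew.relu inside a ModelHelper network. The blob names ('data', 'fc1', 'relu1') and the sizes are illustrative only, not taken from the examples below:

# Minimal sketch: one FC layer followed by brew.relu, run on random data.
import numpy as np
from caffe2.python import brew, model_helper, workspace

model = model_helper.ModelHelper(name="relu_demo")
fc1 = brew.fc(model, 'data', 'fc1', dim_in=8, dim_out=4)
# brew.relu(model, blob_in, blob_out); passing the same blob for both
# applies the activation in place, as several examples below do.
relu1 = brew.relu(model, fc1, 'relu1')

workspace.FeedBlob('data', np.random.randn(2, 8).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)  # initialize FC weights and bias
workspace.RunNetOnce(model.net)
print(workspace.FetchBlob('relu1').shape)   # (2, 4); all entries >= 0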
Example 1: AddLeNetModel
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def AddLeNetModel(model, data):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.
    For each convolutional layer we specify dim_in (the number of input
    channels) and dim_out (the number of output channels). Each Conv and
    MaxPool layer also changes the image size: a kernel of size 5 reduces
    each side of the image by 4, while a MaxPool layer with kernel and
    stride equal to 2 halves each side.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 50 * 4 * 4 is dim_out of the previous layer times the image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
    fc3 = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, fc3, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
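The size bookkeeping in the docstring can be checked mechanically. The helper below is not part of the example; it simply applies the standard output-size formula to reproduce the 28 -> 24 -> 12 -> 8 -> 4 progression:

# Verify the comment arithmetic: out = (in + 2*pad - kernel) // stride + 1
def out_size(size, kernel, stride=1, pad=0):
    return (size + 2 * pad - kernel) // stride + 1

s = 28
s = out_size(s, kernel=5)            # conv1: 28 -> 24
s = out_size(s, kernel=2, stride=2)  # pool1: 24 -> 12
s = out_size(s, kernel=5)            # conv2: 12 -> 8
s = out_size(s, kernel=2, stride=2)  # pool2: 8 -> 4
print(s)  # 4, hence dim_in = 50 * 4 * 4 for fc3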
Example 2: block35
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def block35(self, model, v, num_in_channels, scale=1.0, relu=True, name='block35'):
    towers = [None, None, None]
    towers[0] = self.conv_factory(model, v, num_in_channels, num_filters=32,
                                  kernel=1, name=name+'tower1_1')
    towers[1] = self.conv_factory(model, v, num_in_channels, num_filters=32,
                                  kernel=1, name=name+'tower2_1')
    towers[1] = self.conv_factory(model, towers[1], 32, num_filters=32,
                                  kernel=3, pad=1, name=name+'tower2_2')
    towers[2] = self.conv_factory(model, v, num_in_channels, num_filters=32,
                                  kernel=1, name=name+'tower3_1')
    towers[2] = self.conv_factory(model, towers[2], 32, num_filters=48,
                                  kernel=3, pad=1, name=name+'tower3_2')
    towers[2] = self.conv_factory(model, towers[2], 48, num_filters=64,
                                  kernel=3, pad=1, name=name+'tower3_3')
    return self.block_head(model, v, towers, num_in_channels=32+32+64,
                           num_out_channels=num_in_channels,
                           scale=scale, relu=relu, name=name)
Example 3: forward_pass_builder
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should return
    a list of loss blobs that are used for computing the loss gradient. The
    function is also passed an internally calculated loss_scale parameter that
    is used to scale the loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    v = 'data'
    dim_in = self.input_shape[0]
    for idx in range(5):
        v = brew.fc(model, v, 'fc%d' % (idx+1), dim_in=dim_in, dim_out=2048)
        v = brew.relu(model, v, 'relu%d' % (idx+1))
        dim_in = 2048
    return self.add_head_nodes(model, v, dim_in, 'fc%d' % (idx+2), loss_scale=loss_scale)
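add_head_nodes is defined elsewhere in this project and is not shown on this page. Under common Caffe2 conventions such a head appends a final FC layer, a softmax, and a scaled loss; the sketch below is a plausible reconstruction under those assumptions (self.num_classes and the blob names are guesses), not the project's actual code:

# Hypothetical sketch of a head builder like add_head_nodes; the real method
# may differ. Assumes a 'label' blob is fed and self.num_classes exists.
def add_head_nodes(self, model, v, dim_in, fc_name, loss_scale=1.0):
    pred = brew.fc(model, v, fc_name, dim_in=dim_in, dim_out=self.num_classes)
    softmax, loss = model.SoftmaxWithLoss([pred, 'label'], ['softmax', 'loss'])
    scaled_loss = model.Scale(loss, 'scaled_loss', scale=loss_scale)
    return [scaled_loss]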
Example 4: forward_pass_builder
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should return
    a list of loss blobs that are used for computing the loss gradient. The
    function is also passed an internally calculated loss_scale parameter that
    is used to scale the loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    v = 'data'
    dim_in = self.input_shape[0]
    for idx in range(3):
        v = brew.fc(model, v, 'fc%d' % (idx+1), dim_in=dim_in, dim_out=1024)
        v = brew.relu(model, v, 'relu%d' % (idx+1))
        dim_in = 1024
    return self.add_head_nodes(model, v, dim_in, 'fc%d' % (idx+2), loss_scale=loss_scale)
Example 5: create_model
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def create_model(m, device_opts):
    with core.DeviceScope(device_opts):
        conv1 = brew.conv(m, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        pool1 = brew.max_pool(m, conv1, 'pool1', kernel=2, stride=2)
        conv2 = brew.conv(m, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
        pool2 = brew.max_pool(m, conv2, 'pool2', kernel=2, stride=2)
        fc3 = brew.fc(m, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        fc3 = brew.relu(m, fc3, fc3)
        pred = brew.fc(m, fc3, 'pred', 500, 2)
        softmax = brew.softmax(m, pred, 'softmax')
        m.net.AddExternalOutput(softmax)
        return softmax

# add loss and optimizer
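The trailing comment points at a companion step that is not included in this snippet. A minimal version of that step using standard Caffe2 calls might look as follows (the learning-rate schedule is illustrative; 'softmax' and 'label' follow the blob names above):

# Sketch of the "loss and optimizer" step hinted at above; not the original
# script's code. Assumes a 'label' blob is fed alongside 'data'.
from caffe2.python import core, optimizer

def add_training_operators(model, softmax, device_opts):
    with core.DeviceScope(device_opts):
        xent = model.LabelCrossEntropy([softmax, 'label'], 'xent')
        loss = model.AveragedLoss(xent, 'loss')
        model.AddGradientOperators([loss])
        optimizer.build_sgd(model, base_learning_rate=0.01,
                            policy='step', stepsize=1, gamma=0.999)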
Example 6: AddLeNetModel
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def AddLeNetModel(model, data):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.
    For each convolutional layer we specify dim_in (the number of input
    channels) and dim_out (the number of output channels). Each Conv and
    MaxPool layer also changes the image size: a kernel of size 5 reduces
    each side of the image by 4, while a MaxPool layer with kernel and
    stride equal to 2 halves each side.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 100 * 4 * 4 is dim_out of the previous layer times the image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
    relu = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, relu, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
Example 7: test_simple_model
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def test_simple_model(self):
    model = model_helper.ModelHelper(name="mnist")
    # how come those inputs don't break the forward pass =.=a
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int64))
    with core.NameScope("conv1"):
        conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
        # Image size: 24 x 24 -> 12 x 12
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
        # Image size: 12 x 12 -> 8 x 8
        conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
        # Image size: 8 x 8 -> 4 x 4
        pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    with core.NameScope("classifier"):
        # 100 * 4 * 4 is dim_out of the previous layer times the image size
        fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
        relu = brew.relu(model, fc3, fc3)
        pred = brew.fc(model, relu, 'pred', 500, 10)
        softmax = brew.softmax(model, pred, 'softmax')
        xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
        # compute the expected loss
        loss = model.AveragedLoss(xent, "loss")
    model.net.RunAllOnMKL()
    model.param_init_net.RunAllOnMKL()
    model.AddGradientOperators([loss], skip=1)
    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)
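For reference, once such a model is assembled, the usual Caffe2 execution pattern is to run the initializer net once, create the main net, and then step it. A generic sketch (note that blob names created inside a NameScope carry its prefix, e.g. 'classifier/loss' above):

# Standard execution pattern for a ModelHelper-built model (sketch):
workspace.RunNetOnce(model.param_init_net)  # fill weights and biases once
workspace.CreateNet(model.net)              # instantiate the main net
for _ in range(10):                         # a few iterations
    workspace.RunNet(model.net)
print(workspace.FetchBlob('classifier/loss'))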
Example 8: conv_factory
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def conv_factory(self, model, v, num_in_channels, num_filters, kernel,
                 stride=1, pad=0, relu=True, name='conv'):
    """Standard convolution block: Conv -> BatchNorm -> Activation"""
    if isinstance(pad, int):
        pad_t = pad_b = pad_l = pad_r = pad
    elif isinstance(pad, list) or isinstance(pad, tuple):
        if len(pad) == 2:
            pad_t = pad_b = pad[0]
            pad_l = pad_r = pad[1]
        elif len(pad) == 4:
            pad_t = pad[0]
            pad_b = pad[1]
            pad_l = pad[2]
            pad_r = pad[3]
        else:
            assert False, "Invalid length of pad array. Expecting 2 or 4 but have: " + str(pad)
    else:
        assert False, "Invalid type of padding: " + str(pad)
    v = brew.conv(model, v, name + '_conv', num_in_channels, num_filters,
                  kernel=kernel, pad_t=pad_t, pad_l=pad_l, pad_b=pad_b,
                  pad_r=pad_r, stride=stride)
    v = brew.spatial_bn(model, v, name+'_bn', num_filters, eps=2e-5,
                        momentum=0.9, is_test=(self.phase == 'inference'))
    if relu is True:
        v = brew.relu(model, v, name + '_relu')
    return v
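The pad argument accepted by this factory can be an int (same padding on all four sides), a 2-element [vertical, horizontal] pair, or a 4-element [top, bottom, left, right] list. A stand-alone copy of that normalization, handy for quick checking:

# Stand-alone version of the pad normalization above; returns (t, b, l, r).
def normalize_pad(pad):
    if isinstance(pad, int):
        return (pad,) * 4                        # same pad on all four sides
    if len(pad) == 2:
        return (pad[0], pad[0], pad[1], pad[1])  # [vertical, horizontal]
    if len(pad) == 4:
        return tuple(pad)                        # [top, bottom, left, right]
    raise ValueError("pad must be an int or a 2-/4-element sequence")

print(normalize_pad(1))             # (1, 1, 1, 1)
print(normalize_pad([1, 2]))        # (1, 1, 2, 2)
print(normalize_pad([1, 2, 3, 4]))  # (1, 2, 3, 4)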
Example 9: block_head
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def block_head(self, model, v, towers, num_in_channels, num_out_channels,
               scale=1.0, relu=True, name='block_head_node'):
    tower_mixed = brew.concat(model, towers, blob_out=name+'_tower_mixed')
    tower_out = self.conv_factory(model, tower_mixed, num_in_channels,
                                  num_filters=num_out_channels,
                                  kernel=1, relu=relu, name=name+'tower_out')
    # v = v + scale * tower_out
    scaled = model.Scale(tower_out, name + '_scale', scale=scale)
    v = brew.sum(model, [v, scaled], name+'_sum')
    if relu is True:
        v = brew.relu(model, v, name + '_relu')
    return v
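block_head implements the scaled residual connection used throughout Inception-ResNet: the tower outputs are concatenated, projected back to the input channel count by a 1x1 convolution, scaled, summed with the shortcut, and optionally passed through a final ReLU. A NumPy illustration of the last three steps (the shapes and the 0.17 scale are illustrative):

# NumPy view of the residual update computed by block_head:
# v_out = relu(v + scale * tower_out)
import numpy as np

v = np.random.randn(1, 4, 8, 8).astype(np.float32)          # shortcut branch
tower_out = np.random.randn(1, 4, 8, 8).astype(np.float32)  # 1x1-projected towers
scale = 0.17
v_out = np.maximum(v + scale * tower_out, 0.0)               # Scale -> Sum -> Relu
print(v_out.shape)  # (1, 4, 8, 8): same shape as the shortcut, so blocks stack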
Example 10: block17
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def block17(self, model, v, num_in_channels, scale=1.0, relu=True, name='block17'):
    towers = [None, None]
    towers[0] = self.conv_factory(model, v, num_in_channels, num_filters=192,
                                  kernel=1, name=name+'_tower1_1')
    towers[1] = self.conv_factory(model, v, num_in_channels, num_filters=129,
                                  kernel=1, name=name+'tower2_1')
    towers[1] = self.conv_factory(model, towers[1], 129, num_filters=160,
                                  kernel=[1, 7], pad=[1, 2], name=name+'tower2_2')
    towers[1] = self.conv_factory(model, towers[1], 160, num_filters=192,
                                  kernel=[7, 1], pad=[2, 1], name=name+'tower2_3')
    return self.block_head(model, v, towers, num_in_channels=192+192,
                           num_out_channels=num_in_channels,
                           scale=scale, relu=relu, name=name)
Example 11: block8
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def block8(self, model, v, num_in_channels, scale=1.0, relu=True, name='block8'):
    towers = [None, None]
    towers[0] = self.conv_factory(model, v, num_in_channels, num_filters=192,
                                  kernel=1, name=name+'_tower1_1')
    towers[1] = self.conv_factory(model, v, num_in_channels, num_filters=192,
                                  kernel=1, name=name+'tower2_1')
    towers[1] = self.conv_factory(model, towers[1], 192, num_filters=224,
                                  kernel=[1, 3], pad=[0, 1], name=name+'tower2_2')
    towers[1] = self.conv_factory(model, towers[1], 224, num_filters=256,
                                  kernel=[3, 1], pad=[1, 0], name=name+'tower2_3')
    return self.block_head(model, v, towers, num_in_channels=192+256,
                           num_out_channels=num_in_channels,
                           scale=scale, relu=relu, name=name)
Example 12: forward_pass_builder
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should return
    a list of loss blobs that are used for computing the loss gradient. The
    function is also passed an internally calculated loss_scale parameter that
    is used to scale the loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    layers, filters = VGG.specs[self.__model]['specs']
    v = 'data'
    dim_in = self.input_shape[0]
    for i, num in enumerate(layers):
        for j in range(num):
            v = brew.conv(model, v, 'conv%d_%d' % (i+1, j+1), dim_in, filters[i], kernel=3, pad=1)
            v = brew.relu(model, v, 'relu%d_%d' % (i+1, j+1))
            dim_in = filters[i]
        v = brew.max_pool(model, v, 'pool%d' % (i+1), kernel=2, stride=2)
    dim_in = 25088  # 512 * 7 * 7 (output tensor of the previous max pool layer)
    for i in range(2):
        v = brew.fc(model, v, 'fc%d' % (6+i), dim_in=dim_in, dim_out=4096)
        v = brew.relu(model, v, 'relu%d' % (6+i))
        v = brew.dropout(model, v, 'drop%d' % (6+i), ratio=0.5, is_test=is_inference)
        dim_in = 4096
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
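VGG.specs is defined elsewhere in the project. For the standard VGG configurations, the per-stage layer counts and filter widths follow the VGG paper, so the specs entry presumably encodes something like the table below (the real dictionary may use different keys):

# Plausible shape of the VGG.specs table consumed above (hypothetical keys).
specs = {
    'vgg11': {'specs': ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512])},
    'vgg16': {'specs': ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512])},
    'vgg19': {'specs': ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])},
}
layers, filters = specs['vgg16']['specs']
# With 224x224 input, five 2x2/stride-2 pools leave 7x7 maps:
# 512 * 7 * 7 = 25088, matching the dim_in used for fc6 above.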
Example 13: conv_factory
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def conv_factory(model, v, num_in_channels, num_filter, kernel, stride=1, pad=0, name=None, suffix=''):
    v = brew.conv(model, v, 'conv_%s%s' % (name, suffix), num_in_channels, num_filter, kernel=kernel, pad=pad, stride=stride)
    v = brew.relu(model, v, 'relu_%s%s' % (name, suffix))
    return v
Example 14: conv
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def conv(self, model, name, inputs, input_depth, num_filters, kernel, stride,
         pad, is_inference):
    # Check padding
    if isinstance(pad, int):
        pad_t = pad_b = pad_l = pad_r = pad
    elif isinstance(pad, list) or isinstance(pad, tuple):
        if len(pad) == 2:
            pad_t = pad_b = pad[0]
            pad_l = pad_r = pad[1]
        elif len(pad) == 4:
            pad_t = pad[0]
            pad_b = pad[1]
            pad_l = pad[2]
            pad_r = pad[3]
        else:
            assert False, "Invalid length of pad array. Expecting 2 or 4 but have: " + str(pad)
    else:
        assert False, "Invalid type of padding: " + str(pad)
    # Check kernel
    if isinstance(kernel, int):
        kernel = [kernel, kernel]
    elif isinstance(kernel, tuple) or isinstance(kernel, list):
        assert len(kernel) == 2, "Kernel must have length 2"
        kernel = [kernel[0], kernel[1]]
    else:
        assert False, "Invalid type of kernel: " + str(kernel)
    #
    self.counts[name] += 1
    name = name + str(self.counts[name]-1)
    #
    v = brew.conv(model, inputs, name + '_conv', input_depth, num_filters,
                  kernel=kernel, stride=stride,
                  pad_t=pad_t, pad_l=pad_l, pad_b=pad_b, pad_r=pad_r,
                  no_bias=True)
    v = brew.spatial_bn(model, v, name+'_bn', num_filters, eps=2e-5,
                        momentum=0.9, is_test=is_inference)
    v = brew.relu(model, v, name+'_relu')
    return v
Example 15: forward_pass_builder
# Required import: from caffe2.python import brew
# Or: from caffe2.python.brew import relu
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should return
    a list of loss blobs that are used for computing the loss gradient. The
    function is also passed an internally calculated loss_scale parameter that
    is used to scale the loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'
    # Layer 1
    v = brew.conv(model, v, 'conv1', 3, 96, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.max_pool(model, v, 'pool1', kernel=2, stride=2)
    # Layer 2
    v = brew.conv(model, v, 'conv2', 96, 256, kernel=5)
    v = brew.relu(model, v, 'relu2')
    v = brew.max_pool(model, v, 'pool2', kernel=2, stride=2)
    # Layer 3
    v = brew.conv(model, v, 'conv3', 256, 512, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')
    # Layer 4
    v = brew.conv(model, v, 'conv4', 512, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu4')
    # Layer 5
    v = brew.conv(model, v, 'conv5', 1024, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=2, stride=2)
    # Layer 6
    v = brew.fc(model, v, 'fc6', dim_in=6*6*1024, dim_out=3072)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)
    # Layer 7
    v = brew.fc(model, v, 'fc7', dim_in=3072, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)