This article collects typical usage examples of the Python method caffe2.python.brew.max_pool. If you are wondering what brew.max_pool does, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples from the containing module, caffe2.python.brew.
The following presents 13 code examples of the brew.max_pool method, sorted by popularity by default.
Example 1: AddLeNetModel
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def AddLeNetModel(model, data):
    '''
    This is the standard LeNet model: from data to the softmax prediction.
    For each convolutional layer we specify dim_in (number of input channels)
    and dim_out (number of output channels). Each Conv and MaxPool layer also
    changes the image size: a kernel of size 5 reduces each side of an image
    by 4, while a MaxPool layer with kernel and stride equal to 2 divides
    each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 50 * 4 * 4 is dim_out of the previous layer multiplied by the image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
    fc3 = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, fc3, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
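The size arithmetic described in the docstring follows the standard convolution output formula, out = (in + 2*pad - kernel) // stride + 1. A quick worked check of the progression above (not part of the original example):

# conv with kernel 5, no padding, stride 1: out = in - (kernel - 1)
assert 28 - (5 - 1) == 24   # conv1: 28 x 28 -> 24 x 24
# max pool with kernel 2, stride 2: out = in // 2
assert 24 // 2 == 12        # pool1: 24 x 24 -> 12 x 12
assert 12 - (5 - 1) == 8    # conv2: 12 x 12 -> 8 x 8
assert 8 // 2 == 4          # pool2: 8 x 8 -> 4 x 4
# hence dim_in of fc3: 50 channels * 4 * 4 spatial positions
assert 50 * 4 * 4 == 800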
Example 2: create_model
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def create_model(m, device_opts):
    with core.DeviceScope(device_opts):
        conv1 = brew.conv(m, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        pool1 = brew.max_pool(m, conv1, 'pool1', kernel=2, stride=2)
        conv2 = brew.conv(m, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
        pool2 = brew.max_pool(m, conv2, 'pool2', kernel=2, stride=2)
        fc3 = brew.fc(m, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        fc3 = brew.relu(m, fc3, fc3)
        pred = brew.fc(m, fc3, 'pred', 500, 2)
        softmax = brew.softmax(m, pred, 'softmax')
        m.net.AddExternalOutput(softmax)
        return softmax

# add loss and optimizer
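The trailing comment points at the training head, which this example does not show. A minimal sketch of how a loss and an SGD optimizer are typically attached in Caffe2 (assuming a 'label' input blob; the helper name and hyperparameters are illustrative):

from caffe2.python import core, optimizer

def add_training_operators(m, softmax, device_opts):
    with core.DeviceScope(device_opts):
        # cross-entropy against the 'label' blob, averaged over the batch
        xent = m.LabelCrossEntropy([softmax, 'label'], 'xent')
        loss = m.AveragedLoss(xent, 'loss')
        # gradients for all parameters, then SGD with momentum
        m.AddGradientOperators([loss])
        optimizer.build_sgd(m, base_learning_rate=0.01, momentum=0.9)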
Example 3: AddLeNetModel
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def AddLeNetModel(model, data):
    '''
    This is the standard LeNet model: from data to the softmax prediction.
    For each convolutional layer we specify dim_in (number of input channels)
    and dim_out (number of output channels). Each Conv and MaxPool layer also
    changes the image size: a kernel of size 5 reduces each side of an image
    by 4, while a MaxPool layer with kernel and stride equal to 2 divides
    each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 100 * 4 * 4 is dim_out of the previous layer multiplied by the
    # image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
    relu = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, relu, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
Example 4: test_simple_model
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def test_simple_model(self):
    model = model_helper.ModelHelper(name="mnist")
    # how come those inputs don't break the forward pass =.=a
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
    with core.NameScope("conv1"):
        conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
        # Image size: 24 x 24 -> 12 x 12
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
        # Image size: 12 x 12 -> 8 x 8
        conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
        # Image size: 8 x 8 -> 4 x 4
        pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    with core.NameScope("classifier"):
        # 100 * 4 * 4 is dim_out of the previous layer multiplied by the image size
        fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
        relu = brew.relu(model, fc3, fc3)
        pred = brew.fc(model, relu, 'pred', 500, 10)
        softmax = brew.softmax(model, pred, 'softmax')
    xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
    # compute the expected loss
    loss = model.AveragedLoss(xent, "loss")
    model.net.RunAllOnMKL()
    model.param_init_net.RunAllOnMKL()
    model.AddGradientOperators([loss], skip=1)
    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)
Example 5: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'
    v = brew.conv(model, v, 'conv1', 3, 64, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.max_pool(model, v, 'pool1', kernel=3, stride=2)
    v = brew.conv(model, v, 'conv2', 64, 192, kernel=5, pad=2, group=1)
    v = brew.relu(model, v, 'relu2')
    v = brew.max_pool(model, v, 'pool2', kernel=3, stride=2)
    v = brew.conv(model, v, 'conv3', 192, 384, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')
    v = brew.conv(model, v, 'conv4', 384, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu4')
    v = brew.conv(model, v, 'conv5', 256, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=3, stride=2)
    v = brew.fc(model, v, 'fc6', dim_in=9216, dim_out=4096)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)
    v = brew.fc(model, v, 'fc7', dim_in=4096, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
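add_head_nodes is provided by the surrounding benchmark class and is not shown in this excerpt. A plausible minimal sketch of the step it performs (a final classifier plus a loss scaled by loss_scale; the blob names and the num_classes attribute are assumptions):

def add_head_nodes(self, model, v, dim_in, fc_name, loss_scale=1.0):
    # final fully connected classifier
    v = brew.fc(model, v, fc_name, dim_in=dim_in, dim_out=self.num_classes)
    if self.phase == 'inference':
        brew.softmax(model, v, 'prob')
        return None
    # training: softmax + cross-entropy loss, scaled for multi-GPU averaging
    softmax, loss = model.SoftmaxWithLoss([v, 'softmax_label'],
                                          ['softmax', 'loss'])
    scaled_loss = model.Scale(loss, 'scaled_loss', scale=loss_scale)
    return [scaled_loss]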
Example 6: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    layers, filters = VGG.specs[self.__model]['specs']
    v = 'data'
    dim_in = self.input_shape[0]
    for i, num in enumerate(layers):
        for j in range(num):
            v = brew.conv(model, v, 'conv%d_%d' % (i+1, j+1), dim_in, filters[i], kernel=3, pad=1)
            v = brew.relu(model, v, 'relu%d_%d' % (i+1, j+1))
            dim_in = filters[i]
        v = brew.max_pool(model, v, 'pool%d' % (i+1), kernel=2, stride=2)
    dim_in = 25088  # 512 * 7 * 7 (output tensor of the previous max pool layer)
    for i in range(2):
        v = brew.fc(model, v, 'fc%d' % (6+i), dim_in=dim_in, dim_out=4096)
        v = brew.relu(model, v, 'relu%d' % (6+i))
        v = brew.dropout(model, v, 'drop%d' % (6+i), ratio=0.5, is_test=is_inference)
        dim_in = 4096
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
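VGG.specs is defined elsewhere in the class; based on the standard VGG configurations, its entries plausibly look like this (an illustrative sketch, not the verbatim definition — 'layers' is the number of conv layers per stage, 'filters' the channel count per stage):

specs = {
    'vgg11': {'specs': ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512])},
    'vgg16': {'specs': ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512])},
    'vgg19': {'specs': ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])},
}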
Example 7: inception_factory
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def inception_factory(model, v, num_in_channels, num_1x1, num_3x3red, num_3x3,
                      num_d5x5red, num_d5x5, proj, name):
    # 1x1 branch
    c1x1 = conv_factory(model, v, num_in_channels, num_filter=num_1x1, kernel=1, name=('%s_1x1' % name))
    # 3x3 reduce + 3x3 branch
    c3x3r = conv_factory(model, v, num_in_channels, num_filter=num_3x3red, kernel=1, name=('%s_3x3' % name), suffix='_reduce')
    c3x3 = conv_factory(model, c3x3r, num_3x3red, num_filter=num_3x3, kernel=3, pad=1, name=('%s_3x3' % name))
    # 5x5 reduce + 5x5 branch
    cd5x5r = conv_factory(model, v, num_in_channels, num_filter=num_d5x5red, kernel=1, name=('%s_5x5' % name), suffix='_reduce')
    cd5x5 = conv_factory(model, cd5x5r, num_d5x5red, num_filter=num_d5x5, kernel=5, pad=2, name=('%s_5x5' % name))
    # pool + projection branch
    pooling = brew.max_pool(model, v, 'max_pool_%s_pool' % name, kernel=3, stride=1, pad=1)
    cproj = conv_factory(model, pooling, num_in_channels, num_filter=proj, kernel=1, name=('%s_proj' % name))
    # concatenate the four branches along the channel axis
    return brew.concat(model, [c1x1, c3x3, cd5x5, cproj], 'ch_concat_%s_chconcat' % name)
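conv_factory is not shown in this excerpt. A minimal sketch consistent with how it is called above (a convolution followed by ReLU; the exact naming scheme is an assumption):

def conv_factory(model, v, dim_in, num_filter, kernel, stride=1, pad=0,
                 name=None, suffix=''):
    # convolution with explicit input/output channel counts
    conv = brew.conv(model, v, 'conv_%s%s' % (name, suffix),
                     dim_in=dim_in, dim_out=num_filter,
                     kernel=kernel, stride=stride, pad=pad)
    # followed by an in-place ReLU
    return brew.relu(model, conv, 'relu_%s%s' % (name, suffix))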
Example 8: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    v = 'data'
    v = conv_factory(model, v, self.input_shape[0], 64, kernel=7, stride=2, pad=3, name="conv1/7x7_s2")
    v = brew.max_pool(model, v, 'pool1/3x3_s2', kernel=3, stride=2)
    v = brew.lrn(model, v, 'pool1/norm1', size=5, alpha=0.0001, beta=0.75)
    v = conv_factory(model, v, 64, 64, kernel=1, stride=1, name="conv2/3x3_reduce")
    v = conv_factory(model, v, 64, 192, kernel=3, stride=1, pad=1, name="conv2/3x3")
    v = brew.lrn(model, v, 'conv2/norm2', size=5, alpha=0.0001, beta=0.75)
    v = brew.max_pool(model, v, 'pool2/3x3_s2', kernel=3, stride=2)
    v = inception_factory(model, v, 192, 64, 96, 128, 16, 32, 32, name="inception_3a")
    v = inception_factory(model, v, 256, 128, 128, 192, 32, 96, 64, name="inception_3b")
    v = brew.max_pool(model, v, 'pool3/3x3_s2', kernel=3, stride=2)
    v = inception_factory(model, v, 480, 192, 96, 208, 16, 48, 64, name="inception_4a")
    v = inception_factory(model, v, 512, 160, 112, 224, 24, 64, 64, name="inception_4b")
    v = inception_factory(model, v, 512, 128, 128, 256, 24, 64, 64, name="inception_4c")
    v = inception_factory(model, v, 512, 112, 144, 288, 32, 64, 64, name="inception_4d")
    v = inception_factory(model, v, 528, 256, 160, 320, 32, 128, 128, name="inception_4e")
    v = brew.max_pool(model, v, 'pool4/3x3_s2', kernel=3, stride=2, pad=1)
    v = inception_factory(model, v, 832, 256, 160, 320, 32, 128, 128, name="inception_5a")
    v = inception_factory(model, v, 832, 384, 192, 384, 48, 128, 128, name="inception_5b")
    v = brew.average_pool(model, v, 'pool5/7x7_s1', kernel=7, stride=1)
    v = brew.dropout(model, v, 'pool5/drop_7x7_s1', ratio=0.5, is_test=(self.phase == 'inference'))
    return self.add_head_nodes(model, v, 1024, 'classifier', loss_scale=loss_scale)
Example 9: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    self.counts = defaultdict(lambda: 0)
    is_inference = self.phase == 'inference'
    v = 'data'
    # Input conv modules
    v = self.conv(model, 'conv', v, input_depth=3, num_filters=32, kernel=3, stride=2, pad=0, is_inference=is_inference)
    v = self.conv(model, 'conv', v, input_depth=32, num_filters=32, kernel=3, stride=1, pad=0, is_inference=is_inference)
    v = self.conv(model, 'conv', v, input_depth=32, num_filters=64, kernel=3, stride=1, pad=1, is_inference=is_inference)
    v = brew.max_pool(model, v, blob_out='pool1', kernel=3, stride=2, pad=0)
    v = self.conv(model, 'conv', v, input_depth=64, num_filters=80, kernel=1, stride=1, pad=0, is_inference=is_inference)
    v = self.conv(model, 'conv', v, input_depth=80, num_filters=192, kernel=3, stride=1, pad=0, is_inference=is_inference)
    v = brew.max_pool(model, v, blob_out='pool2', kernel=3, stride=2, pad=0)
    # Three Type A inception modules
    v = self.module_a(model, inputs=v, input_depth=192, n=32, is_inference=is_inference)
    v = self.module_a(model, inputs=v, input_depth=256, n=64, is_inference=is_inference)
    v = self.module_a(model, inputs=v, input_depth=288, n=64, is_inference=is_inference)
    # One Type B inception module
    v = self.module_b(model, inputs=v, input_depth=288, is_inference=is_inference)
    # Four Type C inception modules
    for n in (128, 160, 160, 192):
        v = self.module_c(model, inputs=v, input_depth=768, n=n, is_inference=is_inference)
    # One Type D inception module
    v = self.module_d(model, inputs=v, input_depth=768, is_inference=is_inference)
    # Two Type E inception modules
    v = self.module_e(model, inputs=v, input_depth=1280, pooltype='avg', is_inference=is_inference)
    v = self.module_e(model, inputs=v, input_depth=2048, pooltype='max', is_inference=is_inference)
    # Final global pooling
    v = brew.average_pool(model, v, blob_out='pool', kernel=8, stride=1, pad=0)
    # And the classifier
    return self.add_head_nodes(model, v, 2048, 'classifier', loss_scale=loss_scale)
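self.conv here is the class's conv-BN-ReLU building block, which is not shown. A plausible minimal sketch that matches the call sites above and the self.counts counter initialized at the top of the function (the BN epsilon and naming scheme are assumptions):

def conv(self, model, name, v, input_depth, num_filters, kernel,
         stride=1, pad=0, is_inference=False):
    # make layer names unique via the per-type counter
    self.counts[name] += 1
    name = name + str(self.counts[name])
    v = brew.conv(model, v, name, input_depth, num_filters,
                  kernel=kernel, stride=stride, pad=pad, no_bias=True)
    v = brew.spatial_bn(model, v, name + '_bn', num_filters,
                        eps=1e-3, is_test=is_inference)
    return brew.relu(model, v, name + '_relu')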
Example 10: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'
    # Layer 1
    v = brew.conv(model, v, 'conv1', 3, 96, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.max_pool(model, v, 'pool1', kernel=2, stride=2)
    # Layer 2
    v = brew.conv(model, v, 'conv2', 96, 256, kernel=5)
    v = brew.relu(model, v, 'relu2')
    v = brew.max_pool(model, v, 'pool2', kernel=2, stride=2)
    # Layer 3
    v = brew.conv(model, v, 'conv3', 256, 512, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')
    # Layer 4
    v = brew.conv(model, v, 'conv4', 512, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu4')
    # Layer 5
    v = brew.conv(model, v, 'conv5', 1024, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=2, stride=2)
    # Layer 6
    v = brew.fc(model, v, 'fc6', dim_in=6*6*1024, dim_out=3072)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)
    # Layer 7
    v = brew.fc(model, v, 'fc7', dim_in=3072, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example 11: forward_pass_builder
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators and layers to the network. It should
    return a list of loss blobs that are used for computing the loss
    gradient. The function is also passed an internally calculated
    loss_scale parameter that is used to scale the loss to normalize for
    the number of GPUs. Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'
    v = brew.conv(model, v, 'conv1', 3, 96, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.lrn(model, v, 'norm1', size=5, alpha=0.0001, beta=0.75)
    v = brew.max_pool(model, v, 'pool1', kernel=3, stride=2)
    v = brew.conv(model, v, 'conv2', 96, 256, kernel=5, pad=2, group=1)
    v = brew.relu(model, v, 'relu2')
    v = brew.lrn(model, v, 'norm2', size=5, alpha=0.0001, beta=0.75)
    v = brew.max_pool(model, v, 'pool2', kernel=3, stride=2)
    v = brew.conv(model, v, 'conv3', 256, 384, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')
    v = brew.conv(model, v, 'conv4', 384, 384, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu4')
    v = brew.conv(model, v, 'conv5', 384, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=3, stride=2)
    v = brew.fc(model, v, 'fc6', dim_in=9216, dim_out=4096)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)
    v = brew.fc(model, v, 'fc7', dim_in=4096, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)
    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example 12: Add_Original_CIFAR10_Model
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def Add_Original_CIFAR10_Model(model, data, num_classes, image_height, image_width, image_channels):
    # Convolutional layer 1
    conv1 = brew.conv(model, data, 'conv1', dim_in=image_channels, dim_out=32, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=image_height, width=image_width, kernel=5, stride=1, pad=2)
    # Pooling layer 1
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # ReLU layer 1
    relu1 = brew.relu(model, pool1, 'relu1')
    # Convolutional layer 2
    conv2 = brew.conv(model, relu1, 'conv2', dim_in=32, dim_out=32, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 2
    relu2 = brew.relu(model, conv2, 'relu2')
    # Pooling layer 2
    pool2 = brew.average_pool(model, relu2, 'pool2', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Convolutional layer 3
    conv3 = brew.conv(model, pool2, 'conv3', dim_in=32, dim_out=64, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 3
    relu3 = brew.relu(model, conv3, 'relu3')
    # Pooling layer 3
    pool3 = brew.average_pool(model, relu3, 'pool3', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Fully connected layers
    fc1 = brew.fc(model, pool3, 'fc1', dim_in=64*h*w, dim_out=64)
    fc2 = brew.fc(model, fc1, 'fc2', dim_in=64, dim_out=num_classes)
    # Softmax layer
    softmax = brew.softmax(model, fc2, 'softmax')
    return softmax
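update_dims tracks the spatial dimensions as the image flows through the network; it is not defined in this excerpt. A minimal sketch consistent with how it is called above (the standard output-size formula with floor division):

def update_dims(height, width, kernel, stride, pad):
    # out = (in + 2*pad - kernel) // stride + 1, for both conv and pooling
    new_height = (height + 2 * pad - kernel) // stride + 1
    new_width = (width + 2 * pad - kernel) // stride + 1
    return new_height, new_width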
# ## Test Saved Model From Part 1
#
# ### Construct Model for Testing
#
# The first thing we need is a model helper object that we can attach the lmdb reader to.
# Create a ModelHelper object with init_params=False
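A minimal sketch of the step this comment describes (the db path, batch size, and model name are illustrative):

arg_scope = {"order": "NCHW"}
# init_params=False: parameters come from the saved init net rather than
# being re-initialized
test_model = model_helper.ModelHelper(
    name="cifar10_test", arg_scope=arg_scope, init_params=False)
# attach the lmdb reader mentioned above
data, label = test_model.TensorProtosDBInput(
    [], ["data", "label"], batch_size=1,
    db="testing_lmdb", db_type="lmdb")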
Example 13: resnet
# Required import: from caffe2.python import brew [as alias]
# Or: from caffe2.python.brew import max_pool [as alias]
def resnet(self, model, units, num_stages, filter_list, bottle_neck=True, bn_mom=0.9,
           is_inference=False, loss_scale=1.0):
    """Builds a ResNet network.

    Parameters
    ----------
    units : list
        Number of residual units in each stage
    num_stages : int
        Number of stages
    filter_list : list
        Channel size of each stage
    bottle_neck : bool
        Whether to use bottleneck residual units
    bn_mom : float
        Momentum of the batch normalization layers
    is_inference : bool
        Whether the network is built for inference
    loss_scale : float
        Scale factor applied to the loss
    """
    num_unit = len(units)
    assert num_unit == num_stages
    v = 'data'
    (nchannel, _, _) = self.input_shape  # (nchannel, height, width)
    v = brew.conv(model, v, 'conv0', nchannel, filter_list[0], kernel=7, pad=3,
                  stride=2, no_bias=True)
    v = brew.spatial_bn(model, v, 'bn0', filter_list[0], eps=2e-5, momentum=bn_mom,
                        is_test=is_inference)
    v = brew.relu(model, v, 'relu0')
    v = brew.max_pool(model, v, 'pool0', kernel=3, stride=2, pad=1)
    dim_in = filter_list[0]
    for i in range(num_stages):
        v = self.residual_unit(model, v, dim_in, filter_list[i+1], stride=(1 if i == 0 else 2),
                               dim_match=False,
                               name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck,
                               is_inference=is_inference)
        dim_in = filter_list[i+1]
        for j in range(units[i]-1):
            v = self.residual_unit(model, v, dim_in, filter_list[i+1], 1, True,
                                   name='stage%d_unit%d' % (i + 1, j + 2),
                                   bottle_neck=bottle_neck, is_inference=is_inference)
    v = brew.average_pool(model, v, 'pool1', kernel=7, global_pool=True)
    return self.add_head_nodes(model, v, dim_in, 'classifier', loss_scale=loss_scale)
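residual_unit belongs to the same class and is not shown here. A minimal non-bottleneck sketch of the pattern it implements (two conv-BN stages plus an identity or 1x1-projection shortcut; the BN epsilon follows the bn0 line above, the rest of the naming is an assumption):

def residual_unit(self, model, v, dim_in, num_filter, stride, dim_match,
                  name, bottle_neck=True, is_inference=False):
    # main branch: 3x3 conv - BN - ReLU - 3x3 conv - BN
    branch = brew.conv(model, v, name + '_conv1', dim_in, num_filter,
                       kernel=3, pad=1, stride=stride, no_bias=True)
    branch = brew.spatial_bn(model, branch, name + '_bn1', num_filter,
                             eps=2e-5, is_test=is_inference)
    branch = brew.relu(model, branch, name + '_relu1')
    branch = brew.conv(model, branch, name + '_conv2', num_filter, num_filter,
                       kernel=3, pad=1, no_bias=True)
    branch = brew.spatial_bn(model, branch, name + '_bn2', num_filter,
                             eps=2e-5, is_test=is_inference)
    # shortcut: identity when shapes match, 1x1 projection otherwise
    shortcut = v if dim_match else brew.conv(
        model, v, name + '_sc', dim_in, num_filter,
        kernel=1, stride=stride, no_bias=True)
    # elementwise sum followed by ReLU
    v = model.net.Sum([branch, shortcut], name + '_sum')
    return brew.relu(model, v, name + '_relu')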