This article collects typical usage examples of the Python method modeling.generate_anchors.generate_anchors. If you are wondering how generate_anchors.generate_anchors is used in Python and what it is good for, the curated examples below may help; you can also read further about its containing module, modeling.generate_anchors.
The following presents 7 code examples of the generate_anchors.generate_anchors method, sorted by popularity by default.
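Before the examples, here is a minimal standalone sketch of calling the method directly. The keyword arguments (stride, sizes, aspect_ratios) and the (N, 4) return shape are inferred from the examples below; treat them as assumptions rather than a definitive API reference.
# Minimal sketch; the signature and sample argument values are assumptions inferred from the examples below
from modeling.generate_anchors import generate_anchors

anchors = generate_anchors(
    stride=16.,                      # feature stride in input pixels (illustrative value)
    sizes=(32, 64, 128, 256, 512),   # anchor sizes (illustrative values)
    aspect_ratios=(0.5, 1.0, 2.0),   # height/width ratios (illustrative values)
)
# Expect one anchor per (size, aspect_ratio) pair, stored as (x1, y1, x2, y2) rows
print(anchors.shape)                 # (15, 4) with the values above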
Example 1: __init__
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def __init__(self, dim_in, spatial_scale):
    super().__init__()
    self.dim_in = dim_in
    self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM
    anchors = generate_anchors(
        stride=1. / spatial_scale,
        sizes=cfg.RPN.SIZES,
        aspect_ratios=cfg.RPN.ASPECT_RATIOS)
    num_anchors = anchors.shape[0]
    # RPN hidden representation
    self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
    # Proposal classification scores
    self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
        else num_anchors
    self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
    # Proposal bbox regression deltas
    self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)
    self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)
    self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()
    self._init_weights()
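As a usage note for the head above: the number of cell anchors returned by generate_anchors fixes the channel counts of the two 1x1 prediction convs. Below is a small sketch of that arithmetic, assuming typical Faster R-CNN values for cfg.RPN.SIZES and cfg.RPN.ASPECT_RATIOS (the real values come from the config):
# Hypothetical config values for illustration only; the real ones live in cfg.RPN.*
sizes = (32, 64, 128, 256, 512)
aspect_ratios = (0.5, 1.0, 2.0)
num_anchors = len(sizes) * len(aspect_ratios)   # 15 anchors per feature-map cell

n_score_out_softmax = num_anchors * 2           # two logits (bg/fg) per anchor
n_score_out_sigmoid = num_anchors               # one logit per anchor
n_bbox_out = num_anchors * 4                    # four box-regression deltas per anchor
print(n_score_out_softmax, n_score_out_sigmoid, n_bbox_out)   # 30 15 60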
Example 2: __init__
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def __init__(self, dim_in, spatial_scales):
    super().__init__()
    self.dim_in = dim_in
    self.spatial_scales = spatial_scales
    self.dim_out = self.dim_in
    num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
    # Create conv ops shared by all FPN levels
    self.FPN_RPN_conv = nn.Conv2d(dim_in, self.dim_out, 3, 1, 1)
    dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
        else num_anchors
    self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)
    self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1, 0)
    self.GenerateProposals_modules = nn.ModuleList()
    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
    for lvl in range(k_min, k_max + 1):
        sc = self.spatial_scales[k_max - lvl]  # in reversed order
        lvl_anchors = generate_anchors(
            stride=2. ** lvl,
            sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - k_min), ),
            aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
        )
        self.GenerateProposals_modules.append(GenerateProposalsOp(lvl_anchors, sc))
    self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()
    self._init_weights()
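To make the per-level anchors concrete, the sketch below prints the stride and the single anchor size used at each FPN level, assuming the common defaults RPN_MIN_LEVEL=2, RPN_MAX_LEVEL=6, and RPN_ANCHOR_START_SIZE=32 (the real values come from cfg.FPN.*):
# Assumed defaults for illustration; the real values come from cfg.FPN.*
k_min, k_max, start_size = 2, 6, 32

for lvl in range(k_min, k_max + 1):
    stride = 2. ** lvl                        # feature stride at this pyramid level
    size = start_size * 2. ** (lvl - k_min)   # anchor size doubles per level
    print(f"P{lvl}: stride={stride:.0f}, anchor size={size:.0f}")
# P2: stride=4, anchor size=32  ...  P6: stride=64, anchor size=512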
Example 3: __init__
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def __init__(self, dim_in, spatial_scales):
    super().__init__()
    self.dim_in = dim_in
    self.spatial_scales = spatial_scales
    self.dim_out = self.dim_in
    num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
    # Create conv ops shared by all FPN levels
    self.FPN_RPN_conv = nn.Conv2d(dim_in, self.dim_out, 3, 1, 1)
    dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
        else num_anchors
    self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)
    self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1, 0)
    self.GenerateProposals_modules = nn.ModuleList()
    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
    for lvl in range(k_min, k_max + 1):
        sc = self.spatial_scales[k_max - lvl]  # in reversed order
        lvl_anchors = generate_anchors(
            stride=2. ** lvl,
            sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - k_min),),
            aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
        )
        self.GenerateProposals_modules.append(GenerateProposalsOp(lvl_anchors, sc))
    self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()
    self._init_weights()
Example 4: _create_cell_anchors
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def _create_cell_anchors():
    """
    Generate all types of anchors for all fpn levels/scales/aspect ratios.
    This function is called only once at the beginning of inference.
    """
    k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
    scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
    aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
    anchor_scale = cfg.RETINANET.ANCHOR_SCALE
    A = scales_per_octave * len(aspect_ratios)
    anchors = {}
    for lvl in range(k_min, k_max + 1):
        # create cell anchors array
        stride = 2. ** lvl
        cell_anchors = np.zeros((A, 4))
        a = 0
        for octave in range(scales_per_octave):
            octave_scale = 2 ** (octave / float(scales_per_octave))
            for aspect in aspect_ratios:
                anchor_sizes = (stride * octave_scale * anchor_scale, )
                anchor_aspect_ratios = (aspect, )
                cell_anchors[a, :] = generate_anchors(
                    stride=stride, sizes=anchor_sizes,
                    aspect_ratios=anchor_aspect_ratios)
                a += 1
        anchors[lvl] = cell_anchors
    return anchors
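To see what the nested loops above enumerate, here is a sketch of the size/aspect-ratio grid for a single level, assuming the RetinaNet defaults SCALES_PER_OCTAVE=3, ASPECT_RATIOS=(0.5, 1.0, 2.0), and ANCHOR_SCALE=4 (the real values come from cfg.RETINANET.*):
# Assumed RetinaNet defaults for illustration; the real values come from cfg.RETINANET.*
scales_per_octave = 3
aspect_ratios = (0.5, 1.0, 2.0)
anchor_scale = 4
A = scales_per_octave * len(aspect_ratios)     # 9 cell anchors per location

lvl = 3                                        # one example FPN level
stride = 2. ** lvl                             # 8 pixels at level 3
for octave in range(scales_per_octave):
    octave_scale = 2 ** (octave / float(scales_per_octave))   # 1, 2**(1/3), 2**(2/3)
    for aspect in aspect_ratios:
        size = stride * octave_scale * anchor_scale
        print(f"octave {octave}, aspect {aspect}: anchor size {size:.1f}")
print("cell anchors per location:", A)         # 9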
Example 5: get_anchors
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def get_anchors(spatial_scale):
    anchors = generate_anchors.generate_anchors(
        stride=1. / spatial_scale,
        sizes=cfg.RPN.SIZES,
        aspect_ratios=cfg.RPN.ASPECT_RATIOS).astype(np.float32)
    return anchors
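A possible call, assuming a stride-16 feature map (i.e. spatial_scale = 1/16) and the same cfg defaults as in the earlier examples:
# Hypothetical usage; spatial_scale = 1/16 corresponds to a stride-16 feature map
anchors = get_anchors(spatial_scale=1. / 16.)
print(anchors.shape, anchors.dtype)   # (num_anchors, 4) float32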
Example 6: create_cell_anchors
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def create_cell_anchors():
    """
    Generate all types of anchors for all fpn levels/scales/aspect ratios.
    This function is called only once at the beginning of inference.
    """
    k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
    scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
    aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
    anchor_scale = cfg.RETINANET.ANCHOR_SCALE
    A = scales_per_octave * len(aspect_ratios)
    anchors = {}
    for lvl in range(k_min, k_max + 1):
        # create cell anchors array
        stride = 2. ** lvl
        cell_anchors = np.zeros((A, 4))
        a = 0
        for octave in range(scales_per_octave):
            octave_scale = 2 ** (octave / float(scales_per_octave))
            for aspect in aspect_ratios:
                anchor_sizes = (stride * octave_scale * anchor_scale, )
                anchor_aspect_ratios = (aspect, )
                cell_anchors[a, :] = generate_anchors(
                    stride=stride, sizes=anchor_sizes,
                    aspect_ratios=anchor_aspect_ratios)
                a += 1
        anchors[lvl] = cell_anchors
    return anchors
Example 7: get_field_of_anchors
# Required import: from modeling import generate_anchors [as alias]
# or: from modeling.generate_anchors import generate_anchors [as alias]
def get_field_of_anchors(
    stride, anchor_sizes, anchor_aspect_ratios, octave=None, aspect=None
):
    global _threadlocal_foa
    if not hasattr(_threadlocal_foa, 'cache'):
        _threadlocal_foa.cache = {}
    cache_key = str(stride) + str(anchor_sizes) + str(anchor_aspect_ratios)
    if cache_key in _threadlocal_foa.cache:
        return _threadlocal_foa.cache[cache_key]
    # Anchors at a single feature cell
    cell_anchors = generate_anchors(
        stride=stride, sizes=anchor_sizes, aspect_ratios=anchor_aspect_ratios
    )
    num_cell_anchors = cell_anchors.shape[0]
    # Generate canonical proposals from shifted anchors
    # Enumerate all shifted positions on the (H, W) grid
    fpn_max_size = cfg.FPN.COARSEST_STRIDE * np.ceil(
        cfg.TRAIN.MAX_SIZE / float(cfg.FPN.COARSEST_STRIDE)
    )
    field_size = int(np.ceil(fpn_max_size / float(stride)))
    shifts = np.arange(0, field_size) * stride
    shift_x, shift_y = np.meshgrid(shifts, shifts)
    shift_x = shift_x.ravel()
    shift_y = shift_y.ravel()
    shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
    # Broadcast anchors over shifts to enumerate all anchors at all positions
    # in the (H, W) grid:
    #   - add A cell anchors of shape (1, A, 4) to
    #   - K shifts of shape (K, 1, 4) to get
    #   - all shifted anchors of shape (K, A, 4)
    #   - reshape to (K*A, 4) shifted anchors
    A = num_cell_anchors
    K = shifts.shape[0]
    field_of_anchors = (
        cell_anchors.reshape((1, A, 4)) +
        shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    )
    field_of_anchors = field_of_anchors.reshape((K * A, 4))
    foa = FieldOfAnchors(
        field_of_anchors=field_of_anchors.astype(np.float32),
        num_cell_anchors=num_cell_anchors,
        stride=stride,
        field_size=field_size,
        octave=octave,
        aspect=aspect
    )
    _threadlocal_foa.cache[cache_key] = foa
    return foa
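The broadcasting step is the heart of this example. Below is an isolated NumPy sketch of the same trick with tiny made-up numbers (2 cell anchors on a 3x3 grid), independent of cfg and of the FieldOfAnchors container:
import numpy as np

# Two made-up cell anchors in (x1, y1, x2, y2) form, roughly centered on the origin
cell_anchors = np.array([[-8., -8., 8., 8.],
                         [-16., -8., 16., 8.]])
A = cell_anchors.shape[0]

stride, field_size = 16, 3                      # tiny 3x3 feature grid for illustration
shifts_1d = np.arange(field_size) * stride
shift_x, shift_y = np.meshgrid(shifts_1d, shifts_1d)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
K = shifts.shape[0]                             # 9 grid positions

# (1, A, 4) + (K, 1, 4) broadcasts to (K, A, 4): every cell anchor at every position
field_of_anchors = cell_anchors.reshape((1, A, 4)) + shifts.reshape((K, 1, 4))
field_of_anchors = field_of_anchors.reshape((K * A, 4))
print(field_of_anchors.shape)                   # (18, 4)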