This page collects typical usage examples of Python's mxnet.gluon. If you are unsure how to use mxnet.gluon, what it is for, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the containing mxnet package.
The following presents 14 code examples of mxnet.gluon, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: get_descriptor
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def get_descriptor(ctx):
    """ construct and return descriptor """
    d_net = gluon.nn.Sequential()
    with d_net.name_scope():
        d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))
        d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))
        d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))
        d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx))
        d_net.add(gluon.nn.LeakyReLU(0.2))
        d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx))
    return d_net
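SNConv2D is a custom spectral-norm convolution block defined elsewhere in this example, so the function above is not runnable on its own. As a minimal sketch (an assumption, not the original SNGAN code), the same discriminator shape can be exercised with plain gluon.nn.Conv2D layers standing in for SNConv2D:

import mxnet as mx
from mxnet import gluon

# Sketch only: plain Conv2D stands in for the custom SNConv2D used above.
d_net = gluon.nn.Sequential()
with d_net.name_scope():
    for channels in (64, 128, 256, 512):
        d_net.add(gluon.nn.Conv2D(channels, kernel_size=4, strides=2, padding=1))
        d_net.add(gluon.nn.LeakyReLU(0.2))
    d_net.add(gluon.nn.Conv2D(1, kernel_size=4, strides=1, padding=0))

d_net.initialize(ctx=mx.cpu())
x = mx.nd.random.normal(shape=(8, 3, 64, 64))  # a batch of 64x64 RGB images
print(d_net(x).shape)                          # -> (8, 1, 1, 1)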
Example 2: test_exc_gluon
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def test_exc_gluon():
    def gluon(exec_wait=True):
        model = nn.Sequential()
        model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
        model.add(nn.Dropout(1))
        # The mismatched in_units below and the negative scale passed to
        # random.normal are deliberate: the test expects an MXNetError.
        model.add(nn.Dense(64, activation='tanh', in_units=256),
                  nn.Dense(32, in_units=64))
        x = mx.sym.var('data')
        y = model(x)
        model.collect_params().initialize(ctx=[default_context()])
        z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
        if exec_wait:
            z.wait_to_read()

    gluon(exec_wait=False)
    assert_raises(MXNetError, gluon, True)
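The broken layer sizes and the negative scale passed to random.normal are what this test is about: MXNet executes NDArray operations asynchronously, so such errors only surface once a result is synchronized. A minimal standalone sketch of that behaviour (not part of the test itself):

import mxnet as mx
from mxnet.base import MXNetError

# The invalid negative scale is enqueued without complaint; the error is
# raised only when the result is synchronized.
x = mx.nd.random.normal(10, -10, (2, 2))
try:
    x.wait_to_read()
except MXNetError:
    print('deferred MXNetError surfaced at synchronization time')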
Example 3: infer
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def infer(ctx, count, val_data, batch_fn, opt, net, batch_size, acc_top1, acc_top5):
    """Inference using gluon"""
    if count > 0:
        val_data.reset()
    for i, batch in enumerate(val_data):
        btic = time.time()
        data, label = batch_fn(batch, ctx)
        outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
        acc_top1.update(label, outputs)
        logging.info('Batch [%d]' % i)
        logging.info('Top 1 accuracy: %f' % acc_top1.get()[1])
        time_taken = time.time() - btic
        if i < 20:
            logging.info('warmup_throughput: %d samples/sec warmup_time %f' % (
                int(batch_size / time_taken), time_taken))
        elif count > 0:
            logging.info('Speed: %d samples/sec Time cost=%f' % (
                int(batch_size / time_taken), time_taken))
    return
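The function expects already-constructed metric objects and an opt namespace with a dtype field. A hypothetical setup sketch for those arguments (names and values are assumptions, not taken from the original script):

from collections import namedtuple
import mxnet as mx

# Hypothetical argument setup; only the fields infer() actually touches are filled.
Opt = namedtuple('Opt', ['dtype'])
opt = Opt(dtype='float32')
ctx = [mx.cpu()]
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(top_k=5)
# val_data and batch_fn would come from a loader such as get_data_rec() below,
# and net from the gluon model zoo, e.g. mx.gluon.model_zoo.vision.resnet50_v1().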
Example 4: _transform_label
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def _transform_label(self, label, height, width):
    label = np.array(label).ravel()
    header_len = int(label[0])  # label header
    label_width = int(label[1])  # the label width for each object, >= 5
    if label_width < 5:
        raise ValueError(
            "Label info for each object should be >= 5, given {}".format(label_width))
    min_len = header_len + 5
    if len(label) < min_len:
        raise ValueError(
            "Expected label length >= {}, got {}".format(min_len, len(label)))
    if (len(label) - header_len) % label_width:
        raise ValueError(
            "Broken label of size {}, cannot reshape into (N, {}) "
            "if header length {} is excluded".format(len(label), label_width, header_len))
    gcv_label = label[header_len:].reshape(-1, label_width)
    # swap columns, gluon-cv requires [xmin-ymin-xmax-ymax-id-extra0-extra1-xxx]
    ids = gcv_label[:, 0].copy()
    gcv_label[:, :4] = gcv_label[:, 1:5]
    gcv_label[:, 4] = ids
    # restore to absolute coordinates
    gcv_label[:, (0, 2)] *= width
    gcv_label[:, (1, 3)] *= height
    return gcv_label
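A small worked sketch of the layout conversion (the numbers are made up): a record label with a 2-entry header and one object stored as [id, xmin, ymin, xmax, ymax] in relative coordinates becomes a gluon-cv row [xmin, ymin, xmax, ymax, id] in absolute pixels:

import numpy as np

# Made-up label: header_len=2, label_width=5, one object with class id 7.
label = np.array([2, 5, 7, 0.1, 0.2, 0.5, 0.6])
gcv = label[2:].reshape(-1, 5)
ids = gcv[:, 0].copy()
gcv[:, :4] = gcv[:, 1:5]
gcv[:, 4] = ids
gcv[:, (0, 2)] *= 400   # image width in pixels
gcv[:, (1, 3)] *= 300   # image height in pixels
print(gcv)              # -> [[ 40.  60. 200. 180.   7.]]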
Example 5: preprocess
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def preprocess(data):
    """Preprocess the image before running it through the network"""
    data = mx.image.imresize(data, image_sz[0], image_sz[1])
    data = data.astype(np.float32)
    data = data / 255
    # These mean values were obtained from
    # https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html
    data = mx.image.color_normalize(data,
                                    mean=mx.nd.array([0.485, 0.456, 0.406]),
                                    std=mx.nd.array([0.229, 0.224, 0.225]))
    data = mx.nd.transpose(data, (2, 0, 1))  # channel first
    return data
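image_sz is defined elsewhere in the serving script; for the gluon model zoo networks it would typically be (224, 224). A hypothetical usage sketch under the assumption that image_sz is defined in the same script as preprocess:

import mxnet as mx
import numpy as np

image_sz = (224, 224)  # assumption; defined elsewhere in the original script

# A random uint8 HWC array stands in for a decoded image (e.g. from mx.image.imread).
img = mx.nd.random.uniform(0, 255, shape=(480, 640, 3)).astype('uint8')
batch = preprocess(img).expand_dims(axis=0)  # add the batch dimension
print(batch.shape)                           # -> (1, 3, 224, 224)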
Example 6: get_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def get_generator():
    """ construct and return generator """
    g_net = gluon.nn.Sequential()
    with g_net.name_scope():
        g_net.add(gluon.nn.Conv2DTranspose(
            channels=512, kernel_size=4, strides=1, padding=0, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=256, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=128, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(
            channels=64, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.BatchNorm())
        g_net.add(gluon.nn.LeakyReLU(0.2))

        g_net.add(gluon.nn.Conv2DTranspose(channels=3, kernel_size=4, strides=2, padding=1, use_bias=False))
        g_net.add(gluon.nn.Activation('tanh'))
    return g_net
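A quick shape-check sketch: with a DCGAN-style latent tensor of shape (N, latent_dim, 1, 1), the stacked transposed convolutions upsample 1x1 -> 4 -> 8 -> 16 -> 32 -> 64, giving 64x64 RGB outputs (latent_dim=100 is an assumption; any channel count works here because the first layer infers its input channels):

import mxnet as mx
from mxnet import gluon

g_net = get_generator()
g_net.initialize(ctx=mx.cpu())

z = mx.nd.random.normal(shape=(4, 100, 1, 1))  # 4 latent vectors
fake = g_net(z)
print(fake.shape)  # -> (4, 3, 64, 64)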
Example 7: get_training_data
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def get_training_data(batch_size):
    """ helper function to get dataloader"""
    return gluon.data.DataLoader(
        CIFAR10(train=True, transform=transformer),
        batch_size=batch_size, shuffle=True, last_batch='discard')
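CIFAR10 (from mxnet.gluon.data.vision) and transformer are defined elsewhere in the example. A plausible sketch of such a transformer for a 64x64 GAN pipeline, plus a call, under the assumption that it lives in the same module as get_training_data:

import mxnet as mx
from mxnet.gluon.data.vision import CIFAR10

def transformer(data, label):
    # resize to 64x64, channel-first float scaled to [-1, 1]
    data = mx.image.imresize(data, 64, 64)
    data = mx.nd.transpose(data.astype('float32'), (2, 0, 1)) / 127.5 - 1.0
    return data, label

train_data = get_training_data(batch_size=32)
for data, label in train_data:
    print(data.shape)  # -> (32, 3, 64, 64)
    break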
Example 8: _transform_label
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def _transform_label(label, height=None, width=None):
    label = np.array(label).ravel()
    header_len = int(label[0])  # label header
    label_width = int(label[1])  # the label width for each object, >= 5
    if label_width < 5:
        raise ValueError(
            "Label info for each object should be >= 5, given {}".format(label_width))
    min_len = header_len + 5
    if len(label) < min_len:
        raise ValueError(
            "Expected label length >= {}, got {}".format(min_len, len(label)))
    if (len(label) - header_len) % label_width:
        raise ValueError(
            "Broken label of size {}, cannot reshape into (N, {}) "
            "if header length {} is excluded".format(len(label), label_width, header_len))
    gcv_label = label[header_len:].reshape(-1, label_width)
    # swap columns, gluon-cv requires [xmin-ymin-xmax-ymax-id-extra0-extra1-xxx]
    ids = gcv_label[:, 0].copy()
    gcv_label[:, :4] = gcv_label[:, 1:5]
    gcv_label[:, 4] = ids
    # restore to absolute coordinates (x columns scale with width, y columns with height)
    if width is not None:
        gcv_label[:, (0, 2)] *= width
    if height is not None:
        gcv_label[:, (1, 3)] *= height
    return gcv_label
Example 9: __init__
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def __init__(self, model_path, gpu_id=None):
    """
    Initialize the gluon model.
    :param model_path: path to the model parameter file
    :param gpu_id: id of the GPU to run on
    """
    info = pickle.load(open(model_path.replace('.params', '.info'), 'rb'))
    print('load {} epoch params'.format(info['epoch']))
    config = info['config']
    alphabet = config['dataset']['alphabet']
    self.ctx = try_gpu(gpu_id)
    self.transform = []
    for t in config['dataset']['train']['dataset']['args']['transforms']:
        if t['type'] in ['ToTensor', 'Normalize']:
            self.transform.append(t)
    self.transform = get_transforms(self.transform)
    self.gpu_id = gpu_id
    img_h, img_w = 32, 100
    for process in config['dataset']['train']['dataset']['args']['pre_processes']:
        if process['type'] == "Resize":
            img_h = process['args']['img_h']
            img_w = process['args']['img_w']
            break
    self.img_w = img_w
    self.img_h = img_h
    self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
    self.alphabet = alphabet
    self.net = get_model(len(alphabet), self.ctx, config['arch']['args'])
    self.net.load_parameters(model_path, self.ctx)
    # self.net = gluon.SymbolBlock.imports('crnn_lite-symbol.json', ['data'], 'crnn_lite-0000.params', ctx=self.ctx)
    self.net.hybridize()
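The commented-out line shows an alternative loading path: importing a network that was previously exported to a symbol JSON plus a params checkpoint. A minimal sketch of that gluon API (the file names are taken from the comment and assumed to exist on disk):

import mxnet as mx
from mxnet import gluon

ctx = mx.cpu()
# Load a graph exported earlier with net.export('crnn_lite', epoch=0).
net = gluon.SymbolBlock.imports('crnn_lite-symbol.json', ['data'],
                                'crnn_lite-0000.params', ctx=ctx)
net.hybridize()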
Example 10: get_data_rec
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def get_data_rec(rec_val, rec_val_idx, batch_size, num_workers):
    """
    Creates and returns an MXNet data iterator together with a function that
    splits each batch across devices (for image-record-iterator input).
    """
    rec_val = os.path.expanduser(rec_val)
    rec_val_idx = os.path.expanduser(rec_val_idx)
    mean_rgb = [123.68, 116.779, 103.939]

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return data, label

    val_data = mx.io.ImageRecordIter(
        path_imgrec=rec_val,
        path_imgidx=rec_val_idx,
        preprocess_threads=num_workers,
        shuffle=False,
        batch_size=batch_size,
        resize=256,
        label_width=1,
        rand_crop=False,
        rand_mirror=False,
        data_shape=(3, 224, 224),
        mean_r=mean_rgb[0],
        mean_g=mean_rgb[1],
        mean_b=mean_rgb[2]
    )
    return val_data, batch_fn
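A hypothetical call sketch (the .rec/.idx paths are placeholders for files created with MXNet's im2rec tool):

import mxnet as mx

val_data, batch_fn = get_data_rec('~/data/val.rec', '~/data/val.idx',
                                  batch_size=64, num_workers=4)
ctx = [mx.cpu()]
for batch in val_data:
    data, label = batch_fn(batch, ctx)    # lists with one NDArray per context
    print(data[0].shape, label[0].shape)  # -> (64, 3, 224, 224) (64,)
    break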
Example 11: get_data_loader
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def get_data_loader(data_dir, batch_size, num_workers, opt):
    """
    Creates and returns an MXNet data loader together with a function that
    splits each batch across devices.
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    if opt.mode == 'symbolic':
        # symbolic mode uses synthetic data; `context` comes from the surrounding script
        val_data = mx.io.NDArrayIter(
            mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224), ctx=context),
            label=mx.nd.array(range(opt.dataset_size)),
            batch_size=batch_size,
        )
    else:
        transform_test = transforms.Compose([
            transforms.Resize(256, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
        val_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return val_data, batch_fn
Example 12: _net2pb
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def _net2pb(net):
    if isinstance(net, HybridBlock):
        # TODO(junwu): may need a more appropriate way to get symbol from a HybridBlock
        if not net._cached_graph:
            raise RuntimeError(
                "Please first call net.hybridize() and then run forward with "
                "this net at least once before calling add_graph().")
        net = net._cached_graph[1]
    elif not isinstance(net, Symbol):
        raise TypeError('only accepts mxnet.gluon.HybridBlock and mxnet.symbol.Symbol '
                        'as input network, received type {}'.format(str(type(net))))
    return _sym2pb(net)
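A minimal sketch of the precondition this helper checks: a HybridBlock only has a cached symbolic graph after hybridize() plus at least one forward pass (_cached_graph is an internal attribute, so this is illustration rather than public API):

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(nn.Dense(10))
net.initialize()
net.hybridize()
net(mx.nd.ones((1, 4)))     # first forward pass populates the cached graph
sym = net._cached_graph[1]  # the mxnet.symbol.Symbol that _net2pb extracts
print(type(sym))            # -> <class 'mxnet.symbol.symbol.Symbol'>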
Example 13: doctest
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def doctest(doctest_namespace):
    doctest_namespace["np"] = np
    doctest_namespace["gluonts"] = gluonts
    doctest_namespace["mx"] = mx
    doctest_namespace["gluon"] = mx.gluon

    import doctest
    doctest.ELLIPSIS_MARKER = "-etc-"
Example 14: cifar10_infer
# Required import: import mxnet [as alias]
# Or: from mxnet import gluon [as alias]
def cifar10_infer(model_name, use_tensorrt, num_workers, ctx=mx.gpu(0), batch_size=128):
    executor = get_classif_model(model_name, use_tensorrt, ctx, batch_size)

    num_ex = 10000
    all_preds = np.zeros([num_ex, 10])
    all_label_test = np.zeros(num_ex)

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
    ])

    data_loader = lambda: gluon.data.DataLoader(
        gluon.data.vision.CIFAR10(train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)

    val_data = data_loader()
    for idx, (data, label) in enumerate(val_data):
        # Skip last batch if it's undersized.
        if data.shape[0] < batch_size:
            continue
        offset = idx * batch_size
        all_label_test[offset:offset + batch_size] = label.asnumpy()

        # warm-up, but don't use result
        executor.forward(is_train=False, data=data)
        executor.outputs[0].wait_to_read()

    gc.collect()

    val_data = data_loader()
    example_ct = 0
    start = time()

    for idx, (data, label) in enumerate(val_data):
        # Skip last batch if it's undersized.
        if data.shape[0] < batch_size:
            continue
        executor.forward(is_train=False, data=data)
        preds = executor.outputs[0].asnumpy()
        offset = idx * batch_size
        all_preds[offset:offset + batch_size, :] = preds[:batch_size]
        example_ct += batch_size

    all_preds = np.argmax(all_preds, axis=1)
    matches = (all_preds[:example_ct] == all_label_test[:example_ct]).sum()
    duration = time() - start

    return duration, 100.0 * matches / example_ct
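A hypothetical call sketch (get_classif_model is defined elsewhere in the TensorRT test file, a GPU context is required, and the model name is assumed to follow the GluonCV model zoo naming):

# Assumed call pattern; 'cifar_resnet20_v1' is a placeholder model name.
duration, accuracy = cifar10_infer('cifar_resnet20_v1', use_tensorrt=True,
                                   num_workers=2)
print('10000 CIFAR-10 images in %.1f s, top-1 accuracy %.2f%%' % (duration, accuracy))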