This article collects typical usage examples of the mxnet.nd.transpose method in Python. If you have been wondering how exactly nd.transpose is used in Python, or what calling it looks like in practice, the curated method examples here may help. You can also explore further usage examples of the module it belongs to, mxnet.nd.
Below are 15 code examples of the nd.transpose method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
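Before the examples, a minimal sketch of the call itself (illustrative only, not taken from any of the projects below): nd.transpose permutes the axes of an NDArray according to the axes argument; with no axes given, it reverses them.

from mxnet import nd

x = nd.arange(24).reshape((2, 3, 4))   # shape (2, 3, 4)
y = nd.transpose(x, axes=(0, 2, 1))    # swap the last two axes -> shape (2, 4, 3)
z = nd.transpose(x)                    # no axes: reverse all axes -> shape (4, 3, 2)
print(y.shape, z.shape)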
Example 1: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def forward(self, x):
    """
    return shape: (batch_size, 2000, 2)
    """
    # Encode layer
    question = x[:, 0:30]
    question = self.Embed(question)
    question = self.gru(question)
    # Interaction layer
    interaction = nd.dot(question, self.topic_embedding.data())
    interaction = nd.transpose(interaction, axes=(0, 2, 1))
    # batch_size is assumed to be a module-level constant in the original project.
    interaction = interaction.reshape((batch_size * 2000, -1))
    # interaction = interaction.expand_dims(axis=1)
    # print("interaction done")
    # Aggregation layer
    # interaction = self.pooling(self.conv_2(self.conv_1(interaction)))
    # print("agg done")
    res = self.mlp_2(self.mlp_1(interaction))
    res = res.reshape((batch_size, 2000))
    return res
# Train Model
Example 2: __getitem__
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def __getitem__(self, idx):
    img_path = self.data_frame.iloc[idx, 0]
    img = cv2.imread(img_path, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    x, y, w, h = self.data_frame.iloc[idx, 1:5]
    l, t, ww, hh = enlarge_bbox(x, y, w, h, self.enlarge_factor)
    r, b = l + ww, t + hh
    img = img[t:b, l:r, :]
    img = cv2.resize(img, (self.img_size, self.img_size))
    img = img.astype(np.float32) - 127.5
    # HWC -> CHW for MXNet
    img = nd.transpose(nd.array(img), (2, 0, 1))
    label_path = img_path.replace('.jpg', '.mat')
    label = sio.loadmat(label_path)
    params_shape = label['Shape_Para'].astype(np.float32).ravel()
    params_exp = label['Exp_Para'].astype(np.float32).ravel()
    return img, params_shape, params_exp
Example 3: _convert_bbox
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def _convert_bbox(self, delta, anchor):
    """Convert regression output (loc) to predicted positions.

    Parameters
    ----------
    delta : ndarray or np.ndarray
        network output
    anchor : np.ndarray
        generated anchor locations

    Returns
    -------
    np.ndarray
        predicted positions adjusted by the anchors
    """
    delta = nd.transpose(delta, axes=(1, 2, 3, 0))
    delta = nd.reshape(delta, shape=(4, -1))
    delta = delta.asnumpy()
    delta[0, :] = delta[0, :] * anchor[:, 2] + anchor[:, 0]
    delta[1, :] = delta[1, :] * anchor[:, 3] + anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * anchor[:, 3]
    return delta
Example 4: _convert_score
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def _convert_score(self, score):
    """Convert classification output (cls) to scores.

    Parameters
    ----------
    score : ndarray
        network output

    Returns
    -------
    np.ndarray
        feature-map scores obtained through softmax
    """
    score = nd.transpose(score, axes=(1, 2, 3, 0))
    score = nd.reshape(score, shape=(2, -1))
    score = nd.transpose(score, axes=(1, 0))
    score = nd.softmax(score, axis=1)
    score = nd.slice_axis(score, axis=1, begin=1, end=2)
    score = nd.squeeze(score, axis=1)
    return score.asnumpy()
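The transpose/reshape sequence in Examples 3 and 4 is easier to follow as a shape walk-through. The sketch below uses hypothetical sizes (K anchors per position, an H x W score map); only the shapes matter, not the values.

from mxnet import nd

K, H, W = 5, 25, 25                              # hypothetical anchor/feature-map sizes
score = nd.random.uniform(shape=(1, 2 * K, H, W))
score = nd.transpose(score, axes=(1, 2, 3, 0))   # (2K, H, W, 1): batch axis moved last
score = nd.reshape(score, shape=(2, -1))         # (2, K*H*W): two class rows
score = nd.transpose(score, axes=(1, 0))         # (K*H*W, 2): one row per anchor position
score = nd.softmax(score, axis=1)                # normalize the two class scores
print(score.shape)                               # (3125, 2)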
Example 5: load_data_fashion_mnist
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """Download the Fashion-MNIST dataset and then load it into memory."""
    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # Change data from batch x height x width x channel
        # to batch x channel x height x width.
        return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')
    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion. Note: this `DataLoader` is presumably a
    # project-specific wrapper, since gluon.data.DataLoader has no `transform` argument.
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data)
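The key line in this loader is the NHWC-to-NCHW conversion; a self-contained illustration of just that step (the batch shape is made up):

from mxnet import nd

batch = nd.zeros((32, 28, 28, 1))           # batch x height x width x channel (NHWC)
batch = nd.transpose(batch, (0, 3, 1, 2))   # batch x channel x height x width (NCHW)
print(batch.shape)                          # (32, 1, 28, 28)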
Example 6: load_data_mnist
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def load_data_mnist(batch_size, resize=None, root="~/.mxnet/datasets/mnist"):
    """Download the MNIST dataset and then load it into memory."""
    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # Change data from batch x height x width x channel
        # to batch x channel x height x width.
        return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')
    mnist_train = gluon.data.vision.MNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.MNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion (same project-specific DataLoader as above).
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data)
Example 7: net_define
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=2, dropout=0.2))
        # `transpose` here is a custom Block from this project, not a gluon.nn layer;
        # see the sketch after this example.
        net.add(transpose(axes=(0, 2, 1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH, 1)))
        # net.add(nn.Conv2D(128, kernel_size=(101, 1), padding=(50, 0), groups=128, activation='relu'))
        net.add(PrimeConvCap(8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # net.add(AdvConvCap(8, 32, 8, 32, kernel_size=(1, 1), padding=(0, 0)))
        net.add(CapFullyBlock(8 * (config.MAX_LENGTH) / 2, num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8 * (config.MAX_LENGTH - 8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
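The transpose(axes=(0, 2, 1)) added as a layer in Examples 7 and 8 is not part of gluon.nn; it is a small custom Block defined elsewhere in that project. A minimal sketch of what such a wrapper might look like (an assumption, for illustration only):

from mxnet import nd
from mxnet.gluon import nn

class transpose(nn.Block):
    """Hypothetical wrapper so nd.transpose can be used inside a Sequential model."""
    def __init__(self, axes, **kwargs):
        super(transpose, self).__init__(**kwargs)
        self.axes = axes

    def forward(self, x):
        # Permute e.g. the GRU output from (batch, time, channels) to (batch, channels, time).
        return nd.transpose(x, axes=self.axes)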
Example 8: net_define_eu
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        net.add(nn.GlobalMaxPool1D())
        '''
        net.add(FeatureBlock1())
        '''
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
Example 9: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def forward(self, x):
    # The 1-D conv branch expects (batch, channels, time), so transpose first.
    x_t = nd.transpose(x, axes=(0, 2, 1))
    conv3_out = self.conv3(x_t)
    conv5_out = self.conv5(conv3_out) + conv3_out
    conv7_out = self.conv7(conv5_out) + conv5_out
    # conv_out = nd.concat(*[conv3_out, conv5_out, conv7_out], dim=1)
    conv_out = self.conv_drop(conv7_out)
    conv_max_pooled = self.conv_maxpool(conv_out)
    # The GRU branch runs on the original (batch, time, channels) layout,
    # then its output is transposed back before pooling over time.
    gru_out = self.gru(x)
    gru_out_t = nd.transpose(gru_out, axes=(0, 2, 1))
    # gru_pooled = nd.transpose(gru_out, axes=(0, 2, 1))
    # gru_maxpooled = self.gru_post_max(gru_out_t)
    # return gru_maxpooled
    # gru_avepooled = self.gru_post_ave(gru_out_t)
    # gru_pooled = nd.concat(*[gru_maxpooled, gru_avepooled], dim=1)
    # gru_pooled = nd.concat(*[gru_maxpooled, gru_avepooled], dim=1)
    gru_maxpooled = self.gru_maxpool(gru_out_t)
    # gru_avepooled = self.gru_maxpool(gru_out_t)
    # gru_pooled = nd.concat(*[gru_maxpooled, gru_avepooled], dim=1)
    # conv_ave_pooled = self.conv_avepool(conv_out)
    concated_feature = nd.concat(*[gru_maxpooled, conv_max_pooled], dim=1)
    return concated_feature
Example 10: transformer
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def transformer(data, label):
    jitter_param = 0.4
    lighting_param = 0.1
    im = data
    auglist = image.CreateAugmenter(data_shape=(3, 224, 224),
                                    rand_crop=True,
                                    rand_resize=True,
                                    rand_mirror=True,
                                    brightness=jitter_param,
                                    saturation=jitter_param,
                                    contrast=jitter_param,
                                    pca_noise=lighting_param,
                                    mean=True,
                                    std=True)
    for aug in auglist:
        im = aug(im)
    # HWC -> CHW for MXNet
    im = nd.transpose(im, (2, 0, 1))
    return im, label
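Such a transformer(data, label) function is typically handed to an image dataset so the augmentation runs once per sample; a usage sketch under assumptions (the directory path and batch size are hypothetical):

from mxnet import gluon

train_set = gluon.data.vision.ImageFolderDataset('./train', transform=transformer)
train_loader = gluon.data.DataLoader(train_set, batch_size=32, shuffle=True)
for im_batch, label_batch in train_loader:
    print(im_batch.shape)   # (32, 3, 224, 224) after the augmenters and the transpose
    break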
Example 11: load_data_fashion_mnist
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """Download the Fashion-MNIST dataset and then load it into memory."""
    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # Change data from batch x height x width x channel
        # to batch x channel x height x width.
        return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')
    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion (project-specific DataLoader with a transform argument).
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data)
Example 12: load_data_fashion_mnist
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and then load it into memory."""
    def transform_mnist(data, label):
        if resize:
            # Resize to resize x resize.
            data = image.imresize(data, resize, resize)
        # Change data from height x width x channel to channel x height x width.
        return nd.transpose(data.astype('float32'), (2, 0, 1)) / 255, label.astype('float32')
    mnist_train = gluon.data.vision.FashionMNIST(root='./data',
                                                 train=True, transform=transform_mnist)
    mnist_test = gluon.data.vision.FashionMNIST(root='./data',
                                                train=False, transform=transform_mnist)
    train_data = gluon.data.DataLoader(
        mnist_train, batch_size, shuffle=True)
    test_data = gluon.data.DataLoader(
        mnist_test, batch_size, shuffle=False)
    return (train_data, test_data)
Example 13: load_data_mnist
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def load_data_mnist(batch_size, resize=None):
    """Download the MNIST dataset and then load it into memory."""
    def transform_mnist(data, label):
        if resize:
            # Resize to resize x resize.
            data = image.imresize(data, resize, resize)
        # Change data from height x width x channel to channel x height x width.
        return nd.transpose(data.astype('float32'), (2, 0, 1)) / 255, label.astype('float32')
    mnist_train = gluon.data.vision.MNIST(root='./data',
                                          train=True, transform=transform_mnist)
    mnist_test = gluon.data.vision.MNIST(root='./data',
                                         train=False, transform=transform_mnist)
    train_data = gluon.data.DataLoader(
        mnist_train, batch_size, shuffle=True)
    test_data = gluon.data.DataLoader(
        mnist_test, batch_size, shuffle=False)
    return (train_data, test_data)
Example 14: transform
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def transform(data, label):
    # HWC -> CHW and scale pixel values to [0, 1].
    return nd.transpose(data.astype(np.float32), (2, 0, 1)) / 255, label.astype(np.float32)
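A typical way such a per-sample transform is used (an illustrative sketch, not from the source project): pass it to a Gluon vision dataset, and the HWC uint8 image comes back as a CHW float tensor scaled to [0, 1].

from mxnet import gluon

mnist_train = gluon.data.vision.MNIST(train=True, transform=transform)
data, label = mnist_train[0]
print(data.shape, label)   # (1, 28, 28) and the class index as float32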
Example 15: transform
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import transpose [as alias]
def transform(data, target_wd, target_ht, is_train, box):
    """Crop and normalize an image nd array."""
    if box is not None:
        x, y, w, h = box
        data = data[y:min(y + h, data.shape[0]), x:min(x + w, data.shape[1])]
    # Resize to target_wd * target_ht.
    data = mx.image.imresize(data, target_wd, target_ht)
    # Normalize in the same way as the pre-trained model.
    data = data.astype(np.float32) / 255.0
    data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225])
    if is_train:
        if random.random() < 0.5:
            data = nd.flip(data, axis=1)
        data, _ = mx.image.random_crop(data, (224, 224))
    else:
        data, _ = mx.image.center_crop(data, (224, 224))
    # Transpose from (target_wd, target_ht, 3)
    # to (3, target_wd, target_ht).
    data = nd.transpose(data, (2, 0, 1))
    # If the image is greyscale, repeat it 3 times to get an RGB image.
    if data.shape[0] == 1:
        data = nd.tile(data, (3, 1, 1))
    return data.reshape((1,) + data.shape)
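An illustrative call under assumed inputs (the file name is hypothetical): resize an image to 256 x 256, apply the training-time flip and random crop, and get back a single-image NCHW batch ready for a pre-trained network.

import mxnet as mx

img = mx.image.imread('face.jpg')                         # hypothetical RGB image, HWC uint8
batch = transform(img, 256, 256, is_train=True, box=None)
print(batch.shape)                                        # (1, 3, 224, 224)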