This article collects typical usage examples of the Python method mxnet.io.DataBatch. If you are unsure what io.DataBatch does, how to call it, or how it is used in practice, the curated method examples below may help. You can also explore further usage examples of the mxnet.io module that this method belongs to.
The following 13 code examples of io.DataBatch are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
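Before the examples, here is a minimal, self-contained sketch of constructing a DataBatch by hand; the array shapes and the data/label names below are purely illustrative:

import mxnet as mx
from mxnet.io import DataBatch, DataDesc

# one batch of 4 RGB 32x32 images with integer labels (shapes are illustrative)
data = [mx.nd.ones((4, 3, 32, 32))]
label = [mx.nd.zeros((4,))]
batch = DataBatch(data=data, label=label, pad=0, index=None,
                  provide_data=[DataDesc('data', data[0].shape)],
                  provide_label=[DataDesc('softmax_label', label[0].shape)])
print(batch)  # DataBatch: data shapes: [(4, 3, 32, 32)] label shapes: [(4,)]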
Example 1: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    """Returns the next batch of data."""
    if self.curr_idx == len(self.idx):
        raise StopIteration
    # i = bucket index, j = starting record within that bucket
    i, j = self.idx[self.curr_idx]
    self.curr_idx += 1
    indices = self.ndindex[i][j:j + self.batch_size]
    sentences = self.ndsent[i][j:j + self.batch_size]
    characters = self.ndchar[i][j:j + self.batch_size]
    label = self.ndlabel[i][j:j + self.batch_size]
    return DataBatch([sentences, characters], [label], pad=0, index=indices,
                     bucket_key=self.buckets[i],
                     provide_data=[DataDesc(name=self.data_names[0], shape=sentences.shape, layout=self.layout),
                                   DataDesc(name=self.data_names[1], shape=characters.shape, layout=self.layout)],
                     provide_label=[DataDesc(name=self.label_name, shape=label.shape, layout=self.layout)])
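Here the bucket_key carries the current bucket's size so that a BucketingModule can pick the matching unrolled symbol for this batch, and the per-batch provide_data/provide_label descriptors are needed because each bucket has its own shapes.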
Example 2: forward
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def forward(self, data_batch, is_train=None):
    #g, x = self.get_params()
    #print('{fc7_weight[0][0]}', self._iter, g['fc7_0_weight'].asnumpy()[0][0])
    #print('{pre_fc1_weight[0][0]}', self._iter, g['pre_fc1_weight'].asnumpy()[0][0])
    assert self.binded and self.params_initialized
    self._curr_module.forward(data_batch, is_train=is_train)
    if is_train:
        self._iter += 1
        fc1, label = self._curr_module.get_outputs(merge_multi_context=True)
        global_fc1 = fc1
        self.global_label = label.as_in_context(self._ctx_cpu)
        for i, _module in enumerate(self._arcface_modules):
            # shift the global label into this sub-module's local class range
            _label = self.global_label - self._ctx_class_start[i]
            db_global_fc1 = io.DataBatch([global_fc1], [_label])
            _module.forward(db_global_fc1)  # fc7 with margin
    #print('forward end')
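The loop above shifts the global label into each sub-module's local class range before forwarding it through that module's margin fc7. A small self-contained illustration of the offset; the class split and label values below are made up, not taken from the snippet:

import mxnet as mx

ctx_class_start = [0, 5000]              # assumed: each sub-module owns 5000 classes
global_label = mx.nd.array([3, 7123])
for i, start in enumerate(ctx_class_start):
    local_label = global_label - start   # the label sub-module i receives
    print(i, local_label.asnumpy())      # 0: [3. 7123.]   1: [-4997. 2123.]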
Example 3: inference
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def inference(self, model_input):
    if self.error is not None:
        return None
    # Check input shape
    check_input_shape(model_input, self.signature)
    self.mx_model.forward(DataBatch([model_input]))
    model_input = self.mx_model.get_outputs()
    # Bypass lazy evaluation: get_outputs returns either a list of NDArrays
    # or a list of lists of NDArrays, so block on every array before returning.
    for d in model_input:
        if isinstance(d, list):
            for n in d:
                if isinstance(n, mx.ndarray.ndarray.NDArray):
                    n.wait_to_read()
        elif isinstance(d, mx.ndarray.ndarray.NDArray):
            d.wait_to_read()
    return model_input
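The forward-then-wait_to_read pattern used above can be shown in isolation; a minimal self-contained sketch in which a trivial fully-connected symbol stands in for the real model (the symbol, shapes and names are assumptions):

import mxnet as mx
from mxnet.io import DataBatch

x = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(x, num_hidden=2, name='fc')
mod = mx.mod.Module(sym, data_names=['data'], label_names=None, context=mx.cpu())
mod.bind(data_shapes=[('data', (1, 4))], for_training=False)
mod.init_params()
mod.forward(DataBatch([mx.nd.ones((1, 4))]), is_train=False)
for out in mod.get_outputs():
    out.wait_to_read()   # block on MXNet's asynchronous execution before reading
print(out.shape)         # (1, 2)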
Example 4: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    self.cur_iter += 1
    if self.cur_iter <= self.max_iter:
        return DataBatch(data=(self.data,),
                         label=(self.label,),
                         pad=0,
                         index=None,
                         provide_data=self.provide_data,
                         provide_label=self.provide_label)
    else:
        raise StopIteration
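Example 4 relies on fields (self.data, self.label, self.max_iter, self.provide_data, self.provide_label) whose setup is not shown. A hedged sketch of what the surrounding iterator's constructor might look like; the class name and field values are assumptions, and next() would be exactly the method above:

import mxnet as mx
from mxnet.io import DataDesc, DataIter

class SyntheticDataIter(DataIter):
    """Hypothetical host class for Example 4: serves one fixed random batch max_iter times."""
    def __init__(self, batch_size=32, data_shape=(3, 224, 224), max_iter=100):
        super(SyntheticDataIter, self).__init__(batch_size)
        self.max_iter, self.cur_iter = max_iter, 0
        self.data = mx.nd.random.uniform(shape=(batch_size,) + data_shape)
        self.label = mx.nd.zeros((batch_size,))
        self.provide_data = [DataDesc('data', self.data.shape)]
        self.provide_label = [DataDesc('softmax_label', self.label.shape)]

    def reset(self):
        self.cur_iter = 0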
Example 5: create_batch
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def create_batch(self, frame):
    """
    :param frame: a (w, h, channels) numpy array (image)
    :return: DataBatch of shape (1, channels, data_shape, data_shape)
    """
    frame_resize = mx.nd.array(cv2.resize(frame, (self.data_shape[0], self.data_shape[1])))
    #frame_resize = mx.img.imresize(frame, self.data_shape[0], self.data_shape[1], cv2.INTER_LINEAR)
    # Change dimensions from (w, h, channels) to (channels, w, h)
    frame_t = mx.nd.transpose(frame_resize, axes=(2, 0, 1))
    frame_norm = frame_t - self.mean_pixels_nd
    # Add a batch dimension, giving (1, channels, w, h)
    batch_frame = [mx.nd.expand_dims(frame_norm, axis=0)]
    batch_shape = [DataDesc('data', batch_frame[0].shape)]
    batch = DataBatch(data=batch_frame, provide_data=batch_shape)
    return batch
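A hedged usage sketch for create_batch in single-image inference; the detector object, the bound module and the image path are assumptions and not part of the original snippet:

import cv2

# Hypothetical usage: `detector` owns create_batch above, `mod` is a bound inference module.
frame = cv2.imread('example.jpg')            # (h, w, 3) image; the path is illustrative
batch = detector.create_batch(frame)
mod.forward(batch, is_train=False)
detections = mod.get_outputs()[0].asnumpy()  # e.g. one row per detection for an SSD-style model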
Example 6: test_DataBatch
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def test_DataBatch():
    from nose.tools import ok_
    from mxnet.io import DataBatch
    import re
    batch = DataBatch(data=[mx.nd.ones((2, 3))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)))
    batch = DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones((7, 8))], label=[mx.nd.ones((4, 5))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)))
Example 7: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    """Return one DataBatch containing "data" and "label"."""
    if self.iter_next():
        self.data, self.label = self._read()
        #for i in range(0, 10):
        #    self.data, self.label = self._read()
        #    d.append(mx.nd.array(self.data[0][1]))
        #    l.append(mx.nd.array(self.label[0][1]))
        res = DataBatch(data=[mx.nd.array(self.data[0][1])],
                        label=[mx.nd.array(self.label[0][1])],
                        pad=self.getpad(), index=None)
        #if self.cursor % 100 == 0:
        #    print "cursor: " + str(self.cursor)
        return res
    else:
        raise StopIteration
Example 8: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    """For the DataBatch definition, see:
    https://mxnet.incubator.apache.org/api/python/io.html#mxnet.io.DataBatch
    """
    return DataBatch(data=(self.data,), label=(self.label,), pad=0, index=None,
                     provide_data=self.provide_data, provide_label=self.provide_label)
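Note that, as shown, this next() never raises StopIteration, so unlike Example 4 the iterator yields the same batch indefinitely; the caller (or wrapper code not shown in the snippet) has to bound the number of batches per epoch.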
Example 9: test_DataBatch
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def test_DataBatch():
    from nose.tools import ok_
    from mxnet.io import DataBatch
    import re
    batch = DataBatch(data=[mx.nd.ones((2, 3))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)))
    batch = DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones((7, 8))], label=[mx.nd.ones((4, 5))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)))
Example 10: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    """Returns the next batch of data."""
    #print('in next', self.cur, self.labelcur)
    #self.nbatch += 1
    batch_size = self.batch_size
    #c, h, w = self.data_shape
    batch_data = nd.empty(self.provide_data[0][1])
    batch_label = nd.empty(self.provide_label[0][1])
    i = 0
    try:
        while i < batch_size:
            item = self.next_sample()
            with open(item['image_path'], 'rb') as fin:
                img = fin.read()
            try:
                #if config.to_gray:
                #    _data = mx.image.imdecode(img, flag=0)  # to gray
                #else:
                #    _data = mx.image.imdecode(img)
                #self.check_valid_image(_data)
                img = np.fromstring(img, np.uint8)
                if config.to_gray:
                    _data = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
                else:
                    _data = cv2.imdecode(img, cv2.IMREAD_COLOR)
                    _data = cv2.cvtColor(_data, cv2.COLOR_BGR2RGB)
                if _data.shape[0] != config.img_height or _data.shape[1] != config.img_width:
                    _data = cv2.resize(_data, (config.img_width, config.img_height))
            except RuntimeError as e:
                logging.debug('Invalid image, skipping: %s', str(e))
                continue
            _data = mx.nd.array(_data)
            #print(_data.shape)
            #if _data.shape[0] != config.img_height or _data.shape[1] != config.img_width:
            #    _data = self.resize_aug(_data)
            _data = _data.astype('float32')
            # normalise pixel values to roughly [-1, 1]
            _data -= 127.5
            _data *= 0.0078125
            data = [_data]
            label = item['label']
            for datum in data:
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i][:] = self.postprocess_data(datum)
                batch_label[i][:] = label
                i += 1
    except StopIteration:
        if i < batch_size:
            raise StopIteration
    data_all = [batch_data]
    if config.use_lstm:
        data_all += self.init_state_arrays
    return io.DataBatch(data_all, [batch_label], batch_size - i)
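An iterator whose next() looks like Example 10 is typically consumed by a Module training loop. A hedged sketch of that consumption, assuming the iterator subclasses mx.io.DataIter and that a bound, initialised module named mod and an iterator named train_iter already exist (both names are assumptions):

import mxnet as mx

metric = mx.metric.Accuracy()
for epoch in range(2):
    train_iter.reset()
    metric.reset()
    for batch in train_iter:            # each batch is an io.DataBatch as returned above
        mod.forward(batch, is_train=True)
        mod.update_metric(metric, batch.label)
        mod.backward()
        mod.update()
    print('epoch %d, %s' % (epoch, metric.get()))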
Example 11: next
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def next(self):
    """Returns the next batch of data."""
    if not self.is_init:
        self.reset()
        self.is_init = True
    #print('in next', self.cur, self.labelcur)
    self.nbatch += 1
    batch_size = self.batch_size
    c, h, w = self.data_shape
    batch_data = nd.empty((batch_size, c, h, w))
    if self.provide_label is not None:
        batch_label = nd.empty(self.provide_label[0][1])
    i = 0
    try:
        while i < batch_size:
            label, s, bbox, landmark = self.next_sample()
            #if label[1] >= 0.0 or label[2] >= 0.0:
            #    print(label[0:10])
            _data = self.imdecode(s)
            if self.rand_mirror:
                _rd = random.randint(0, 1)
                if _rd == 1:
                    _data = mx.ndarray.flip(data=_data, axis=1)
            if self.nd_mean is not None:
                _data = _data.astype('float32')
                _data -= self.nd_mean
                _data *= 0.0078125
            if self.cutoff > 0:
                # cutout augmentation: grey out a random square patch
                centerh = random.randint(0, _data.shape[0] - 1)
                centerw = random.randint(0, _data.shape[1] - 1)
                half = self.cutoff // 2
                starth = max(0, centerh - half)
                endh = min(_data.shape[0], centerh + half)
                startw = max(0, centerw - half)
                endw = min(_data.shape[1], centerw + half)
                _data = _data.astype('float32')
                #print(starth, endh, startw, endw, _data.shape)
                _data[starth:endh, startw:endw, :] = 127.5
            data = [_data]
            try:
                self.check_valid_image(data)
            except RuntimeError as e:
                logging.debug('Invalid image, skipping: %s', str(e))
                continue
            #data = self.augmentation_transform(data)
            for datum in data:
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i][:] = self.postprocess_data(datum)
                batch_label[i][:] = label
                i += 1
    except StopIteration:
        if i < batch_size:
            raise StopIteration
    return io.DataBatch([batch_data], [batch_label], batch_size - i)
Example 12: reset_c2c
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def reset_c2c(self):
    self.select_triplets()
    # imgrec, args, model and image_size are assumed to come from the enclosing module
    for identity, v in self.id2range.iteritems():
        _list = range(*v)
        ocontents = []  # record entries for this identity (initialisation assumed)
        for idx in _list:
            s = imgrec.read_idx(idx)
            ocontents.append(s)
        embeddings = None
        #print(len(ocontents))
        ba = 0
        while True:
            bb = min(ba + args.batch_size, len(ocontents))
            if ba >= bb:
                break
            _batch_size = bb - ba
            _batch_size2 = max(_batch_size, args.ctx_num)
            data = nd.zeros((_batch_size2, 3, image_size[0], image_size[1]))
            label = nd.zeros((_batch_size2,))
            count = bb - ba
            ii = 0
            for i in xrange(ba, bb):
                header, img = mx.recordio.unpack(ocontents[i])
                img = mx.image.imdecode(img)
                img = nd.transpose(img, axes=(2, 0, 1))
                data[ii][:] = img
                label[ii][:] = header.label
                ii += 1
            while ii < _batch_size2:
                # pad the batch by repeating the first sample
                data[ii][:] = data[0][:]
                label[ii][:] = label[0][:]
                ii += 1
            db = mx.io.DataBatch(data=(data,), label=(label,))
            self.mx_model.forward(db, is_train=False)
            net_out = self.mx_model.get_outputs()
            net_out = net_out[0].asnumpy()
            # the second forward below, on the module-level `model`, overwrites net_out
            # from self.mx_model above; kept as in the source
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            net_out = net_out[0].asnumpy()
            if embeddings is None:
                embeddings = np.zeros((len(ocontents), net_out.shape[1]))
            embeddings[ba:bb, :] = net_out[0:_batch_size, :]
            ba = bb
        embeddings = sklearn.preprocessing.normalize(embeddings)
        embedding = np.mean(embeddings, axis=0, keepdims=True)
        embedding = sklearn.preprocessing.normalize(embedding)
        sims = np.dot(embeddings, embedding.T).flatten()  # cosine similarity to the identity centre
        assert len(sims) == len(_list)
        for i in xrange(len(_list)):
            _idx = _list[i]
            self.idx2cos[_idx] = sims[i]
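The last few lines of Example 12 compute, for every image of an identity, its cosine similarity to the identity's mean embedding. The same computation in isolation, with random vectors standing in for the network outputs (dimensions are illustrative):

import numpy as np
import sklearn.preprocessing

embeddings = sklearn.preprocessing.normalize(np.random.rand(8, 128))  # 8 images, 128-D embeddings
center = sklearn.preprocessing.normalize(np.mean(embeddings, axis=0, keepdims=True))
sims = np.dot(embeddings, center.T).flatten()   # one cosine score per image
print(sims.shape)  # (8,)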
Example 13: reset_c2c
# Required module: from mxnet import io [as alias]
# Or: from mxnet.io import DataBatch [as alias]
def reset_c2c(self):
    self.select_triplets()
    # imgrec, args, model and image_size are assumed to come from the enclosing module
    for identity, v in self.id2range.iteritems():
        _list = range(*v)
        ocontents = []  # record entries for this identity (initialisation assumed)
        for idx in _list:
            s = imgrec.read_idx(idx)
            ocontents.append(s)
        embeddings = None
        #print(len(ocontents))
        ba = 0
        while True:
            bb = min(ba + args.batch_size, len(ocontents))
            if ba >= bb:
                break
            _batch_size = bb - ba
            _batch_size2 = max(_batch_size, args.ctx_num)
            data = nd.zeros((_batch_size2, 3, image_size[0], image_size[1]))
            label = nd.zeros((_batch_size2,))
            count = bb - ba
            ii = 0
            for i in range(ba, bb):
                header, img = mx.recordio.unpack(ocontents[i])
                img = mx.image.imdecode(img)
                img = nd.transpose(img, axes=(2, 0, 1))
                data[ii][:] = img
                label[ii][:] = header.label
                ii += 1
            while ii < _batch_size2:
                # pad the batch by repeating the first sample
                data[ii][:] = data[0][:]
                label[ii][:] = label[0][:]
                ii += 1
            db = mx.io.DataBatch(data=(data,), label=(label,))
            self.mx_model.forward(db, is_train=False)
            net_out = self.mx_model.get_outputs()
            net_out = net_out[0].asnumpy()
            # the second forward below, on the module-level `model`, overwrites net_out
            # from self.mx_model above; kept as in the source
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            net_out = net_out[0].asnumpy()
            if embeddings is None:
                embeddings = np.zeros((len(ocontents), net_out.shape[1]))
            embeddings[ba:bb, :] = net_out[0:_batch_size, :]
            ba = bb
        embeddings = sklearn.preprocessing.normalize(embeddings)
        embedding = np.mean(embeddings, axis=0, keepdims=True)
        embedding = sklearn.preprocessing.normalize(embedding)
        sims = np.dot(embeddings, embedding.T).flatten()  # cosine similarity to the identity centre
        assert len(sims) == len(_list)
        for i in range(len(_list)):
            _idx = _list[i]
            self.idx2cos[_idx] = sims[i]