本文整理汇总了Python中mxnet.ndarray.array函数的典型用法代码示例。如果您正苦于以下问题:Python array函数的具体用法?Python array怎么用?Python array使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了array函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_word_embedding_similarity_evaluation_models
def test_word_embedding_similarity_evaluation_models(similarity_function):
    """Evaluate WordEmbeddingSimilarity on WordSim353 and check that the
    Spearman correlation against human judgements matches a known value."""
    try:
        from scipy import stats
    except ImportError:
        raise ImportError('This testcase requires scipy.')

    dataset = nlp.data.WordSim353()
    # Vocabulary over both words of every pair in the dataset.
    counter = nlp.data.utils.Counter(w for wpair in dataset for w in wpair[:2])
    vocab = nlp.vocab.Vocab(counter)
    vocab.set_embedding(
        nlp.embedding.create('fasttext', source='wiki.simple',
                             embedding_root='tests/data/embedding'))

    # Map each (word1, word2, score) record to vocabulary indices.
    indexed = [[vocab[pair[0]], vocab[pair[1]], pair[2]] for pair in dataset]
    words1, words2, scores = zip(*indexed)

    evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(
        vocab.embedding.idx_to_vec,
        similarity_function=similarity_function)
    evaluator.initialize()

    pred_similarity = evaluator(nd.array(words1), nd.array(words2))
    # Spearman rank correlation between predictions and gold scores.
    sr = stats.spearmanr(pred_similarity.asnumpy(), np.array(scores))
    assert np.isclose(0.6076485693769645, sr.correlation)
示例2: main
def main(ctx):
    """Benchmark the DLL-backed element-wise sum against the pure-Python one.

    Builds two small 2x3 matrices on *ctx*, runs each implementation ``num``
    times, and prints the average time per call in milliseconds.

    Parameters
    ----------
    ctx : mxnet context the input matrices are placed on.
    """
    calcEngine = CALC()
    # 2x3 test operands: [0..5] and [0, 10, ..., 50].
    matA = nd.array(np.arange(6).reshape(2, 3)).as_in_context(ctx)
    matB = nd.array((np.arange(6) * 10).reshape(2, 3)).as_in_context(ctx)

    num = 1000  # timed iterations per implementation
    if 1:  # debug toggle: DLL implementation
        t0 = time.time()
        for k in range(num):
            matD = calcEngine.calc_sum(matA, matB)
        t1 = time.time()
        # Fix: py2 print statements -> py3-compatible print() calls.
        print('dll: time cost {}ms'.format(float(t1 - t0) * 1000 / num))
        print(matD)
    if 1:  # debug toggle: pure-Python implementation
        t0 = time.time()
        for k in range(num):
            matC = calc_sum(matA, matB)
        t1 = time.time()
        print('py: time cost {}ms'.format(float(t1 - t0) * 1000 / num))
        print(matC)
示例3: _preprocess
def _preprocess(self, data):
    """Decode, resize, normalize and reshape one raw input image."""
    # Target spatial size comes from the model's declared input shape (NCHW).
    height, width = self.signature['inputs'][0]['data_shape'][2:]
    img = image.read(data[0])
    img = image.resize(img, width, height)
    # Center pixel values around zero: (x - 127.5) / 127.5.
    mean = nd.array([127.5])
    scale = nd.array([127.5])
    img = image.transform_shape(image.color_normalize(img, mean, scale))
    return [img]
示例4: train
def train(input_variable, target_variable, encoder, decoder, teacher_forcing_ratio,
          encoder_optimizer, decoder_optimizer, criterion, max_length, ctx):
    """Run one encoder/decoder (seq2seq with attention) training step.

    Encodes *input_variable*, decodes *target_variable* token by token —
    optionally with teacher forcing — accumulates the criterion loss,
    backpropagates, and steps both optimizers.

    Returns
    -------
    float
        Total loss divided by the target sequence length.
    """
    with autograd.record():
        loss = F.zeros((1,), ctx=ctx)
        encoder_hidden = encoder.initHidden(ctx)
        input_length = input_variable.shape[0]
        target_length = target_variable.shape[0]
        encoder_outputs, encoder_hidden = encoder(
            input_variable.expand_dims(0), encoder_hidden)
        # Zero-pad encoder outputs to max_length so the attention mechanism
        # always addresses a fixed-size memory.
        if input_length < max_length:
            encoder_outputs = F.concat(
                encoder_outputs.flatten(),
                F.zeros((max_length - input_length, encoder.hidden_size), ctx=ctx),
                dim=0)
        else:
            encoder_outputs = encoder_outputs.flatten()
        decoder_input = F.array([SOS_token], ctx=ctx)
        decoder_hidden = encoder_hidden
        # Fix: the comparison already yields a bool; "True if ... else False"
        # was redundant.
        use_teacher_forcing = random.random() < teacher_forcing_ratio
        if use_teacher_forcing:
            # Teacher forcing: feed the ground-truth token as the next input.
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                # Fix: compute the step loss once; the original evaluated
                # criterion(...) a second time just for a py2 debug print.
                step_loss = criterion(decoder_output, target_variable[di])
                loss = F.add(loss, step_loss)
                print(step_loss)  # fix: py2 print statement -> print() call
                decoder_input = target_variable[di]  # teacher forcing
        else:
            # Without teacher forcing: feed back the decoder's own prediction.
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                topi = decoder_output.argmax(axis=1)
                decoder_input = F.array([topi.asscalar()], ctx=ctx)
                loss = F.add(loss, criterion(decoder_output, target_variable[di]))
                if topi.asscalar() == EOS_token:
                    break
    # NOTE(review): backward/step placed outside the record scope, the usual
    # Gluon pattern — the flattened original did not preserve indentation, so
    # confirm against the upstream example.
    loss.backward()
    encoder_optimizer.step(1)
    decoder_optimizer.step(1)
    return loss.asscalar() / target_length
示例5: next
def next(self):
    """Return the next DataBatch from the prefetching fetcher.

    Raises
    ------
    StopIteration
        When the underlying fetcher is exhausted.
    """
    # Guard clause: stop as soon as the fetcher has nothing left.
    if not self._fetcher.iter_next():
        raise StopIteration
    tic = time.time()
    data_batch = self._fetcher.get()
    # Fix: py2 print statement -> py3-compatible print() call.
    print('Waited for {} seconds'.format(time.time() - tic))
    return DataBatch(data=[array(data_batch[0])], label=[array(data_batch[1])])
示例6: _score_sentence
def _score_sentence(self, feats, tags):
    """Score a provided tag sequence under the CRF transition/emission model."""
    score = nd.array([0])
    # Prepend START so transitions[tags[i+1], tags[i]] also covers the
    # initial transition into the first real tag.
    tags = nd.concat(nd.array([self.tag2idx[START_TAG]]), *tags, dim=0)
    for i, feat in enumerate(feats):
        cur = to_scalar(tags[i + 1])
        prev = to_scalar(tags[i])
        score = score + self.transitions[cur, prev] + feat[cur]
    # Close the path with the transition into STOP from the final tag.
    final_tag = to_scalar(tags[int(tags.shape[0] - 1)])
    score = score + self.transitions[self.tag2idx[STOP_TAG], final_tag]
    return score
示例7: data_iter
def data_iter():
    """Yield (data, label) minibatches of X/y in a random order."""
    # Shuffle all example indices once, then slice off batch_size at a time.
    indices = list(range(num_examples))
    random.shuffle(indices)
    for start in range(0, num_examples, batch_size):
        stop = min(start + batch_size, num_examples)
        picks = nd.array(indices[start:stop])
        yield nd.take(X, picks), nd.take(y, picks)
示例8: data_generator
def data_generator(batch_size):
    """Yield shuffled (X, y) minibatches of size *batch_size*."""
    order = list(range(config.training_size))
    random.shuffle(order)
    for start in range(0, config.training_size, batch_size):
        stop = min(start + batch_size, config.training_size)
        picks = nd.array(order[start:stop])
        yield nd.take(X, picks), nd.take(y, picks)
示例9: _forward_alg
def _forward_alg(self, feats):
    """Compute the CRF partition function (log-sum-exp over all tag paths)."""
    # Initial forward variables: every tag at -10000 except START at 0.
    init = [[-10000.] * self.tagset_size]
    init[0][self.tag2idx[START_TAG]] = 0.
    alphas = nd.array(init)
    # Sweep the sentence, updating the forward variables per timestep.
    for feat in feats:
        step_alphas = []  # forward variable for each tag at this timestep
        for next_tag in range(self.tagset_size):
            # Emission score: identical whatever the previous tag was.
            emit_score = feat[next_tag].reshape((1, -1))
            # transitions[next_tag][i]: score of edge i -> next_tag.
            trans_score = self.transitions[next_tag].reshape((1, -1))
            # Edge scores before the log-sum-exp reduction.
            next_tag_var = alphas + trans_score + emit_score
            step_alphas.append(log_sum_exp(next_tag_var))
        alphas = nd.concat(*step_alphas, dim=0).reshape((1, -1))
    # Terminate every path with the transition into STOP.
    terminal_var = alphas + self.transitions[self.tag2idx[STOP_TAG]]
    return log_sum_exp(terminal_var)
示例10: data_iter
def data_iter():
    """Yield (sample, label) minibatches in random order.

    Implemented as a generator via ``yield``.
    """
    # Shuffle the example indices so each batch is a random draw.
    shuffled = list(range(num_examples))
    random.shuffle(shuffled)
    for start in range(0, num_examples, batch_size):  # 0, 10, 20, ...
        stop = min(start + batch_size, num_examples)
        picks = nd.array(shuffled[start:stop])  # batch_size random examples
        yield nd.take(X, picks), nd.take(y, picks)
示例11: test_out_grads
def test_out_grads():
    """Check backward() honours explicit per-output head gradients.

    A ``None`` head gradient defaults to ones; explicit arrays flow through
    unchanged into the corresponding rows of dx.
    """
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    head_grads = [None, nd.array([1, 2, 3, 4, 5]), nd.array([5, 4, 3, 2, 1])]
    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], head_grads)
    expected = np.array([[1, 1, 1, 1, 1],
                         [1, 2, 3, 4, 5],
                         [5, 4, 3, 2, 1]])
    assert (dx.asnumpy() == expected).all()
示例12: calculate_avg_q
def calculate_avg_q(samples, qnet):
    """Average, over *samples*, of the max Q-value predicted by *qnet*.

    Pixel values are rescaled from [0, 255] to [0, 1] before the forward pass.
    """
    total_q = 0.0
    for i in range(len(samples)):
        state = nd.array(samples[i:i + 1], ctx=qnet.ctx) / float(255.0)
        q_values = qnet.forward(is_train=False, data=state)[0]
        total_q += q_values.asnumpy().max(axis=1).sum()
    return total_q / float(len(samples))
示例13: forward
def forward(self, x):
    """Scale the input down by ``self.scale_factor``.

    numpy arrays are converted to NDArray after scaling; any other input is
    divided directly. A zero scale factor triggers a warning and returns *x*
    untouched (avoids a division by zero).
    """
    factor = self.scale_factor
    if factor == 0:
        warnings.warn("Scale factor cannot be 0.")
        return x
    scaled = x / factor
    return nd.array(scaled) if isinstance(x, np.ndarray) else scaled
示例14: SGD
def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num,
        lr=None,
        lr_scheduler=None, prior_precision=1,
        out_grad_f=None,
        initializer=None,
        minibatch_size=100, dev=None):
    """Train *sym* with plain SGD on random minibatches of (X, Y).

    Parameters
    ----------
    sym : mxnet symbol to train.
    data_inputs : dict mapping input names to arrays used to bind the executor.
    X, Y : training data and labels (indexable by a numpy integer array).
    X_test, Y_test : held-out data for the periodic accuracy report.
    total_iter_num : number of SGD iterations.
    lr, lr_scheduler, prior_precision : optimizer settings; prior_precision
        is applied as weight decay.
    out_grad_f : optional callable producing output gradients; when None the
        label is fed into the executor and plain backward() is used.
    initializer : parameter initializer forwarded to get_executor.
    minibatch_size : samples drawn per iteration.
    dev : mxnet context; defaults to ``mx.gpu()``.
        Fix: the default used to be ``dev=mx.gpu()`` in the signature, which
        created a GPU context at definition time even when callers passed a
        CPU context; it is now resolved lazily.

    Returns
    -------
    (exe, params, params_grad) from the bound executor.
    """
    if dev is None:
        dev = mx.gpu()
    if out_grad_f is None:
        # The single non-'data' input is treated as the label.
        label_key = list(set(data_inputs.keys()) - set(['data']))[0]
    exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
    optimizer = mx.optimizer.create('sgd', learning_rate=lr,
                                    rescale_grad=X.shape[0] / minibatch_size,
                                    lr_scheduler=lr_scheduler,
                                    wd=prior_precision)
    updater = mx.optimizer.get_updater(optimizer)
    start = time.time()
    for i in range(total_iter_num):
        # Sample a random minibatch (with replacement).
        indices = numpy.random.randint(X.shape[0], size=minibatch_size)
        X_batch = X[indices]
        Y_batch = Y[indices]
        exe.arg_dict['data'][:] = X_batch
        if out_grad_f is None:
            exe.arg_dict[label_key][:] = Y_batch
            exe.forward(is_train=True)
            exe.backward()
        else:
            exe.forward(is_train=True)
            exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
        for k in params:
            updater(k, params_grad[k], params[k])
        # Report timing and held-out accuracy every 500 iterations.
        if (i + 1) % 500 == 0:
            end = time.time()
            print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
            sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100)
            start = time.time()
    return exe, params, params_grad
示例15: get_image
def get_image(self, X):
    """Convert a flat MNIST vector into a channel-first image for the net.

    The 784-element input is reshaped to 28x28, replicated to 3 identical
    channels, cropped to (H, W) when the target is smaller than the source
    (random crop in training, center crop otherwise), and returned in
    (C, H, W) layout.

    Raises
    ------
    RuntimeError
        If the requested (H, W) from ``self.shape`` exceeds the 28x28 source.
    """
    B, C, H, W = self.shape
    X = np.reshape(X, (28, 28))  # source images are MNIST-sized (28x28)
    X = X[:, :, np.newaxis]
    X = np.tile(X, (1, 1, 3))  # grayscale -> 3 identical channels
    if H > X.shape[0] or W > X.shape[1]:
        # Fix: the bare ``raise RuntimeError`` carried no diagnostic message.
        raise RuntimeError('target size ({}, {}) exceeds source image {}'
                           .format(H, W, X.shape[:2]))
    if H < X.shape[0] or W < X.shape[1]:
        # Crop randomly during training, centrally at evaluation time.
        if self.fortrain:
            X, _ = mx.image.random_crop(nd.array(X), (H, W))
        else:
            X, _ = mx.image.center_crop(nd.array(X), (H, W))
        X = np.transpose(X.asnumpy(), (2, 0, 1))
    else:
        # Data augmentation is off: sizes match exactly, no crop needed.
        X = np.transpose(X, (2, 0, 1))
    return X