This article collects typical usage examples of the Python class cntk.Axis. If you are wondering what the Axis class does or how to use it in practice, the curated code examples below may help.
The 15 code examples of the Axis class shown below are ordered by popularity by default.
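Before the examples, here is a minimal orientation sketch (not taken from the examples below; the variable names and the vocabulary size are illustrative) of the two most common Axis calls: Axis.default_batch_axis() for the implicit per-sample batch axis, and Axis('name') for a user-named dynamic sequence axis, both passed via dynamic_axes when declaring an input variable.
import cntk as C
from cntk import Axis

# Illustrative sketch (CNTK 2.x API): declare a sparse one-hot sequence input
# over an explicit batch axis and a named sequence axis.
vocab_dim = 100                          # hypothetical vocabulary size
batch_axis = Axis.default_batch_axis()   # the implicit per-sample (batch) axis
seq_axis = Axis('inputAxis')             # a user-named dynamic sequence axis

x = C.input_variable(shape=vocab_dim,
                     is_sparse=True,
                     dynamic_axes=[batch_axis, seq_axis])
print(x.dynamic_axes)                    # shows the batch axis and the named sequence axis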
Example 1: train_faster_rcnn_e2e
def train_faster_rcnn_e2e(cfg):
    # Input variables denoting features and labeled ground truth rois (as 5-tuples per roi)
    image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    roi_input = input_variable((cfg.INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    dims_node = alias(dims_input, name='dims_input')

    # Instantiate the Faster R-CNN prediction model and loss function
    loss, pred_error = create_faster_rcnn_model(image_input, roi_input, dims_node, cfg)

    if cfg["CNTK"].DEBUG_OUTPUT:
        print("Storing graphs and models to %s." % cfg.OUTPUT_PATH)
        plot(loss, os.path.join(cfg.OUTPUT_PATH, "graph_frcn_train_e2e." + cfg["CNTK"].GRAPH_TYPE))

    # Set learning parameters
    e2e_lr_factor = cfg["MODEL"].E2E_LR_FACTOR
    e2e_lr_per_sample_scaled = [x * e2e_lr_factor for x in cfg["CNTK"].E2E_LR_PER_SAMPLE]
    mm_schedule = momentum_schedule(cfg["CNTK"].MOMENTUM_PER_MB)

    print("Using base model: {}".format(cfg["MODEL"].BASE_MODEL))
    print("lr_per_sample: {}".format(e2e_lr_per_sample_scaled))

    train_model(image_input, roi_input, dims_input, loss, pred_error,
                e2e_lr_per_sample_scaled, mm_schedule, cfg["CNTK"].L2_REG_WEIGHT, cfg["CNTK"].E2E_MAX_EPOCHS, cfg)

    return create_faster_rcnn_eval_model(loss, image_input, dims_input, cfg)
Example 2: train_faster_rcnn_e2e
def train_faster_rcnn_e2e(base_model_file_name, debug_output=False):
    # Input variables denoting features and labeled ground truth rois (as 5-tuples per roi)
    image_input = input_variable((num_channels, image_height, image_width), dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name)
    roi_input = input_variable((cfg["CNTK"].INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    dims_node = alias(dims_input, name='dims_input')

    # Instantiate the Faster R-CNN prediction model and loss function
    loss, pred_error = create_faster_rcnn_predictor(base_model_file_name, image_input, roi_input, dims_node)

    if debug_output:
        print("Storing graphs and models to %s." % globalvars['output_path'])
        plot(loss, os.path.join(globalvars['output_path'], "graph_frcn_train_e2e." + cfg["CNTK"].GRAPH_TYPE))

    # Set learning parameters
    e2e_lr_factor = globalvars['e2e_lr_factor']
    e2e_lr_per_sample_scaled = [x * e2e_lr_factor for x in cfg["CNTK"].E2E_LR_PER_SAMPLE]
    mm_schedule = momentum_schedule(cfg["CNTK"].MOMENTUM_PER_MB)

    print("Using base model: {}".format(cfg["CNTK"].BASE_MODEL))
    print("lr_per_sample: {}".format(e2e_lr_per_sample_scaled))

    train_model(image_input, roi_input, dims_input, loss, pred_error,
                e2e_lr_per_sample_scaled, mm_schedule, cfg["CNTK"].L2_REG_WEIGHT, globalvars['e2e_epochs'])

    return create_eval_model(loss, image_input, dims_input)
Example 3: __init__
def __init__(self, eval_model, cfg):
    # load model once in constructor and push images through the model in 'process_image()'
    self._img_shape = (cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)
    image_input = input_variable(shape=self._img_shape,
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    dims_input = input_variable((1, 6), dynamic_axes=[Axis.default_batch_axis()], name='dims_input')
    self._eval_model = eval_model(image_input, dims_input)
Example 4: __init__
def __init__(self, eval_model, cfg):
    # load model once in constructor and push images through the model in 'process_image()'
    self._img_shape = (cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)
    image_input = input_variable(shape=self._img_shape,
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    roi_proposals = input_variable((cfg.NUM_ROI_PROPOSALS, 4), dynamic_axes=[Axis.default_batch_axis()],
                                   name="roi_proposals")
    self._eval_model = eval_model(image_input, roi_proposals)
    self._cfg = cfg
Example 5: eval_and_plot_faster_rcnn
def eval_and_plot_faster_rcnn(eval_model, num_images_to_plot, test_map_file, img_shape,
                              results_base_path, feature_node_name, classes,
                              drawUnregressedRois=False, drawNegativeRois=False,
                              nmsThreshold=0.5, nmsConfThreshold=0.0, bgrPlotThreshold=0.8):
    # get image paths
    with open(test_map_file) as f:
        content = f.readlines()
    img_base_path = os.path.dirname(os.path.abspath(test_map_file))
    img_file_names = [os.path.join(img_base_path, x.split('\t')[1]) for x in content]

    # prepare model
    image_input = input_variable(img_shape, dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name)
    dims_input = input_variable((1, 6), dynamic_axes=[Axis.default_batch_axis()], name='dims_input')
    frcn_eval = eval_model(image_input, dims_input)

    # dims_input_const = cntk.constant([image_width, image_height, image_width, image_height, image_width, image_height], (1, 6))
    print("Plotting results from Faster R-CNN model for %s images." % num_images_to_plot)
    for i in range(0, num_images_to_plot):
        imgPath = img_file_names[i]

        # evaluate single image
        _, cntk_img_input, dims = load_resize_and_pad(imgPath, img_shape[2], img_shape[1])

        dims_input = np.array(dims, dtype=np.float32)
        dims_input.shape = (1,) + dims_input.shape
        output = frcn_eval.eval({frcn_eval.arguments[0]: [cntk_img_input], frcn_eval.arguments[1]: dims_input})

        out_dict = dict([(k.name, k) for k in output])
        out_cls_pred = output[out_dict['cls_pred']][0]
        out_rpn_rois = output[out_dict['rpn_rois']][0]
        out_bbox_regr = output[out_dict['bbox_regr']][0]

        labels = out_cls_pred.argmax(axis=1)
        scores = out_cls_pred.max(axis=1).tolist()

        if drawUnregressedRois:
            # plot results without final regression
            imgDebug = visualizeResultsFaster(imgPath, labels, scores, out_rpn_rois, img_shape[2], img_shape[1],
                                              classes, nmsKeepIndices=None, boDrawNegativeRois=drawNegativeRois,
                                              decisionThreshold=bgrPlotThreshold)
            imsave("{}/{}_{}".format(results_base_path, i, os.path.basename(imgPath)), imgDebug)

        # apply regression and nms to bbox coordinates
        regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels, dims)

        nmsKeepIndices = apply_nms_to_single_image_results(regressed_rois, labels, scores,
                                                           nms_threshold=nmsThreshold,
                                                           conf_threshold=nmsConfThreshold)

        img = visualizeResultsFaster(imgPath, labels, scores, regressed_rois, img_shape[2], img_shape[1],
                                     classes, nmsKeepIndices=nmsKeepIndices,
                                     boDrawNegativeRois=drawNegativeRois,
                                     decisionThreshold=bgrPlotThreshold)
        imsave("{}/{}_regr_{}".format(results_base_path, i, os.path.basename(imgPath)), img)
Example 6: test_op_reduce_over_batch_axis
def test_op_reduce_over_batch_axis(input_data, device_id, precision):
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis
    dt = PRECISION_TO_TYPE[precision]
    data = AA(input_data, dtype=dt)

    a = C.input_variable(shape=data.shape[1:],
                         dtype=sanitize_dtype_cntk(dt),
                         needs_gradient=True,
                         name='a')

    ops = [
        (reduce_sum,         lambda x: np.sum(x, axis=0, keepdims=False),  lambda x, f: np.ones_like(x)),
        (reduce_max,         lambda x: np.amax(x, axis=0, keepdims=False), lambda x, f: min_max_bwd(x, f, dt)),
        (reduce_min,         lambda x: np.amin(x, axis=0, keepdims=False), lambda x, f: min_max_bwd(x, f, dt)),
        (reduce_mean,        lambda x: np.mean(x, axis=0, keepdims=False), lambda x, f: np.ones_like(x) / x.shape[0]),
        (reduce_log_sum_exp, lambda x: np.log(np.sum(np.exp(x), axis=0, keepdims=False)), lambda x, f: np.exp(x - f)),
        (reduce_prod,        lambda x: np.prod(x, axis=0, keepdims=False), lambda x, f: f / x)
    ]

    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.default_batch_axis())
        expected_forward = fwd(data)
        expected_backward = bwd(data, expected_forward)
        binding = {a: data}
        actual_backward = input_op.grad(binding)
        actual_forward = input_op.eval(binding)
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
Example 7: test_model_not_criterion_subset
def test_model_not_criterion_subset():
    input_dim = 2
    proj_dim = 11
    model1_dim = 3
    model2_dim = 4
    x = input_variable((input_dim,))

    core = Embedding(proj_dim)
    model1 = Dense(model1_dim)(sequence.last(core(x)))
    model1_label = input_variable((model1_dim,), dynamic_axes=[Axis.default_batch_axis()])
    ce_model1 = cross_entropy_with_softmax(model1, model1_label)
    pe_model1 = classification_error(model1, model1_label)

    model2 = Dense(model2_dim)(core(x))
    model2_label = input_variable((model2_dim,))
    ce_model2 = cross_entropy_with_softmax(model2, model2_label)
    pe_model2 = classification_error(model2, model2_label)

    ce = 0.5 * sequence.reduce_sum(ce_model2) + 0.5 * ce_model1

    lr_schedule = learning_rate_schedule(0.003, UnitType.sample)
    trainer_multitask = Trainer(model1, (ce, pe_model1), sgd(ce.parameters, lr=lr_schedule))

    x_data = np.asarray([[2., 1.], [1., 2.]], np.float32)
    model1_label_data = np.asarray([1., 0., 0.], np.float32)
    model2_label_data = np.asarray([[0., 1., 0., 0.], [0., 0., 0., 1.]], np.float32)
    trainer_multitask.train_minibatch({x: [x_data], model1_label: [model1_label_data], model2_label: [model2_label_data]})
Example 8: test_eval_sparse_dense
def test_eval_sparse_dense(tmpdir, device_id):
    from cntk import Axis
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk.device import cpu, gpu, set_default_device
    from cntk.ops import input_variable, times
    from scipy.sparse import csr_matrix

    input_vocab_dim = label_vocab_dim = 69

    ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir / '2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True)
    )), randomize=False, epoch_size=2)

    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')
    label_seq_axis = Axis('labelAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    raw_input = input_variable(
        shape=input_vocab_dim, dynamic_axes=input_dynamic_axes,
        name='raw_input', is_sparse=True)

    mb_valid = mbs.next_minibatch(minibatch_size_in_samples=100,
                                  input_map={raw_input: mbs.streams.features})

    z = times(raw_input, np.eye(input_vocab_dim))
    e_reader = z.eval(mb_valid)

    # CSR with the raw_input encoding in ctf_data
    one_hot_data = [
        [3, 4, 5, 4, 7, 12, 1],
        [60, 61]
    ]
    data = [csr_matrix(np.eye(input_vocab_dim, dtype=np.float32)[d]) for d in
            one_hot_data]
    e_csr = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_csr)])

    # One-hot with the raw_input encoding in ctf_data
    data = one_hot(one_hot_data, num_classes=input_vocab_dim)
    e_hot = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_hot)])
Example 9: create_inputs
def create_inputs(vocab_dim):
    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    input_sequence = input_variable(shape=vocab_dim, dynamic_axes=input_dynamic_axes)
    label_sequence = input_variable(shape=vocab_dim, dynamic_axes=input_dynamic_axes)

    return input_sequence, label_sequence
Example 10: test_op_reduce_mean_all_constant
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example 11: train_sequence_classifier
def train_sequence_classifier(debug_output=False):
    input_dim = 2000
    cell_dim = 25
    hidden_dim = 25
    embedding_dim = 50
    num_output_classes = 5

    # Input variables denoting the features and label data
    features = input_variable(shape=input_dim, is_sparse=True)
    label = input_variable(num_output_classes, dynamic_axes=[Axis.default_batch_axis()])

    # Instantiate the sequence classification model
    classifier_output = LSTM_sequence_classifer_net(
        features, num_output_classes, embedding_dim, hidden_dim, cell_dim)

    ce = cross_entropy_with_softmax(classifier_output, label)
    pe = classification_error(classifier_output, label)

    rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)

    reader = create_reader(path, True, input_dim, num_output_classes)

    input_map = {
        features: reader.streams.features,
        label: reader.streams.labels
    }

    lr_per_sample = learning_rate_schedule(0.0005, UnitType.sample)

    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, (ce, pe),
                      sgd(classifier_output.parameters, lr=lr_per_sample))

    # Get minibatches of sequences to train with and perform model training
    minibatch_size = 200
    training_progress_output_freq = 10

    if debug_output:
        training_progress_output_freq = training_progress_output_freq / 3

    for i in range(251):
        mb = reader.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(mb)
        print_training_progress(trainer, i, training_progress_output_freq)

    import copy

    evaluation_average = copy.copy(
        trainer.previous_minibatch_evaluation_average)
    loss_average = copy.copy(trainer.previous_minibatch_loss_average)

    return evaluation_average, loss_average
Example 12: train_sequence_classifier
def train_sequence_classifier():
    input_dim = 2000
    cell_dim = 25
    hidden_dim = 25
    embedding_dim = 50
    num_output_classes = 5

    # Input variables denoting the features and label data
    features = input_variable(shape=input_dim, is_sparse=True)
    label = input_variable(num_output_classes, dynamic_axes=[Axis.default_batch_axis()])

    # Instantiate the sequence classification model
    classifier_output = LSTM_sequence_classifer_net(features, num_output_classes, embedding_dim, hidden_dim, cell_dim)

    ce = cross_entropy_with_softmax(classifier_output, label)
    pe = classification_error(classifier_output, label)

    rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)

    feature_stream_name = 'features'
    labels_stream_name = 'labels'

    mb_source = text_format_minibatch_source(path, [
        StreamConfiguration(feature_stream_name, input_dim, True, 'x'),
        StreamConfiguration(labels_stream_name, num_output_classes, False, 'y')], 0)

    features_si = mb_source.stream_info(features)
    labels_si = mb_source.stream_info(label)

    # Instantiate the trainer object to drive the model training
    lr = learning_rates_per_sample(0.0005)
    trainer = Trainer(classifier_output, ce, pe, [sgd_learner(classifier_output.owner.parameters(), lr)])

    # Get minibatches of sequences to train with and perform model training
    minibatch_size = 200
    training_progress_output_freq = 10
    i = 0
    while True:
        mb = mb_source.get_next_minibatch(minibatch_size)
        if len(mb) == 0:
            break

        # Specify the mapping of input variables in the model to actual minibatch data to be trained with
        arguments = {features: mb[features_si].m_data, label: mb[labels_si].m_data}
        trainer.train_minibatch(arguments)

        print_training_progress(trainer, i, training_progress_output_freq)
        i += 1
Example 13: test_op_reduce_mean_all_constant
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # dt = PRECISION_TO_TYPE[precision]
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward = np.copy(input_op.eval())
    dt = np.float32
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example 14: test_op_reduce_all
def test_op_reduce_all(input_data, axis, device_id, precision):
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward = np.copy(input_op.eval(binding))
    dt = np.float32
    data = AA(input_data, dtype=dt)
    a = I(shape=data.shape,
          dtype=sanitize_dtype_cntk(dt),
          needs_gradient=True,
          name='a')

    # create batch
    value = [AA([data, data - 0.5], dtype=dt), AA([data + 0.25], dtype=dt)]

    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis

    def max_bwd(x, f):
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f:
                yr[i] = 1
        return y

    ops = [
        (reduce_sum,         lambda x: AA(sum(np.sum(xi) for xi in x)), lambda x, f: [np.ones_like(xi) for xi in x]),
        (reduce_max,         lambda x: AA(max(np.max(xi) for xi in x)), lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_min,         lambda x: AA(min(np.min(xi) for xi in x)), lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_mean,        lambda x: AA(sum(np.sum(xi) for xi in x) / sum(xi.size for xi in x)), lambda x, f: [np.ones_like(xi) / sum(xj.size for xj in x) for xi in x]),
        (reduce_log_sum_exp, lambda x: AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))), lambda x, f: [np.exp(xi - f) for xi in x]),
        (reduce_prod,        lambda x: AA(np.prod([np.prod(xi) for xi in x])), lambda x, f: [f / xi for xi in x])
    ]

    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value, expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)[0]
        actual_forward = np.copy(input_op.eval(binding))
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
Example 15: test_op_reduce_over_batch_axis
def test_op_reduce_over_batch_axis(input_data, device_id, precision):
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis
    dt = PRECISION_TO_TYPE[precision]
    data = AA(input_data, dtype=dt)

    a = C.input_variable(shape=data.shape[1:],
                         dtype=sanitize_dtype_cntk(dt),
                         needs_gradient=True,
                         name='a')

    def min_max_bwd(x, f):
        forward_array = np.asarray(f, dtype=dt)
        min_max_elements = forward_array.reshape(forward_array.size).tolist()

        # place 1.0s where minimum or maximum elements are
        backward = np.zeros_like(x)
        for element in min_max_elements:
            backward += np.asarray(x == element)

        return backward

    ops = [
        (reduce_sum,         lambda x: np.sum(x, axis=0, keepdims=False),  lambda x, f: np.ones_like(x)),
        (reduce_max,         lambda x: np.amax(x, axis=0, keepdims=False), lambda x, f: min_max_bwd(x, f)),
        (reduce_min,         lambda x: np.amin(x, axis=0, keepdims=False), lambda x, f: min_max_bwd(x, f)),
        (reduce_mean,        lambda x: np.mean(x, axis=0, keepdims=False), lambda x, f: np.ones_like(x) / x.shape[0]),
        (reduce_log_sum_exp, lambda x: np.log(np.sum(np.exp(x), axis=0, keepdims=False)), lambda x, f: np.exp(x - f)),
        (reduce_prod,        lambda x: np.prod(x, axis=0, keepdims=False), lambda x, f: f / x)
    ]

    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.default_batch_axis())
        expected_forward = fwd(data)
        expected_backward = bwd(data, expected_forward)
        binding = {a: data}
        actual_backward = input_op.grad(binding)
        actual_forward = input_op.eval(binding)
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)