

Python cntk.input_variable Method Code Examples

This article collects typical code examples of the Python method cntk.input_variable. If you are wondering what exactly cntk.input_variable does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the cntk module this method belongs to.


The following presents 5 code examples of the cntk.input_variable method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
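
Before diving into the examples, here is a minimal usage sketch of the method itself; the shapes and names below are illustrative assumptions and are not taken from any of the projects:

import cntk as C
import numpy as np

# A static (per-sample) input: one 28x28 grayscale image per sample.
features = C.input_variable((1, 28, 28), dtype=np.float32, name="features")

# A dense label vector with 10 classes.
labels = C.input_variable(10, dtype=np.float32, name="labels")

# Inputs for recurrent models, which vary along a sequence axis,
# are declared with the sequence variant instead:
tokens = C.sequence.input_variable(300, name="tokens")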

Example 1: _create_model_and_execute_test

# Required import: import cntk [as alias]
# Or: from cntk import input_variable [as alias]
def _create_model_and_execute_test(params):
    # Create CNTK model
    input_var = C.input_variable(params['input_dim'], np.float32)
    params['input_var'] = input_var
    params['act_fun'] = C.layers.blocks.identity
    params['init_fun'] = C.glorot_uniform()

    model = params['create_model'](params)

    label_var = C.input_variable((params['label_dim']), np.float32)
    loss = C.cross_entropy_with_softmax(model, label_var)
    eval_error = C.classification_error(model, label_var)

    lr_schedule = C.learning_rate_schedule(0.05, C.UnitType.minibatch)
    learner = C.sgd(model.parameters, lr_schedule)
    trainer = C.Trainer(model, (loss, eval_error), [learner])

    input_value, label_value = _generate_random_sample(
        params['batch_size'],
        params['input_dim'],
        params['label_dim']
    )

    # Import to ngraph
    ng_loss, placeholders = CNTKImporter(batch_size=params['batch_size']).import_model(loss)
    parallel_update = CommonSGDOptimizer(0.05).minimize(ng_loss, ng_loss.variables())

    transformer = ng.transformers.make_transformer()
    update_fun = transformer.computation([ng_loss, parallel_update], *placeholders)

    # Execute on CNTK
    trainer.train_minibatch({input_var: input_value, label_var: label_value})
    cntk_ret = trainer.previous_minibatch_loss_average

    # Execute on ngraph
    input_value = np.moveaxis(input_value, 0, -1)
    label_value = np.moveaxis(label_value, 0, -1)
    ng_ret = update_fun(input_value, label_value)[0]

    return cntk_ret, ng_ret 
Developer: NervanaSystems, Project: ngraph-python, Lines of code: 42, Source file: test_ops_compoud.py
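
For context, a hypothetical way to drive this test helper is sketched below; the model builder and the dimensions are illustrative assumptions, and _generate_random_sample, CNTKImporter and CommonSGDOptimizer come from elsewhere in the ngraph-python test file:

def _dense_model(params):
    # Minimal single-layer classifier built from the params prepared inside the helper.
    return C.layers.Dense(params['label_dim'],
                          activation=params['act_fun'],
                          init=params['init_fun'])(params['input_var'])

params = {
    'input_dim': 32,
    'label_dim': 4,
    'batch_size': 16,
    'create_model': _dense_model,
}
cntk_loss, ng_loss = _create_model_and_execute_test(params)
print(cntk_loss, ng_loss)  # both frameworks should report (approximately) the same loss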

Example 2: create_network

# Required import: import cntk [as alias]
# Or: from cntk import input_variable [as alias]
def create_network(num_convolution_layers):
    """ Create network

    """
    # Input variables denoting the features and label data
    input_var = cntk.input_variable((_NUM_CHANNELS, _IMAGE_HEIGHT, _IMAGE_WIDTH))
    label_var = cntk.input_variable((_NUM_CLASSES))

    # create model, and configure learning parameters
    # Instantiate the feedforward classification model
    input_removemean = minus(input_var, constant(128))
    scaled_input = element_times(constant(0.00390625), input_removemean)

    print('Creating NN model')
    with layers.default_options(activation=relu, pad=True):
        model = layers.Sequential([
            layers.For(range(num_convolution_layers), lambda: [
                layers.Convolution2D((3, 3), 64),
                layers.Convolution2D((3, 3), 64),
                layers.MaxPooling((3, 3), (2, 2))
            ]),
            layers.For(range(2), lambda i: [
                layers.Dense([256, 128][i]),
                layers.Dropout(0.5)
            ]),
            layers.Dense(_NUM_CLASSES, activation=None)
        ])(scaled_input)

    # loss and metric
    ce = cross_entropy_with_softmax(model, label_var)
    pe = classification_error(model, label_var)

    return {
        'name': 'convnet',
        'feature': input_var,
        'label': label_var,
        'ce': ce,
        'pe': pe,
        'output': model
    } 
Developer: Azure, Project: batch-shipyard, Lines of code: 42, Source file: ConvNet_CIFAR10.py
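
A sketch of how the returned dictionary could be wired into a trainer; the learner settings below are assumptions and the minibatch source is left out:

network = create_network(num_convolution_layers=2)

lr_schedule = cntk.learning_rate_schedule(0.01, cntk.UnitType.minibatch)
learner = cntk.momentum_sgd(network['output'].parameters, lr_schedule,
                            cntk.momentum_schedule(0.9))
trainer = cntk.Trainer(network['output'], (network['ce'], network['pe']), [learner])
# Minibatches are then fed as {network['feature']: images, network['label']: one_hot_labels}.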

Example 3: D

# Required import: import cntk [as alias]
# Or: from cntk import input_variable [as alias]
def D(x_img, x_code):
    '''
    Discriminator network architecture

    Args:
        x_img: cntk.input_variable representing the input images fed to the network
        x_code: cntk.input_variable representing the conditional code fed to the network
    '''
    def bn_with_leaky_relu(x, leak=0.2):
        h = C.layers.BatchNormalization(map_rank=1)(x)
        r = C.param_relu(C.constant((np.ones(h.shape) * leak).astype(np.float32)), h)
        return r

    with C.layers.default_options(init=C.normal(scale=0.02)):

        h0 = C.layers.Convolution2D(dkernel, 1, strides=dstride)(x_img)
        h0 = bn_with_leaky_relu(h0, leak=0.2)
        print('h0 shape :', h0.shape)

        h1 = C.layers.Convolution2D(dkernel, 64, strides=dstride)(h0)
        h1 = bn_with_leaky_relu(h1, leak=0.2)
        print('h1 shape :', h1.shape)

        h2 = C.layers.Dense(256, activation=None)(h1)
        h2 = bn_with_leaky_relu(h2, leak=0.2)
        print('h2 shape :', h2.shape)

        h2_aug = C.splice(h2, x_code)

        h3 = C.layers.Dense(256, activation=C.relu)(h2_aug)

        h4 = C.layers.Dense(1, activation=C.sigmoid, name='D_out')(h3)
        print('h4 shape :', h4.shape)

        return h4 
Developer: astorfi, Project: CNTK-World, Lines of code: 37, Source file: conditional-DCGAN.py
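
A hedged instantiation sketch: dkernel and dstride are module-level constants in the original script, and the 28x28 image / 10-class conditional code shapes below are assumptions:

dkernel, dstride = 5, 2   # assumed values of the module-level constants
x_img = C.input_variable((1, 28, 28), np.float32)
x_code = C.input_variable(10, np.float32)
d_out = D(x_img, x_code)  # scalar probability that the (image, code) pair is real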

Example 4: eval_and_plot_faster_rcnn

# Required import: import cntk [as alias]
# Or: from cntk import input_variable [as alias]
def eval_and_plot_faster_rcnn(eval_model, num_images_to_plot, test_map_file, img_shape,
                              results_base_path, feature_node_name, classes,
                              drawUnregressedRois=False, drawNegativeRois=False,
                              nmsThreshold=0.5, nmsConfThreshold=0.0, bgrPlotThreshold = 0.8):
    # get image paths
    with open(test_map_file) as f:
        content = f.readlines()
    img_base_path = os.path.dirname(os.path.abspath(test_map_file))
    img_file_names = [os.path.join(img_base_path, x.split('\t')[1]) for x in content]

    # prepare model
    image_input = input_variable(img_shape, dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name)
    dims_input = input_variable((1,6), dynamic_axes=[Axis.default_batch_axis()], name='dims_input')
    frcn_eval = eval_model(image_input, dims_input)

    #dims_input_const = cntk.constant([image_width, image_height, image_width, image_height, image_width, image_height], (1, 6))
    print("Plotting results from Faster R-CNN model for %s images." % num_images_to_plot)
    for i in range(0, num_images_to_plot):
        imgPath = img_file_names[i]

        # evaluate single image
        _, cntk_img_input, dims = load_resize_and_pad(imgPath, img_shape[2], img_shape[1])

        dims_input = np.array(dims, dtype=np.float32)
        dims_input.shape = (1,) + dims_input.shape
        output = frcn_eval.eval({frcn_eval.arguments[0]: [cntk_img_input], frcn_eval.arguments[1]: dims_input})

        out_dict = dict([(k.name, k) for k in output])
        out_cls_pred = output[out_dict['cls_pred']][0]
        out_rpn_rois = output[out_dict['rpn_rois']][0]
        out_bbox_regr = output[out_dict['bbox_regr']][0]

        labels = out_cls_pred.argmax(axis=1)
        scores = out_cls_pred.max(axis=1).tolist()

        if drawUnregressedRois:
            # plot results without final regression
            imgDebug = visualizeResultsFaster(imgPath, labels, scores, out_rpn_rois, img_shape[2], img_shape[1],
                                              classes, nmsKeepIndices=None, boDrawNegativeRois=drawNegativeRois,
                                              decisionThreshold=bgrPlotThreshold)
            imsave("{}/{}_{}".format(results_base_path, i, os.path.basename(imgPath)), imgDebug)

        # apply regression and nms to bbox coordinates
        regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels, dims)

        nmsKeepIndices = apply_nms_to_single_image_results(regressed_rois, labels, scores,
                                                    nms_threshold=nmsThreshold,
                                                    conf_threshold=nmsConfThreshold)

        img = visualizeResultsFaster(imgPath, labels, scores, regressed_rois, img_shape[2], img_shape[1],
                                     classes, nmsKeepIndices=nmsKeepIndices,
                                     boDrawNegativeRois=drawNegativeRois,
                                     decisionThreshold=bgrPlotThreshold)
        imsave("{}/{}_regr_{}".format(results_base_path, i, os.path.basename(imgPath)), img)


####################################
# helper library
#################################### 
Developer: karolzak, Project: cntk-python-web-service-on-azure, Lines of code: 61, Source file: plot_helpers.py
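
A hypothetical invocation is sketched below; the model path, map file, image shape and class list are assumptions, and load_resize_and_pad / visualizeResultsFaster must be available from the same helper module:

eval_model = load_model("faster_rcnn_eval.model")   # hypothetical trained model
eval_and_plot_faster_rcnn(eval_model,
                          num_images_to_plot=5,
                          test_map_file="test_map.txt",
                          img_shape=(3, 850, 850),
                          results_base_path="results",
                          feature_node_name="features",
                          classes=("__background__", "object"),
                          drawUnregressedRois=False,
                          nmsThreshold=0.5)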

Example 5: build_graph

# Required import: import cntk [as alias]
# Or: from cntk import input_variable [as alias]
def build_graph(config):
    assert(config['type'] in ["cnn", "lstm", "gru", "bilstm", "bigru"])
    if config["type"] == "cnn":
        # static model
        features = C.input_variable(input_dim_model, name="input")
        labels = C.input_variable(label_dim, name="label")
    else:
        # recurrent model
        features = C.sequence.input_variable(input_dim_model, name="input")
        labels = C.sequence.input_variable(label_dim, name="label")
    netoutput = create_model(features, config["type"], config["encoder"], config["pretrained_model"], config["e3_clone"])

    if config["l2_loss_type"] == 1:
        print("Use standard l2 loss")
        ce = l2_loss(netoutput, labels)
    elif config["l2_loss_type"] == 2:
        print("Use variance normalized l2 loss")
        ce = std_normalized_l2_loss(netoutput, labels)
    else:
        raise ValueError("Unsupported loss type")

    # enforce sparsity output
    if config["l1_reg"] > sys.float_info.epsilon:
        ce = ce + config["l1_reg"] * l1_reg_loss(netoutput)
    
    # performance metrics
    pe = C.squared_error(netoutput, labels)

    if config["constlr"]:
        lr_schedule = config["lr"]
    else:
        if config["lr_list"] is not None:
            print("use learning rate schedule from file")
            lr_schedule = config["lr_list"]
        else:
            if config["type"] != "cnn": # default learning rate for recurrent model
                lr_schedule = [0.005] + [0.0025]*2 + [0.001]*4 + [0.0005]*8 + [0.00025]*16 + [0.0001]*1000 + [0.00005]*1000 + [0.000025]
            elif config["lr_schedule"] == 1: # learning rate for CNN
                lr_schedule = [0.005] + [0.0025]*2 + [0.00125]*3 + [0.0005]*4 + [0.00025]*5 + [0.0001]
            elif config["lr_schedule"] == 2:
                lr_schedule = [0.005] + [0.0025]*2 + [0.00125]*3 + [0.0005]*4 + [0.00025]*5 + [0.0001]*100 + [0.00005]*50 + [0.000025]*50 + [0.00001]
            else:
                raise ValueError("unknown learning rate")
    learning_rate = C.learning_parameter_schedule_per_sample(lr_schedule, epoch_size=config["epoch_size"])
    momentum_schedule = C.momentum_schedule(0.9, minibatch_size=300)
    
    learner = C.adam(netoutput.parameters, lr=learning_rate, momentum=momentum_schedule,
                        l2_regularization_weight=0.0001,
                        gradient_clipping_threshold_per_sample=3.0, gradient_clipping_with_truncation=True)
    trainer = C.Trainer(netoutput, (ce, pe), [learner])

    return features, labels, netoutput, trainer


#-----------------------------------
# training procedure
#-----------------------------------

# create reader 
Developer: haixpham, Project: end2end_AU_speech, Lines of code: 61, Source file: train_end2end.py
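
A sketch of a config dictionary that satisfies every key read by build_graph; all values are illustrative assumptions, and create_model, l2_loss, std_normalized_l2_loss and l1_reg_loss are defined elsewhere in the source file:

config = {
    "type": "cnn",            # one of: cnn, lstm, gru, bilstm, bigru
    "encoder": "resnet",      # passed through to create_model
    "pretrained_model": None,
    "e3_clone": False,
    "l2_loss_type": 1,        # 1 = standard l2, 2 = variance-normalized l2
    "l1_reg": 0.0,            # > 0 adds the sparsity penalty on the output
    "constlr": False,
    "lr": 0.005,
    "lr_list": None,          # optional learning rate schedule loaded from file
    "lr_schedule": 1,         # which built-in CNN schedule to use
    "epoch_size": 50000,
}
features, labels, netoutput, trainer = build_graph(config)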


Note: The cntk.input_variable method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.