

Python logger.info Method Code Examples

This article collects typical usage examples of the Python method tensorpack.utils.logger.info. If you are wondering how logger.info is used in practice, or are looking for concrete examples of calling it, the curated code examples below may help. You can also explore further usage examples from its containing module, tensorpack.utils.logger.


The following presents 15 code examples of the logger.info method, sorted by popularity by default.
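All of the examples below share the same import pattern. As a quick orientation, here is a minimal, self-contained sketch of how tensorpack's logger.info is typically imported and called; the log message, the loss_scale variable, and the commented-out set_logger_dir path are illustrative assumptions rather than code taken from any of the quoted projects.

from tensorpack.utils import logger

# Optionally redirect log output to a directory first (the path is an illustrative assumption).
# logger.set_logger_dir('train_log/demo')

loss_scale = 128.  # placeholder value used only for this sketch
logger.info("Scaling the total loss by {} ...".format(loss_scale))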

Example 1: build_graph

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def build_graph(self, image, label):
        image = ImageNetModel.image_preprocess(image, bgr=self.image_bgr)
        assert self.data_format in ['NCHW', 'NHWC']
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        print('self.label_smoothing', self.label_smoothing)
        loss = ImageNetModel.compute_loss_and_error(logits, label, self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Developer: huawei-noah, Project: ghostnet, Lines of code: 27, Source file: imagenet_utils.py

Example 2: build_graph

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def build_graph(self, image, label):
        image = self.image_preprocess(image)
        assert self.data_format == 'NCHW'
        image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        loss = ImageNetModel.compute_loss_and_error(
            logits, label, label_smoothing=self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Developer: tensorpack, Project: benchmarks, Lines of code: 26, Source file: imagenet_utils.py

Example 3: calc_flops

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def calc_flops(model):
    # manually build the graph with batch=1
    input_desc = [
        InputDesc(tf.float32, [1, model.image_size, model.image_size, 3], "input"),
        InputDesc(tf.int32, [1], "label")
    ]
    input = PlaceholderInput()
    input.setup(input_desc)
    with TowerContext("", is_training=False):
        model.build_graph(*input.get_input_tensors())
    model_utils.describe_trainable_vars()

    tf.profiler.profile(
        tf.get_default_graph(),
        cmd="op",
        options=tf.profiler.ProfileOptionBuilder.float_operation())
    logger.info("Note that TensorFlow counts flops in a different way from the paper.")
    logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them "
                "as 1 flop because it can be executed in one instruction.") 
Developer: osmr, Project: imgclsmob, Lines of code: 21, Source file: utils_tp.py
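The two log messages at the end of calc_flops explain that TensorFlow's profiler counts a multiply+add as two flops, while the paper counts it as a single operation. As a hedged follow-up sketch (assuming, as in the TF1 API used above, that tf.profiler.profile returns a result object exposing total_float_ops), the conversion is simply a division by two:

import tensorflow as tf
from tensorpack.utils import logger

# Hypothetical continuation of calc_flops: halve the profiler's count to
# approximate paper-style flops, where one multiply+add is a single operation.
profile_result = tf.profiler.profile(
    tf.get_default_graph(),
    cmd="op",
    options=tf.profiler.ProfileOptionBuilder.float_operation())
paper_style_flops = profile_result.total_float_ops / 2
logger.info("Approximate paper-style flops: {}".format(paper_style_flops))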

Example 4: _create_info_merge

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def _create_info_merge(next_id, h_id, o_id, aux_weight, is_candidate,
            final_merge_op=LayerTypes.MERGE_WITH_SUM,
            hallu_gate_layer=LayerTypes.NO_FORWARD_LAYER):
        """
        Form the LayerInfo for the merge operation between hallu of id h_id and the original
        tensor of id o_id (out_id). The new LayerInfo will have info.id == next_id.
        Return a list of layers used for merging.
        Note that any change to this function needs to be mirrored in _finalize_info_merge.
        """
        inputs = [None] * 2
        inputs[LayerInfoList.ORIG_IDX_IN_MERGE_HALLU] = o_id
        inputs[LayerInfoList.HALLU_IDX_IN_MERGE_HALLU] = h_id
        operations = [LayerTypes.IDENTITY] * 2 + [final_merge_op]
        operations[LayerInfoList.HALLU_IDX_IN_MERGE_HALLU] = hallu_gate_layer
        info = LayerInfo(next_id, inputs=inputs, operations=operations,
            aux_weight=aux_weight, is_candidate=is_candidate)
        return [info] 
Developer: microsoft, Project: petridishnn, Lines of code: 19, Source file: layer_info.py

Example 5: _remove_connection_from_id

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def _remove_connection_from_id(info, id_to_remove):
        if id_to_remove not in info.inputs:
            return info
        if isinstance(info.stop_gradient, list):
            assert len(info.stop_gradient) == len(info.inputs), \
                "Invalid info {}".format(info)
        if isinstance(info.down_sampling, list):
            assert len(info.down_sampling) == len(info.inputs), \
                "Invalid info {}".format(info)
        assert len(info.operations) == len(info.inputs) + 1, \
            "Invalid info {}".format(info)

        idx = 0
        while idx < len(info.inputs):
            if info.inputs[idx] == id_to_remove:
                del info.inputs[idx]
                del info.operations[idx]
                if isinstance(info.stop_gradient, list):
                    del info.stop_gradient[idx]
                if isinstance(info.down_sampling, list):
                    del info.down_sampling[idx]
            else:
                # Only advance when nothing was deleted, so the element shifted
                # into position idx is not skipped.
                idx += 1
        return info 
Developer: microsoft, Project: petridishnn, Lines of code: 25, Source file: layer_info.py

Example 6: critic_predictor

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def critic_predictor(ctrl, model_dir, vs_name):
    """
    Create an OfflinePredictorWithSaver for test-time use.
    """
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config=None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count = {'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config)
    )
    if model_dir:
        ckpt = tf.train.latest_checkpoint(model_dir)
        logger.info("Loading {} predictor from {}".format(vs_name, ckpt))
        if ckpt:
            pred_config.session_init = SaverRestore(ckpt)
    predictor = OfflinePredictorWithSaver(pred_config)
    return predictor 
Developer: microsoft, Project: petridishnn, Lines of code: 24, Source file: critic.py

Example 7: crawl_ve_from_remote_logs

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def crawl_ve_from_remote_logs(mi_info, dn):
    """
    Deprecated; do not use.

    Args:
        mi_info: a dict mapping model iteration to ModelSearchInfo
        dn: directory that directly contains the server-side log.log, i.e.,
            the remote logs are located at {dn}/{model_iter}/log.log
    """
    for mi in mi_info:
        info = mi_info[mi]
        if info.ve is None or info.ve > 1.0:
            log_fn = os.path.join(dn, str(mi), 'log.log')
            if os.path.exists(log_fn):
                ve = grep_val_err_from_log(log_fn)
                mi_info[mi].ve = ve
    return mi_info 
Developer: microsoft, Project: petridishnn, Lines of code: 19, Source file: critic.py

Example 8: _log_convex_hull_parent_choice

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def _log_convex_hull_parent_choice(self, q_parent, mi_info, e_idx):
        l_pqef = [
            pqef for pqef in q_parent.all_as_generator(full_info=True)
        ]
        l_mi, l_ve, l_fp, l_te, l_cnt = [], [], [], [], []
        for pqef in l_pqef:
            mi = pqef[IDX_PQE].model_iter
            l_mi.append(mi)
            l_ve.append(mi_info[mi].ve)
            l_fp.append(mi_info[mi].fp)
            l_cnt.append(pqef[IDX_CNT])
        logger.info(
            "CONVEX HULL info:\nl_fp={}\nl_ve={}\nl_cnt={}\nl_mi={}".format(
            l_fp, l_ve, l_cnt, l_mi
            ))
        logger.info("Chose parent e_idx={} mi={}".format(e_idx, l_mi[e_idx])) 
Developer: microsoft, Project: petridishnn, Lines of code: 18, Source file: controller.py

Example 9: __init__

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def __init__(self, options):
        super(PetridishModel, self).__init__()
        self.options = options

        # Classification info
        self.prediction_feature = options.prediction_feature
        self.out_filters = options.init_channel
        self.stem_channel_rate = options.stem_channel_rate
        self.data_format = options.data_format

        # LayerInfoList as a record of the mathematical graph
        self.net_info = options.net_info
        self.master = self.net_info.master
        self.is_cell_based = self.net_info.is_cell_based()
        self.n_layers = len(self.master)
        self.n_aux_preds = sum([int(x.aux_weight > 0) for x in self.master])

        self.ch_dim = _data_format_to_ch_dim(self.data_format)
        self.params_to_regularize = None

        self.compute_hallu_stats = False
        if hasattr(options, 'compute_hallu_stats'):
            self.compute_hallu_stats = options.compute_hallu_stats 
Developer: microsoft, Project: petridishnn, Lines of code: 25, Source file: feedforward.py

Example 10: generate_regression_callbacks

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def generate_regression_callbacks(layer_info_list, name_only=False):
    """
        A list of callbacks for getting validation errors.
    """
    vcs = []
    names = []
    for info in layer_info_list:
        if info.aux_weight > 0:
            scope_name = scope_prediction(info.id)
            name = scope_name+'/mean_square_error:0'
            vcs.append(ScalarStats(\
                names=name,
                prefix='val_'))
            names.append(name)
    if name_only:
        return names
    return vcs 
Developer: microsoft, Project: petridishnn, Lines of code: 19, Source file: common.py

Example 11: fork_and_train_model

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def fork_and_train_model(ipc, options, log_dir, child_dir, prev_dir,
        model_str, model_iter, parent_iter, search_depth, job_type):
    """
    Spawn a process to write a script for the crawler, then
    wait for the crawler to finish. Afterwards, report to the
    main process.
    """
    entry_func = partial(
        train_child_remotely,
        model_options=options, log_dir=log_dir,
        child_dir=child_dir, prev_dir=prev_dir,
        curr_iter=model_iter)
    #logger.info('Remote child {} will check finish in dir {}'.format(
    #   model_iter, log_dir))
    stop_func = partial(has_stopped, log_dir=log_dir)
    msg_func = lambda model_str=model_str, \
        model_iter=model_iter, parent_iter=parent_iter, \
        search_depth=search_depth, job_type=job_type \
        : [ model_str, model_iter, parent_iter, search_depth, job_type ]
    ipc.spawn(job_type, entry_func, stop_func, msg_func, sleep_time=1) 
Developer: microsoft, Project: petridishnn, Lines of code: 22, Source file: petridish_main.py

Example 12: __init__

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def __init__(self, args):
        super(AnytimeFCN, self).__init__(None, args)


        # Class weight for fully convolutional networks
        self.class_weight = None
        if hasattr(args, 'class_weight'):
            self.class_weight = args.class_weight
        if self.class_weight is None:
            self.class_weight = np.ones(self.num_classes, dtype=np.float32)
        logger.info('Class weights: {}'.format(self.class_weight))

        self.is_label_one_hot = args.is_label_one_hot
        self.eval_threshold = args.eval_threshold
        self.do_scale_feat_to_label = args.do_scale_feat_to_label
        self.n_pools = args.n_pools if not self.do_scale_feat_to_label else 0
        self.is_atrous = args.is_atrous
        self.output_stride = args.output_stride
        # base_o_s / o_s * base_rate ; base_o_s == 16
        self.atrous_rates = [6,12,18]
        self.atrous_rates_base_output_stride = 16

        self.input_height = args.input_height
        self.input_width = args.input_width 
Developer: microsoft, Project: petridishnn, Lines of code: 26, Source file: anytime_fcn.py

Example 13: input_transform

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def input_transform(self, points, k=3):
        # [B,N,3] --> [3, k]
        num_point = points.get_shape()[1]
        points = tf.expand_dims(points, -1)
        with argscope(Conv2D, nl=BNReLU, padding='VALID'), \
                argscope(FullyConnected, nl=BNReLU):
            transmat = (LinearWrap(points)
                        .Conv2D('tconv0', 64, kernel_shape=[1, 3])
                        .Conv2D('tconv1', 128, kernel_shape=1)
                        .Conv2D('tconv2', 1024, kernel_shape=1)
                        .MaxPooling('tpool0', [num_point, 1])
                        .FullyConnected('tfc0', 512, nl=BNReLU)
                        .FullyConnected('tfc1', 256, nl=BNReLU)
                        .TransformPoints('transf_xyz', 3, in_dim=3)())
        logger.info('transformation matrix: {}\n\n'.format(transmat.get_shape()))
        return transmat 
Developer: PatWie, Project: tensorflow-recipes, Lines of code: 18, Source file: pointnet.py

Example 14: _parameter_net

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def _parameter_net(self, theta, kernel_shape=9):
        """Estimate filters for convolution layers

        Args:
            theta: angle of filter
            kernel_shape: size of each filter

        Returns:
            learned filter as [B, k, k, 1]
        """
        with argscope(LeakyReLU, alpha=0.2), \
                argscope(FullyConnected, nl=LeakyReLU):
            net = FullyConnected('fc1', theta, 64)
            net = FullyConnected('fc2', net, 128)

        pred_filter = FullyConnected('fc3', net, kernel_shape ** 2, nl=tf.identity)
        pred_filter = tf.reshape(pred_filter, [BATCH, kernel_shape, kernel_shape, 1], name="pred_filter")
        logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))
        return pred_filter 
Developer: PatWie, Project: tensorflow-recipes, Lines of code: 21, Source file: steering-filter.py

Example 15: build_graph

# Required module: from tensorpack.utils import logger [as alias]
# Or: from tensorpack.utils.logger import info [as alias]
def build_graph(self, image, label):
        image = self.image_preprocess(image)
        assert self.data_format in ['NCHW', 'NHWC']
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        loss = ImageNetModel.compute_loss_and_error(
            logits, label, label_smoothing=self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Developer: ppwwyyxx, Project: GroupNorm-reproduce, Lines of code: 27, Source file: imagenet_utils.py


Note: The tensorpack.utils.logger.info method examples in this article were compiled by 纯净天空 from open-source code hosting and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors; please refer to the corresponding project's License before distributing or using it. Do not reproduce without permission.