

Python glog.info Method Code Examples

This article collects typical code examples of the glog.info method in Python. If you are wondering what glog.info does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the glog module that this method belongs to.


The following presents 15 code examples of the glog.info method, sorted by popularity by default.
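
Before the individual examples, here is a minimal usage sketch of glog.info, assuming the python-glog package is installed. The alias, the model path, and the timing value are illustrative placeholders rather than code from any project listed here; the sketch only shows that glog.info accepts both printf-style arguments (as in Example 1) and pre-formatted strings (as in Example 2).

import glog
import glog as log  # several examples below appear to use this alias

model_path = '/tmp/use_model'  # hypothetical path, for illustration only

# printf-style formatting is handled by the underlying standard logging module
glog.info('Loading model from %s', model_path)

# a pre-formatted string works just as well
log.info('Model loaded in {:.3f}s'.format(0.123))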

Example 1: __init__

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def __init__(self, uri, batch_size=100):
        """Create a new UseEncoderClient object

        Args:
            uri: The uri to the tensorflow_hub USE module
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tf_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._embeddings = embed_fn(self._fed_texts)
            encoding_info = embed_fn.get_output_info_dict().get('default')
            if encoding_info:
                self._encoding_dim = encoding_info.get_shape()[-1].value
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Developer: PolyAI-LDN, Project: polyai-models, Lines of code: 23, Source file: encoder_clients.py

Example 2: predict

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def predict(image_path, lanenet_weights, hnet_weights):
    assert ops.exists(image_path), '{:s} not exist'.format(image_path)
    log.info('Reading image data and preprocessing')
    t_start = time.time()
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)

    image_vis = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite('./out/origin_image.png', image_vis)

    image_hnet = cv2.resize(image, (128, 64), interpolation=cv2.INTER_LINEAR)
    log.info('Image loading done, elapsed time: {:.5f}s'.format(time.time() - t_start))

    # step1: predict from lanenet model
    lane_coordinate, cluster_index, labels = predict_lanenet(image_vis, lanenet_weights)
    tf.reset_default_graph()

    # step2: fit from hnet model
    lanes_pts = []
    for i in cluster_index:
        idx = np.where(labels == i)
        coord = lane_coordinate[idx]
        lanes_pts.append(coord)
    mask_image = hnet_predict(image_hnet, hnet_weights, lanes_pts, image_vis)
    cv2.imwrite('./out/predict_hnet.png', mask_image) 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines of code: 26, Source file: lanenet_hnet_predict.py

Example 3: _cluster

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _cluster(prediction, bandwidth):
        """
        Implements the clustering step from Section II of the paper
        :param prediction:
        :param bandwidth:
        :return:
        """
        ms = MeanShift(bandwidth, bin_seeding=True)
        log.info('Starting Mean Shift clustering ...')
        tic = time.time()
        try:
            ms.fit(prediction)
        except ValueError as err:
            log.error(err)
            return 0, [], []
        log.info('Mean Shift elapsed time: {:.5f}s'.format(time.time() - tic))
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_

        num_clusters = cluster_centers.shape[0]

        log.info('Number of clusters: {:d}'.format(num_clusters))

        return num_clusters, labels, cluster_centers 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines of code: 26, Source file: lanenet_cluster.py

Example 4: _cluster_v2

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _cluster_v2(prediction):
        """
        dbscan cluster
        :param prediction:
        :return:
        """
        db = DBSCAN(eps=0.7, min_samples=200).fit(prediction)
        db_labels = db.labels_
        unique_labels = np.unique(db_labels)
        unique_labels = [tmp for tmp in unique_labels if tmp != -1]
        log.info('Number of clusters: {:d}'.format(len(unique_labels)))

        num_clusters = len(unique_labels)
        cluster_centers = db.components_

        return num_clusters, db_labels, cluster_centers 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines of code: 18, Source file: lanenet_cluster.py

Example 5: _generate_char_dict

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _generate_char_dict(self):
        """
        generate the char dict and ord map dict json file according to the lexicon list.
        gather all the single characters used in lexicon list.
        :return:
        """
        char_lexicon_set = set()
        for lexcion in self._lexicon_list:
            for s in lexcion:
                char_lexicon_set.add(s)

        log.info('Char set length: {:d}'.format(len(char_lexicon_set)))

        char_lexicon_list = list(char_lexicon_set)
        char_dict_builder = establish_char_dict.CharDictBuilder()
        char_dict_builder.write_char_dict(char_lexicon_list, save_path=self._char_dict_path)
        char_dict_builder.map_ord_to_index(char_lexicon_list, save_path=self._ord_map_dict_path)

        log.info('Write char dict map complete') 
Developer: MaybeShewill-CV, Project: CRNN_Tensorflow, Lines of code: 21, Source file: shadownet_data_feed_pipline.py

Example 6: gather_detectron

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def gather_detectron(self):
        glog.info('Gathering Detectron')

        if not exists(join(self.path_to_dataset, 'detectron')):
            os.mkdir(join(self.path_to_dataset, 'detectron'))

        detectron_file = join(self.path_to_dataset, 'metadata', 'detectron.p')
        if exists(detectron_file):
            glog.info('Loading coarse detections from: {0}'.format(detectron_file))
            with open(detectron_file, 'rb') as f:
                self.detectron = pickle.load(f)

        else:

            for i, basename in enumerate(tqdm(self.frame_basenames)):
                with open(join(self.path_to_dataset, 'detectron', '{0}.yml'.format(basename)), 'rb') as stream:
                    data = yaml.load(stream)
                boxes, classes, segms = data['boxes'], data['classes'], data['segms']

                self.detectron[basename] = {'boxes': boxes, 'segms': segms, 'keyps': None, 'classes': classes}

            with open(detectron_file, 'wb') as f:
                pickle.dump(self.detectron, f) 
Developer: krematas, Project: soccerontable, Lines of code: 25, Source file: core.py

Example 7: __init__

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def __init__(self, uri):
        """Create a new `USEDualEncoder` object."""
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tensorflow_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._context_embeddings = embed_fn(
                dict(input=self._fed_texts),
                signature="question_encoder",
                as_dict=True,
            )['outputs']
            empty_strings = tf.fill(
                tf.shape(self._fed_texts), ""
            )
            self._response_embeddings = embed_fn(
                dict(input=self._fed_texts, context=empty_strings),
                signature="response_encoder",
                as_dict=True,
            )['outputs']
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Developer: PolyAI-LDN, Project: conversational-datasets, Lines of code: 26, Source file: vector_based.py

Example 8: _create_train_and_dev

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _create_train_and_dev(self, contexts, responses):
        """Create a train and dev set of context and response vectors."""
        glog.info("Encoding the train set.")
        context_encodings = []
        response_encodings = []

        for i in tqdm(range(0, len(contexts), self._ENCODING_BATCH_SIZE)):
            contexts_batch = contexts[i:i + self._ENCODING_BATCH_SIZE]
            responses_batch = responses[i:i + self._ENCODING_BATCH_SIZE]
            context_encodings.append(
                self._encoder.encode_context(contexts_batch))
            response_encodings.append(
                self._encoder.encode_response(responses_batch))

        context_encodings = np.concatenate(
            context_encodings).astype(np.float32)
        response_encodings = np.concatenate(
            response_encodings).astype(np.float32)

        return train_test_split(
            context_encodings, response_encodings,
            test_size=0.2) 
Developer: PolyAI-LDN, Project: conversational-datasets, Lines of code: 24, Source file: vector_based.py

Example 9: init_from_model_file

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def init_from_model_file(self, model_file):
    """
    Initializes the model from a pre-trained model

    Args:
       model_file (str): the pre-trained model file path
    """
    log.info('Loading model from: {}'.format(model_file))
    if not os.path.isfile(model_file):
      raise Exception('No state file found in {}'.format(model_file))
    model_saved_state = torch.load(model_file, map_location='cpu')
    model_params = model_saved_state['model_params']
    self.current_epoch = model_saved_state['last_epoch']
    self.loss = model_saved_state.get('loss', self.loss)
    self.loss_params = model_saved_state.get('loss_params', self.loss_params)
    self.optimizer_type = model_saved_state['optimizer_type']
    self.items = model_saved_state.get('items', None)
    self.users = model_saved_state.get('users', None)
    self.num_items = model_saved_state.get('num_items', None)
    self.num_users = model_saved_state.get('num_users', None)
    self.__optimizer_state_dict = model_saved_state['optimizer']
    self.__sparse_optimizer_state_dict = model_saved_state.get('sparse_optimizer', None)

    self.model.load_model_params(model_params)
    self.__init_model()
    self.model.load_state_dict(model_saved_state['model']) 
Developer: amoussawi, Project: recoder, Lines of code: 28, Source file: model.py

Example 10: save_state

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def save_state(self, model_checkpoint_prefix):
    """
    Saves the model state in the path starting with ``model_checkpoint_prefix`` and appending it
    with the model current training epoch

    Args:
      model_checkpoint_prefix (str): the model save path prefix

    Returns:
      the model state file path
    """
    checkpoint_file = "{}_epoch_{}.model".format(model_checkpoint_prefix, self.current_epoch)
    log.info("Saving model to {}".format(checkpoint_file))
    current_state = {
      'recoder_version': __version__,
      'model_params': self.model.model_params(),
      'last_epoch': self.current_epoch,
      'model': self.model.state_dict(),
      'optimizer_type': self.optimizer_type,
      'optimizer': self.optimizer.state_dict(),
      'items': self.items,
      'users': self.users,
      'num_items': self.num_items,
      'num_users': self.num_users
    }

    if type(self.loss) is str:
      current_state['loss'] = self.loss
      current_state['loss_params'] = self.loss_params

    torch.save(current_state, checkpoint_file)
    return checkpoint_file 
Developer: amoussawi, Project: recoder, Lines of code: 34, Source file: model.py

Example 11: evaluate

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def evaluate(self, eval_dataset, num_recommendations, metrics, batch_size=1, num_users=None):
    """
    Evaluates the current model given an evaluation dataset.

    Args:
      eval_dataset (RecommendationDataset): evaluation dataset
      num_recommendations (int): number of top recommendations to consider.
      metrics (list): list of ``Metric`` to use for evaluation.
      batch_size (int, optional): batch size of computations.
    """
    results = self._evaluate(eval_dataset, num_recommendations, metrics,
                             batch_size=batch_size, num_users=num_users)
    for metric in results:
      log.info('{}: {}'.format(metric, np.mean(results[metric]))) 
Developer: amoussawi, Project: recoder, Lines of code: 16, Source file: model.py

Example 12: __load_index

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def __load_index(self, index_file):
    log.info('Loading index file from {}'.format(index_file))
    with open(index_file, 'rb') as _index_file:
      state = pickle.load(_index_file)
    self.embedding_size = state['embedding_size']
    self.id_map = state['id_map']
    embeddings_file = index_file + '.embeddings'
    self.index = an.AnnoyIndex(self.embedding_size, metric='angular')
    self.index.load(embeddings_file)
    self.inverse_id_map = dict([(v,k) for k,v in self.id_map.items()]) 
Developer: amoussawi, Project: recoder, Lines of code: 12, Source file: embedding.py

Example 13: encode_sentences

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        missing_sentences = [
            sentence for sentence in sentences
            if sentence not in self._encodings_dict]
        if len(sentences) != len(missing_sentences):
            glog.info(f"{len(sentences) - len(missing_sentences)} cached "
                      f"sentences will not be encoded")
        if missing_sentences:
            missing_encodings = self._encoder_client.encode_sentences(
                missing_sentences)
            for sentence, encoding in zip(missing_sentences,
                                          missing_encodings):
                self._encodings_dict[sentence] = encoding
            self._save_encodings_dict()

        encodings = np.array(
            [self._encodings_dict[sentence] for sentence in sentences])
        return encodings 
Developer: PolyAI-LDN, Project: polyai-models, Lines of code: 28, Source file: encoder_clients.py

Example 14: _create_tokenizer_from_hub_module

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _create_tokenizer_from_hub_module(uri):
        """Get the vocab file and casing info from the Hub module."""
        with tf.Graph().as_default():
            bert_module = tf_hub.Module(uri, trainable=False)
            tokenization_info = bert_module(
                signature="tokenization_info", as_dict=True)
            with tf.Session() as sess:
                vocab_file, do_lower_case = sess.run(
                    [
                        tokenization_info["vocab_file"],
                        tokenization_info["do_lower_case"]
                    ])

        return FullTokenizer(
            vocab_file=vocab_file, do_lower_case=do_lower_case) 
Developer: PolyAI-LDN, Project: polyai-models, Lines of code: 17, Source file: encoder_clients.py

Example 15: _evaluate

# Required import: import glog [as alias]
# Or: from glog import info [as alias]
def _evaluate(client, examples):
    # Compute context encodings.
    context_encodings = client.encode_contexts(
        contexts=[example.context for example in examples],
        extra_contexts=[example.extra_contexts for example in examples],
    )

    # Iterate through examples and score candidates.
    ranks = []
    for i, example in enumerate(examples):
        responses = [example.response] + example.distractors
        response_encodings = client.encode_responses(responses)
        scores = context_encodings[i].dot(response_encodings.T)

        # Find the position of 0 in the argsort, as index 0 is the correct
        # response.
        ranks.append((-scores).argsort().argmin())
        if (i + 1) % 100 == 0:
            glog.info(f"Scored {i + 1} / {len(examples)} examples.")

    ranks = numpy.asarray(ranks)

    for k in [1, 10, 50]:
        recall_at_k = (ranks < k).mean()
        glog.info(f"Recall@{k} = {recall_at_k:.3f}")

    mrr = (1 / (1.0 + ranks)).mean()
    glog.info(f"MRR = {mrr:.3f}") 
Developer: PolyAI-LDN, Project: polyai-models, Lines of code: 30, Source file: evaluate_encoder.py


Note: The glog.info method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.