

Python glog.info Method Code Examples

This article collects typical usage examples of the glog.info method in Python. If you are wondering how glog.info works in practice or what real calls look like, the curated examples below should help. You can also explore other usage examples from the glog module.


Below are 15 code examples of the glog.info method, sorted by popularity by default.
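Before the examples, here is a minimal, self-contained sketch of calling glog.info directly. It assumes the benley/python-glog package, which wraps the standard logging module; the model_uri value and the setLevel call are illustrative assumptions, not taken from any example below.

import glog

# setLevel is assumed to set the minimum severity that gets printed;
# adjust or drop it if your glog version exposes a different configuration call.
glog.setLevel("INFO")

model_uri = "https://tfhub.dev/..."  # hypothetical placeholder value

# Plain message, as in the examples below.
glog.info("Initializing graph.")

# Lazy %-style formatting: the string is only interpolated
# when the INFO level is enabled.
glog.info("Loading %s model from tensorflow hub", model_uri)

# Pre-formatted strings (str.format or f-strings) work as well.
glog.info("Recall@{} = {:.3f}".format(10, 0.42))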

Example 1: __init__

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def __init__(self, uri, batch_size=100):
        """Create a new UseEncoderClient object

        Args:
            uri: The uri to the tensorflow_hub USE module
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tf_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._embeddings = embed_fn(self._fed_texts)
            encoding_info = embed_fn.get_output_info_dict().get('default')
            if encoding_info:
                self._encoding_dim = encoding_info.get_shape()[-1].value
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Developer: PolyAI-LDN, Project: polyai-models, Lines: 23, Source: encoder_clients.py

Example 2: predict

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def predict(image_path, lanenet_weights, hnet_weights):
    assert ops.exists(image_path), '{:s} does not exist'.format(image_path)
    log.info('Start reading and preprocessing image data')
    t_start = time.time()
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)

    image_vis = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite('./out/origin_image.png', image_vis)

    image_hnet = cv2.resize(image, (128, 64), interpolation=cv2.INTER_LINEAR)
    log.info('Image loading finished, took: {:.5f}s'.format(time.time() - t_start))

    # step1: predict from lanenet model
    lane_coordinate, cluster_index, labels = predict_lanenet(image_vis, lanenet_weights)
    tf.reset_default_graph()

    # step2: fit from hnet model
    lanes_pts = []
    for i in cluster_index:
        idx = np.where(labels == i)
        coord = lane_coordinate[idx]
        lanes_pts.append(coord)
    mask_image = hnet_predict(image_hnet, hnet_weights, lanes_pts, image_vis)
    cv2.imwrite('./out/predict_hnet.png', mask_image) 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines: 26, Source: lanenet_hnet_predict.py

Example 3: _cluster

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _cluster(prediction, bandwidth):
        """
        Implements the clustering step from Section II of the paper
        :param prediction:
        :param bandwidth:
        :return:
        """
        ms = MeanShift(bandwidth, bin_seeding=True)
        log.info('Starting Mean Shift clustering ...')
        tic = time.time()
        try:
            ms.fit(prediction)
        except ValueError as err:
            log.error(err)
            return 0, [], []
        log.info('Mean Shift took: {:.5f}s'.format(time.time() - tic))
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_

        num_clusters = cluster_centers.shape[0]

        log.info('Number of clusters: {:d}'.format(num_clusters))

        return num_clusters, labels, cluster_centers 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines: 26, Source: lanenet_cluster.py

Example 4: _cluster_v2

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _cluster_v2(prediction):
        """
        dbscan cluster
        :param prediction:
        :return:
        """
        db = DBSCAN(eps=0.7, min_samples=200).fit(prediction)
        db_labels = db.labels_
        unique_labels = np.unique(db_labels)
        unique_labels = [tmp for tmp in unique_labels if tmp != -1]
        log.info('Number of clusters: {:d}'.format(len(unique_labels)))

        num_clusters = len(unique_labels)
        cluster_centers = db.components_

        return num_clusters, db_labels, cluster_centers 
Developer: stesha2016, Project: lanenet-enet-hnet, Lines: 18, Source: lanenet_cluster.py

Example 5: _generate_char_dict

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _generate_char_dict(self):
        """
        Generate the char dict and ord map dict JSON files from the lexicon list,
        gathering all the single characters used in the lexicon list.
        :return:
        """
        char_lexicon_set = set()
        for lexicon in self._lexicon_list:
            for s in lexicon:
                char_lexicon_set.add(s)

        log.info('Char set length: {:d}'.format(len(char_lexicon_set)))

        char_lexicon_list = list(char_lexicon_set)
        char_dict_builder = establish_char_dict.CharDictBuilder()
        char_dict_builder.write_char_dict(char_lexicon_list, save_path=self._char_dict_path)
        char_dict_builder.map_ord_to_index(char_lexicon_list, save_path=self._ord_map_dict_path)

        log.info('Write char dict map complete') 
Developer: MaybeShewill-CV, Project: CRNN_Tensorflow, Lines: 21, Source: shadownet_data_feed_pipline.py

Example 6: gather_detectron

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def gather_detectron(self):
        glog.info('Gathering Detectron')

        if not exists(join(self.path_to_dataset, 'detectron')):
            os.mkdir(join(self.path_to_dataset, 'detectron'))

        detectron_file = join(self.path_to_dataset, 'metadata', 'detectron.p')
        if exists(detectron_file):
            glog.info('Loading coarse detections from: {0}'.format(detectron_file))
            with open(detectron_file, 'rb') as f:
                self.detectron = pickle.load(f)

        else:

            for i, basename in enumerate(tqdm(self.frame_basenames)):
                with open(join(self.path_to_dataset, 'detectron', '{0}.yml'.format(basename)), 'rb') as stream:
                    data = yaml.load(stream, Loader=yaml.FullLoader)  # explicit Loader required by PyYAML >= 5.1
                boxes, classes, segms = data['boxes'], data['classes'], data['segms']

                self.detectron[basename] = {'boxes': boxes, 'segms': segms, 'keyps': None, 'classes': classes}

            with open(detectron_file, 'wb') as f:
                pickle.dump(self.detectron, f) 
Developer: krematas, Project: soccerontable, Lines: 25, Source: core.py

Example 7: __init__

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def __init__(self, uri):
        """Create a new `USEDualEncoder` object."""
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tensorflow_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._context_embeddings = embed_fn(
                dict(input=self._fed_texts),
                signature="question_encoder",
                as_dict=True,
            )['outputs']
            empty_strings = tf.fill(
                tf.shape(self._fed_texts), ""
            )
            self._response_embeddings = embed_fn(
                dict(input=self._fed_texts, context=empty_strings),
                signature="response_encoder",
                as_dict=True,
            )['outputs']
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Developer: PolyAI-LDN, Project: conversational-datasets, Lines: 26, Source: vector_based.py

Example 8: _create_train_and_dev

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _create_train_and_dev(self, contexts, responses):
        """Create a train and dev set of context and response vectors."""
        glog.info("Encoding the train set.")
        context_encodings = []
        response_encodings = []

        for i in tqdm(range(0, len(contexts), self._ENCODING_BATCH_SIZE)):
            contexts_batch = contexts[i:i + self._ENCODING_BATCH_SIZE]
            responses_batch = responses[i:i + self._ENCODING_BATCH_SIZE]
            context_encodings.append(
                self._encoder.encode_context(contexts_batch))
            response_encodings.append(
                self._encoder.encode_response(responses_batch))

        context_encodings = np.concatenate(
            context_encodings).astype(np.float32)
        response_encodings = np.concatenate(
            response_encodings).astype(np.float32)

        return train_test_split(
            context_encodings, response_encodings,
            test_size=0.2) 
Developer: PolyAI-LDN, Project: conversational-datasets, Lines: 24, Source: vector_based.py

Example 9: init_from_model_file

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def init_from_model_file(self, model_file):
    """
    Initializes the model from a pre-trained model

    Args:
       model_file (str): the pre-trained model file path
    """
    log.info('Loading model from: {}'.format(model_file))
    if not os.path.isfile(model_file):
      raise Exception('No state file found in {}'.format(model_file))
    model_saved_state = torch.load(model_file, map_location='cpu')
    model_params = model_saved_state['model_params']
    self.current_epoch = model_saved_state['last_epoch']
    self.loss = model_saved_state.get('loss', self.loss)
    self.loss_params = model_saved_state.get('loss_params', self.loss_params)
    self.optimizer_type = model_saved_state['optimizer_type']
    self.items = model_saved_state.get('items', None)
    self.users = model_saved_state.get('users', None)
    self.num_items = model_saved_state.get('num_items', None)
    self.num_users = model_saved_state.get('num_users', None)
    self.__optimizer_state_dict = model_saved_state['optimizer']
    self.__sparse_optimizer_state_dict = model_saved_state.get('sparse_optimizer', None)

    self.model.load_model_params(model_params)
    self.__init_model()
    self.model.load_state_dict(model_saved_state['model']) 
Developer: amoussawi, Project: recoder, Lines: 28, Source: model.py

Example 10: save_state

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def save_state(self, model_checkpoint_prefix):
    """
    Saves the model state in the path starting with ``model_checkpoint_prefix`` and appending it
    with the model current training epoch

    Args:
      model_checkpoint_prefix (str): the model save path prefix

    Returns:
      the model state file path
    """
    checkpoint_file = "{}_epoch_{}.model".format(model_checkpoint_prefix, self.current_epoch)
    log.info("Saving model to {}".format(checkpoint_file))
    current_state = {
      'recoder_version': __version__,
      'model_params': self.model.model_params(),
      'last_epoch': self.current_epoch,
      'model': self.model.state_dict(),
      'optimizer_type': self.optimizer_type,
      'optimizer': self.optimizer.state_dict(),
      'items': self.items,
      'users': self.users,
      'num_items': self.num_items,
      'num_users': self.num_users
    }

    if type(self.loss) is str:
      current_state['loss'] = self.loss
      current_state['loss_params'] = self.loss_params

    torch.save(current_state, checkpoint_file)
    return checkpoint_file 
Developer: amoussawi, Project: recoder, Lines: 34, Source: model.py

Example 11: evaluate

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def evaluate(self, eval_dataset, num_recommendations, metrics, batch_size=1, num_users=None):
    """
    Evaluates the current model given an evaluation dataset.

    Args:
      eval_dataset (RecommendationDataset): evaluation dataset
      num_recommendations (int): number of top recommendations to consider.
      metrics (list): list of ``Metric`` to use for evaluation.
      batch_size (int, optional): batch size of computations.
    """
    results = self._evaluate(eval_dataset, num_recommendations, metrics,
                             batch_size=batch_size, num_users=num_users)
    for metric in results:
      log.info('{}: {}'.format(metric, np.mean(results[metric]))) 
Developer: amoussawi, Project: recoder, Lines: 16, Source: model.py

Example 12: __load_index

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def __load_index(self, index_file):
    log.info('Loading index file from {}'.format(index_file))
    with open(index_file, 'rb') as _index_file:
      state = pickle.load(_index_file)
    self.embedding_size = state['embedding_size']
    self.id_map = state['id_map']
    embeddings_file = index_file + '.embeddings'
    self.index = an.AnnoyIndex(self.embedding_size, metric='angular')
    self.index.load(embeddings_file)
    self.inverse_id_map = dict([(v,k) for k,v in self.id_map.items()]) 
Developer: amoussawi, Project: recoder, Lines: 12, Source: embedding.py

Example 13: encode_sentences

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        missing_sentences = [
            sentence for sentence in sentences
            if sentence not in self._encodings_dict]
        if len(sentences) != len(missing_sentences):
            glog.info(f"{len(sentences) - len(missing_sentences)} cached "
                      f"sentences will not be encoded")
        if missing_sentences:
            missing_encodings = self._encoder_client.encode_sentences(
                missing_sentences)
            for sentence, encoding in zip(missing_sentences,
                                          missing_encodings):
                self._encodings_dict[sentence] = encoding
            self._save_encodings_dict()

        encodings = np.array(
            [self._encodings_dict[sentence] for sentence in sentences])
        return encodings 
Developer: PolyAI-LDN, Project: polyai-models, Lines: 28, Source: encoder_clients.py

Example 14: _create_tokenizer_from_hub_module

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _create_tokenizer_from_hub_module(uri):
        """Get the vocab file and casing info from the Hub module."""
        with tf.Graph().as_default():
            bert_module = tf_hub.Module(uri, trainable=False)
            tokenization_info = bert_module(
                signature="tokenization_info", as_dict=True)
            with tf.Session() as sess:
                vocab_file, do_lower_case = sess.run(
                    [
                        tokenization_info["vocab_file"],
                        tokenization_info["do_lower_case"]
                    ])

        return FullTokenizer(
            vocab_file=vocab_file, do_lower_case=do_lower_case) 
Developer: PolyAI-LDN, Project: polyai-models, Lines: 17, Source: encoder_clients.py

Example 15: _evaluate

# Required module: import glog [as alias]
# Or: from glog import info [as alias]
def _evaluate(client, examples):
    # Compute context encodings.
    context_encodings = client.encode_contexts(
        contexts=[example.context for example in examples],
        extra_contexts=[example.extra_contexts for example in examples],
    )

    # Iterate through examples and score candidates.
    ranks = []
    for i, example in enumerate(examples):
        responses = [example.response] + example.distractors
        response_encodings = client.encode_responses(responses)
        scores = context_encodings[i].dot(response_encodings.T)

        # Find the position of 0 in the argsort, as index 0 is the correct
        # response.
        ranks.append((-scores).argsort().argmin())
        if (i + 1) % 100 == 0:
            glog.info(f"Scored {i + 1} / {len(examples)} examples.")

    ranks = numpy.asarray(ranks)

    for k in [1, 10, 50]:
        recall_at_k = (ranks < k).mean()
        glog.info(f"Recall@{k} = {recall_at_k:.3f}")

    mrr = (1 / (1.0 + ranks)).mean()
    glog.info(f"MRR = {mrr:.3f}") 
Developer: PolyAI-LDN, Project: polyai-models, Lines: 30, Source: evaluate_encoder.py


Note: The glog.info examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.