This article collects typical usage examples of the Python method glog.error: what glog.error does, how to call it, and how it appears in real code. The curated examples below should help, and you can also explore the other functions of the glog module.
The following shows 12 code examples of glog.error, ordered roughly by popularity.
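Before diving in, here is a minimal, self-contained sketch of the glog severity functions the examples below rely on. It assumes the glog package from PyPI (python-glog) is installed; only functions that actually appear in the examples (error, info, debug) plus setLevel are used.

import glog as log

# Each call writes a timestamped, severity-tagged line to stderr,
# mirroring Google's C++ glog format.
log.setLevel('DEBUG')  # emit everything from DEBUG upward
log.info('pipeline started')
log.debug('verbose detail for debugging')
log.error('recoverable problem; execution continues')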
Example 1: normalize
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used in this example: import tensorflow as tf (glog is imported as log)
def normalize(gt_image, gt_binary_image, gt_instance_image):
"""
Normalize the image data by substracting the imagenet mean value
:param gt_image:
:param gt_binary_image:
:param gt_instance_image:
:return:
"""
if gt_image.get_shape().as_list()[-1] != 3 \
or gt_binary_image.get_shape().as_list()[-1] != 1 \
or gt_instance_image.get_shape().as_list()[-1] != 1:
log.error(gt_image.get_shape())
log.error(gt_binary_image.get_shape())
log.error(gt_instance_image.get_shape())
raise ValueError('Input must be of size [height, width, C>0]')
gt_image = tf.cast(gt_image, dtype=tf.float32)
gt_image = tf.subtract(tf.divide(gt_image, tf.constant(127.5, dtype=tf.float32)),
tf.constant(1.0, dtype=tf.float32))
return gt_image, gt_binary_image, gt_instance_image
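A hypothetical call of normalize, assuming a TensorFlow version where tf.random.uniform is available and TF1-style tensors shaped [height, width, channels] (the sizes below are made up):

import tensorflow as tf

gt = tf.random.uniform([256, 512, 3], maxval=255.0)  # fake source image
binary_lbl = tf.zeros([256, 512, 1])                 # fake binary label
instance_lbl = tf.zeros([256, 512, 1])               # fake instance label

# img holds values scaled into [-1, 1] once evaluated; the labels pass through untouched.
img, binary_lbl, instance_lbl = normalize(gt, binary_lbl, instance_lbl)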
Example 2: _cluster
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import time; from sklearn.cluster import MeanShift (glog is imported as log)
def _cluster(prediction, bandwidth):
"""
实现论文SectionⅡ的cluster部分
:param prediction:
:param bandwidth:
:return:
"""
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    log.info('Start mean shift clustering ...')
tic = time.time()
try:
ms.fit(prediction)
except ValueError as err:
log.error(err)
return 0, [], []
    log.info('Mean shift took: {:.5f}s'.format(time.time() - tic))
labels = ms.labels_
cluster_centers = ms.cluster_centers_
num_clusters = cluster_centers.shape[0]
    log.info('Number of clusters: {:d}'.format(num_clusters))
return num_clusters, labels, cluster_centers
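The (0, [], []) failure tuple logged through log.error above lets a caller tell a failed fit apart from a successful one. A hypothetical caller (the array shape and bandwidth are made up for illustration) might look like this:

import numpy as np

embedding_feats = np.random.rand(500, 4).astype(np.float32)  # fake embedding features

num_clusters, labels, centers = _cluster(embedding_feats, bandwidth=1.5)
if num_clusters == 0:
    # _cluster already logged the ValueError via log.error; just skip this frame
    print('clustering failed, skipping frame')
else:
    print('found {:d} clusters'.format(num_clusters))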
Example 3: imread
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import cv2; import numpy as np; from os.path import exists
def imread(filename, dtype=np.float32, sfactor=1.0, image_type='rgb', flip=False):
if exists(filename):
image = cv2.imread(filename)
if image_type == 'gray':
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
elif image_type == 'rgb':
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        else:
            glog.error('Unknown image_type: {0}'.format(image_type))
if dtype == np.float32 or dtype == np.float64:
image = image.astype(dtype)
image /= 255.
if sfactor != 1.0:
image = cv2.resize(image, None, fx=sfactor, fy=sfactor)
        if flip:
            image = image[:, ::-1, ...]  # horizontal flip; the ellipsis also covers 2-D gray images
else:
glog.error('File {0} not found'.format(filename))
image = np.array([-1])
return image
Example 4: get_dataset_info
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import os; import numpy as np
def get_dataset_info(path_to_dataset, info_file='info.txt'):
fname = os.path.join(path_to_dataset, info_file)
if os.path.exists(fname):
info = np.loadtxt(fname, delimiter=':', dtype=str)
out = {}
for i in range(info.shape[0]):
out[info[i, 0]] = info[i, 1]
out['fps'] = int(out['fps'])
out['height'] = int(out['height'])
out['width'] = int(out['width'])
out['ext'] = out['extension'][1:]
if 'flipped' not in out:
out['flipped'] = 0
else:
out['flipped'] = int(out['flipped'])
return out
else:
glog.error('There is no info file in folder {0}'.format(path_to_dataset))
return -1
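From the parsing above one can infer the expected layout of info.txt: one key:value pair per line, with fps, height, width, and extension required and flipped optional (the leading character of extension is stripped to form ext). A plausible example file, shown only to illustrate the assumed format:

fps:25
height:720
width:1280
extension:.jpg
flipped:0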
Example 5: _embedding_feats_dbscan_cluster
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import numpy as np; from sklearn.cluster import DBSCAN; from sklearn.preprocessing import StandardScaler; CFG is the project config (glog is imported as log)
def _embedding_feats_dbscan_cluster(embedding_image_feats):
"""
dbscan cluster
:param embedding_image_feats:
:return:
"""
db = DBSCAN(eps=CFG.POSTPROCESS.DBSCAN_EPS, min_samples=CFG.POSTPROCESS.DBSCAN_MIN_SAMPLES)
try:
features = StandardScaler().fit_transform(embedding_image_feats)
db.fit(features)
except Exception as err:
log.error(err)
ret = {
'origin_features': None,
'cluster_nums': 0,
'db_labels': None,
'unique_labels': None,
'cluster_center': None
}
return ret
    db_labels = db.labels_
    unique_labels = np.unique(db_labels)  # may include -1, DBSCAN's noise label
    num_clusters = len(unique_labels)
    cluster_centers = db.components_  # DBSCAN core samples, used as cluster centers here
ret = {
'origin_features': features,
'cluster_nums': num_clusters,
'db_labels': db_labels,
'unique_labels': unique_labels,
'cluster_center': cluster_centers
}
return ret
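One caveat when consuming this dict: DBSCAN labels noise points -1, so unique_labels can contain -1 and cluster_nums then counts noise as a cluster. A hypothetical caller that filters it out (the input shape is made up):

import numpy as np

embedding_image_feats = np.random.rand(1000, 4).astype(np.float32)  # fake features

ret = _embedding_feats_dbscan_cluster(embedding_image_feats)
if ret['cluster_nums'] == 0:
    # the except branch above already reported the failure via log.error
    print('clustering failed')
else:
    lane_labels = [lbl for lbl in ret['unique_labels'] if lbl != -1]  # drop noise
    print('usable clusters: {:d}'.format(len(lane_labels)))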
Example 6: _lane_fit
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import warnings; import numpy as np
def _lane_fit(lane_pts):
"""
车道线多项式拟合
:param lane_pts:
:return:
"""
if not isinstance(lane_pts, np.ndarray):
lane_pts = np.array(lane_pts, np.float32)
x = lane_pts[:, 0]
y = lane_pts[:, 1]
x_fit = []
y_fit = []
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
f1 = np.polyfit(y, x, 3)
p1 = np.poly1d(f1)
y_min = int(np.min(y))
y_max = int(np.max(y))
y_fit = []
for i in range(y_min, y_max + 1):
y_fit.append(i)
x_fit = p1(y_fit)
        except Warning:
            # np.polyfit issued a warning (e.g. a RankWarning); fall back to the raw points
            x_fit = x
            y_fit = y
    return zip(x_fit, y_fit)
Example 7: _write_tfrecords
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import os, time, cv2; import tensorflow as tf; module-level _SAMPLE_INFO_QUEUE, _SENTINEL, CFG and helper functions (glog is imported as log)
def _write_tfrecords(tfrecords_writer):
"""
:param tfrecords_writer:
:return:
"""
while True:
sample_info = _SAMPLE_INFO_QUEUE.get()
if sample_info == _SENTINEL:
log.info('Process {:d} finished writing work'.format(os.getpid()))
tfrecords_writer.close()
break
sample_path = sample_info[0]
sample_label = sample_info[1]
        if not _is_valid_jpg_file(sample_path):
            log.error('Image file: {:s} is not a valid jpg file'.format(sample_path))
continue
try:
image = cv2.imread(sample_path, cv2.IMREAD_COLOR)
if image is None:
continue
image = cv2.resize(image, dsize=tuple(CFG.ARCH.INPUT_SIZE), interpolation=cv2.INTER_LINEAR)
image = image.tostring()
except IOError as err:
log.error(err)
continue
features = tf.train.Features(feature={
'labels': _int64_feature(sample_label),
'images': _bytes_feature(image),
'imagepaths': _bytes_feature(sample_path)
})
tf_example = tf.train.Example(features=features)
tfrecords_writer.write(tf_example.SerializeToString())
log.debug('Process: {:d} get sample from sample_info_queue[current_size={:d}], '
'and write it to local file at time: {}'.format(
os.getpid(), _SAMPLE_INFO_QUEUE.qsize(), time.strftime('%H:%M:%S')))
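The worker above depends on the module-level _SAMPLE_INFO_QUEUE and _SENTINEL, whose construction is not shown here. The following is only a sketch of one plausible setup with multiprocessing; the sentinel value, shard names, and process count are assumptions, and tf.python_io.TFRecordWriter is the TF1 API matching the tf.train.Features usage above:

import multiprocessing
import tensorflow as tf

_SENTINEL = ('', None)  # assumed marker; any unique comparable value works
_SAMPLE_INFO_QUEUE = multiprocessing.Manager().Queue(maxsize=2000)

def _writer_entry(shard_path):
    # Each child process opens its own writer and drains the shared queue.
    writer = tf.python_io.TFRecordWriter(shard_path)
    _write_tfrecords(writer)

def _start_writers(writer_process_nums=4):
    processes = []
    for i in range(writer_process_nums):
        p = multiprocessing.Process(
            target=_writer_entry, args=('shard_{:d}.tfrecords'.format(i),))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()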
Example 8: _init_example_info_queue
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import time; import tqdm (glog is imported as log)
def _init_example_info_queue(self):
"""
Read index file and put example info into SAMPLE_INFO_QUEUE
:return:
"""
log.info('Start filling {:s} dataset sample information queue...'.format(self._dataset_flag))
t_start = time.time()
for annotation_info in tqdm.tqdm(self._annotation_infos):
image_path = annotation_info[0]
lexicon_index = annotation_info[1]
try:
lexicon_label = [self._lexicon_infos[lexicon_index]]
encoded_label, _ = self.encode_labels(lexicon_label)
_SAMPLE_INFO_QUEUE.put((image_path, encoded_label[0]))
except IndexError:
log.error('Lexicon doesn\'t contain lexicon index {:d}'.format(lexicon_index))
continue
for i in range(self._writer_process_nums):
_SAMPLE_INFO_QUEUE.put(_SENTINEL)
log.debug('Complete filling dataset sample information queue[current size: {:d}], cost time: {:.5f}s'.format(
_SAMPLE_INFO_QUEUE.qsize(),
time.time() - t_start
))
Example 9: imagesc
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import numpy as np; import matplotlib.pyplot as plt
def imagesc(matrix, points=None, ax=None, cmap='jet', grid=True, show_axis=True, vmin=None, vmax=None):
    if len(matrix.shape) > 2:
        glog.error('Input has more than 2 dimensions; maybe use imshow?')
else:
show = False
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
show = True
if points is not None:
ax.plot(points[:, 0], points[:, 1], 'c.')
if vmin is None:
vmin = np.min(matrix)
if vmax is None:
vmax = np.max(matrix)
ax.imshow(matrix, interpolation='nearest', cmap=cmap, vmin=vmin, vmax=vmax)
        if grid:
            ax.grid(True)
if not show_axis:
ax.axis('off')
if show:
plt.show()
Example 10: read_flo
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import numpy as np
def read_flo(filename):
with open(filename, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if magic.size == 0 or magic[0] != 202021.25:
            glog.error('Magic number incorrect. Invalid .flo file')
            flow = -1
else:
w = np.fromfile(f, np.int32, count=1)[0]
h = np.fromfile(f, np.int32, count=1)[0]
data = np.fromfile(f, np.float32, count=2 * w * h)
flow = np.resize(data, (h, w, 2))
return flow
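For context, this is the Middlebury .flo layout the reader checks: a float32 magic number (202021.25), an int32 width, an int32 height, then h*w*2 interleaved float32 values. A minimal caller that handles the -1 error sentinel (the file name is hypothetical):

flow = read_flo('example.flo')
if isinstance(flow, int) and flow == -1:
    glog.error('could not read the optical flow file')
else:
    u, v = flow[..., 0], flow[..., 1]  # horizontal / vertical displacement
    print('flow field shape: {}'.format(flow.shape))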
Example 11: _preprocess_data
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import os, csv, json; import numpy as np; import tensorflow as tf
def _preprocess_data(encoder_client, hparams, data_dir):
"""Reads the data from the files, encodes it and parses the labels
Args:
encoder_client: an EncoderClient
hparams: a tf.contrib.training.HParams object containing the model
and training hyperparameters
        data_dir: The directory where the intent data has been downloaded
Returns:
categories, encodings, labels
"""
if hparams.data_regime == "full":
train_file = "train"
elif hparams.data_regime == "10":
train_file = "train_10"
elif hparams.data_regime == "30":
train_file = "train_30"
    else:
        glog.error(f"Invalid data regime: {hparams.data_regime}")
        # without raising here, train_file would be unbound below and fail with a NameError
        raise ValueError(f"Invalid data regime: {hparams.data_regime}")
train_data = os.path.join(
data_dir, hparams.task, f"{train_file}.csv")
test_data = os.path.join(data_dir, hparams.task, "test.csv")
categories_file = os.path.join(data_dir, hparams.task, "categories.json")
with tf.gfile.Open(categories_file, "r") as categories_file:
categories = json.load(categories_file)
labels = {}
encodings = {}
with tf.gfile.Open(train_data, "r") as data_file:
data = np.array(list(csv.reader(data_file))[1:])
labels[_TRAIN] = data[:, 1]
encodings[_TRAIN] = encoder_client.encode_sentences(data[:, 0])
with tf.gfile.Open(test_data, "r") as data_file:
data = np.array(list(csv.reader(data_file))[1:])
labels[_TEST] = data[:, 1]
encodings[_TEST] = encoder_client.encode_sentences(data[:, 0])
# convert labels to integers
labels = {
k: np.array(
[categories.index(x) for x in v]) for k, v in labels.items()
}
return categories, encodings, labels
Example 12: calibrate_camera
# Required import: import glog [as alias]
# Or: from glog import error [as alias]
# Also used: import os, pickle; import numpy as np; from os import listdir; from os.path import exists, join; from tqdm import tqdm; the project's calibration module
def calibrate_camera(self, vis_every=-1):
if not exists(join(self.path_to_dataset, 'calib')):
os.mkdir(join(self.path_to_dataset, 'calib'))
calib_file = join(self.path_to_dataset, 'metadata', 'calib.p')
if exists(calib_file):
        glog.info('Loading calibration from: {0}'.format(calib_file))
with open(calib_file, 'rb') as f:
self.calib = pickle.load(f)
else:
if not self.file_lists_match(listdir(join(self.path_to_dataset, 'calib'))):
# The first frame is estimated by manual clicking
manual_calib = join(self.path_to_dataset, 'calib', '{0}.npy'.format(self.frame_basenames[0]))
if exists(manual_calib):
                calib_npy = np.load(manual_calib, allow_pickle=True).item()  # np.save stored a dict, so allow_pickle is needed on newer NumPy
A, R, T = calib_npy['A'], calib_npy['R'], calib_npy['T']
else:
img = self.get_frame(0)
coarse_mask = self.get_mask_from_detectron(0)
A, R, T = calibration.calibrate_by_click(img, coarse_mask)
if A is None:
glog.error('Manual calibration failed!')
else:
np.save(join(self.path_to_dataset, 'calib', '{0}'.format(self.frame_basenames[0])),
{'A': A, 'R': R, 'T': T})
for i in tqdm(range(1, self.n_frames)):
# glog.info('Calibrating frame {0} ({1}/{2})'.format(self.frame_basenames[i], i, self.n_frames))
img = self.get_frame(i)
coarse_mask = self.get_mask_from_detectron(i)
                vis = vis_every > 0 and i % vis_every == 0  # the default vis_every=-1 now disables visualization
A, R, T, __ = calibration.calibrate_from_initialization(img, coarse_mask, A, R, T, vis)
np.save(join(self.path_to_dataset, 'calib', '{0}'.format(self.frame_basenames[i])),
{'A': A, 'R': R, 'T': T})
for i, basename in enumerate(tqdm(self.frame_basenames)):
            calib_npy = np.load(join(self.path_to_dataset, 'calib', '{0}.npy'.format(basename)),
                                allow_pickle=True).item()
A, R, T = calib_npy['A'], calib_npy['R'], calib_npy['T']
self.calib[basename] = {'A': A, 'R': R, 'T': T}
with open(calib_file, 'wb') as f:
pickle.dump(self.calib, f)
# ------------------------------------------------------------------------------------------------------------------