This page collects typical usage examples of the Python method neptune.create_experiment. If you are wondering what neptune.create_experiment does, how to call it, or where to find working examples of it, the curated samples below should help. You can also explore further usage examples from the neptune module.
Below are 13 code examples of neptune.create_experiment, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
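All 13 examples follow the same legacy neptune-client (pre-1.0) pattern: call neptune.init to select a project, then neptune.create_experiment to open a run, either as a context manager or by keeping the returned handle. Here is a minimal sketch of that pattern, assuming the legacy API; the token and project name are placeholders, and send_metric is the same legacy logging call used throughout the examples below.

import neptune

# Placeholder credentials; replace with your own token and workspace/project.
neptune.init(api_token='YOUR_API_TOKEN',
             project_qualified_name='your-workspace/your-project')

# Used as a context manager, the experiment is closed automatically on exit.
with neptune.create_experiment(name='minimal-sketch',
                               params={'lr': 0.01},
                               tags=['example']):
    for epoch in range(3):
        # Legacy neptune-client call for logging a numeric series.
        neptune.send_metric('loss', 1.0 / (epoch + 1))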
Example 1: __init__

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def __init__(self, *args, **kwargs):
    try:
        import neptune
    except ImportError:
        raise RuntimeError(
            "This contrib module requires neptune-client to be installed. "
            "You may install neptune with command: \n pip install neptune-client \n"
        )

    if kwargs.get("offline_mode", False):
        self.mode = "offline"
        neptune.init(project_qualified_name="dry-run/project", backend=neptune.OfflineBackend())
    else:
        self.mode = "online"
        neptune.init(api_token=kwargs.get("api_token"), project_qualified_name=kwargs.get("project_name"))

    kwargs["name"] = kwargs.pop("experiment_name", None)
    self._experiment_kwargs = {
        k: v for k, v in kwargs.items() if k not in ["api_token", "project_name", "offline_mode"]
    }

    self.experiment = neptune.create_experiment(**self._experiment_kwargs)
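Judging by the offline/online split and the stored self.experiment handle, this __init__ belongs to a framework logger integration (the signature resembles a contrib handler such as pytorch-ignite's NeptuneLogger, though that is an inference). A hypothetical instantiation under that assumption:

# Hypothetical usage; the class name NeptuneLogger is assumed, not given above.
npt_logger = NeptuneLogger(api_token='YOUR_API_TOKEN',
                           project_name='your-workspace/your-project',
                           experiment_name='mnist-run')
npt_logger.experiment  # the handle returned by neptune.create_experiment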
Example 2: train_evaluate_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def train_evaluate_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['train', 'evaluate', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_auc = []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            train_data_split, valid_data_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]

            if USE_AUXILIARY_DATA:
                auxiliary = pd.read_csv(PARAMS.auxiliary_metadata_filepath)
                train_auxiliary = auxiliary[auxiliary[ID_COLUMN].isin(valid_data_split[ID_COLUMN].tolist())]
                train_data_split = pd.concat([train_data_split, train_auxiliary], axis=0)

            LOGGER.info('Started fold {}'.format(fold_id))
            auc, _ = fold_fit_evaluate_loop(train_data_split, valid_data_split, fold_id)
            LOGGER.info('Fold {} AUC {}'.format(fold_id, auc))
            neptune.send_metric('Fold {} AUC'.format(fold_id), auc)
            fold_auc.append(auc)

        auc_mean, auc_std = np.mean(fold_auc), np.std(fold_auc)
        log_scores(auc_mean, auc_std)
Example 3: evaluate_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def evaluate_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['evaluate', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_auc = []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            valid_data_split = meta_train.iloc[valid_idx]

            LOGGER.info('Started fold {}'.format(fold_id))
            auc, _ = fold_evaluate_loop(valid_data_split, fold_id)
            LOGGER.info('Fold {} AUC {}'.format(fold_id, auc))
            neptune.send_metric('Fold {} AUC'.format(fold_id), auc)
            fold_auc.append(auc)

        auc_mean, auc_std = np.mean(fold_auc), np.std(fold_auc)
        log_scores(auc_mean, auc_std)
Example 4: evaluate_predict_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def evaluate_predict_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]
    meta_test = meta[meta['is_train'] == 0]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['evaluate', 'predict', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_auc, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            valid_data_split = meta_train.iloc[valid_idx]

            LOGGER.info('Started fold {}'.format(fold_id))
            auc, out_of_fold_prediction, test_prediction = fold_evaluate_predict_loop(valid_data_split,
                                                                                      meta_test,
                                                                                      fold_id)
            LOGGER.info('Fold {} AUC {}'.format(fold_id, auc))
            neptune.send_metric('Fold {} AUC'.format(fold_id), auc)

            fold_auc.append(auc)
            out_of_fold_train_predictions.append(out_of_fold_prediction)
            out_of_fold_test_predictions.append(test_prediction)

        train_ids, train_predictions = [], []
        for idx_fold, train_pred_fold in out_of_fold_train_predictions:
            train_ids.extend(idx_fold)
            train_predictions.extend(train_pred_fold)

        auc_mean, auc_std = np.mean(fold_auc), np.std(fold_auc)
        log_scores(auc_mean, auc_std)
        save_predictions(train_ids, train_predictions, meta_test, out_of_fold_test_predictions)
Example 5: train

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def train():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_train = meta[meta['is_train'] == 1]

    cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)
    for train_idx, valid_idx in cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1)):
        break

    meta_train_split, meta_valid_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]

    if USE_AUXILIARY_DATA:
        auxiliary = pd.read_csv(PARAMS.auxiliary_metadata_filepath)
        train_auxiliary = auxiliary[auxiliary[ID_COLUMN].isin(meta_valid_split[ID_COLUMN].tolist())]
        meta_train_split = pd.concat([meta_train_split, train_auxiliary], axis=0)

    if DEV_MODE:
        meta_train_split = meta_train_split.sample(PARAMS.dev_mode_size, random_state=SEED)
        meta_valid_split = meta_valid_split.sample(int(PARAMS.dev_mode_size / 2), random_state=SEED)

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['train'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        data = {'input': {'meta': meta_train_split},
                'callback_input': {'meta_valid': meta_valid_split}}

        pipeline_network = network(config=CONFIG, train_mode=True)
        pipeline_network.clean_cache()
        pipeline_network.fit_transform(data)
        pipeline_network.clean_cache()
Example 6: predict

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def predict():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_test = meta[meta['is_train'] == 0]

    if DEV_MODE:
        meta_test = meta_test.sample(PARAMS.dev_mode_size, random_state=SEED)

    data = {'input': {'meta': meta_test},
            'callback_input': {'meta_valid': None}}

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['predict'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        pipeline_network = network(config=CONFIG, train_mode=False)
        pipeline_postprocessing = pipelines.mask_postprocessing(config=CONFIG)

        pipeline_network.clean_cache()
        predicted_masks = pipeline_network.transform(data)
        test_masks = {'input_masks': predicted_masks}
        output = pipeline_postprocessing.transform(test_masks)
        pipeline_network.clean_cache()
        pipeline_postprocessing.clean_cache()
        y_pred_test = output['binarized_images']

        submission = utils.create_submission(meta_test, y_pred_test)

        submission_filepath = os.path.join(EXPERIMENT_DIR, 'submission.csv')
        submission.to_csv(submission_filepath, index=None, encoding='utf-8')
        LOGGER.info('submission saved to {}'.format(submission_filepath))
        LOGGER.info('submission head \n\n{}'.format(submission.head()))
Example 7: train_evaluate_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def train_evaluate_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['train', 'evaluate', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_iou, fold_iout = [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            train_data_split, valid_data_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]

            if USE_AUXILIARY_DATA:
                auxiliary = pd.read_csv(PARAMS.auxiliary_metadata_filepath)
                train_auxiliary = auxiliary[auxiliary[ID_COLUMN].isin(valid_data_split[ID_COLUMN].tolist())]
                train_data_split = pd.concat([train_data_split, train_auxiliary], axis=0)

            LOGGER.info('Started fold {}'.format(fold_id))
            iou, iout, _ = fold_fit_evaluate_loop(train_data_split, valid_data_split, fold_id)
            LOGGER.info('Fold {} IOU {}'.format(fold_id, iou))
            neptune.send_metric('Fold {} IOU'.format(fold_id), iou)
            LOGGER.info('Fold {} IOUT {}'.format(fold_id, iout))
            neptune.send_metric('Fold {} IOUT'.format(fold_id), iout)

            fold_iou.append(iou)
            fold_iout.append(iout)

        iou_mean, iou_std = np.mean(fold_iou), np.std(fold_iou)
        iout_mean, iout_std = np.mean(fold_iout), np.std(fold_iout)
        log_scores(iou_mean, iou_std, iout_mean, iout_std)
Example 8: evaluate_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def evaluate_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['evaluate', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_iou, fold_iout = [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            valid_data_split = meta_train.iloc[valid_idx]

            LOGGER.info('Started fold {}'.format(fold_id))
            iou, iout, _ = fold_evaluate_loop(valid_data_split, fold_id)
            LOGGER.info('Fold {} IOU {}'.format(fold_id, iou))
            neptune.send_metric('Fold {} IOU'.format(fold_id), iou)
            LOGGER.info('Fold {} IOUT {}'.format(fold_id, iout))
            neptune.send_metric('Fold {} IOUT'.format(fold_id), iout)

            fold_iou.append(iou)
            fold_iout.append(iout)

        iou_mean, iou_std = np.mean(fold_iou), np.std(fold_iou)
        iout_mean, iout_std = np.mean(fold_iout), np.std(fold_iout)
        log_scores(iou_mean, iou_std, iout_mean, iout_std)
Example 9: start_experiment

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def start_experiment(self):
    neptune.init(project_qualified_name=self.config.project)
    neptune.create_experiment(name=self.config.name,
                              params=self.params,
                              upload_source_files=get_filepaths(),
                              tags=self.config.tags)
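Unlike the with-block examples above, this helper leaves the experiment open, so the caller is responsible for closing it. A hedged sketch of driving such a helper, assuming the legacy client's neptune.stop() and a hypothetical runner instance that owns start_experiment:

# Hypothetical driver; 'runner' stands in for whatever object defines start_experiment.
runner.start_experiment()
neptune.send_metric('val_auc', 0.93)  # log whatever the run produces
neptune.stop()                        # close the experiment that create_experiment opened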
Example 10: train_evaluate_predict_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def train_evaluate_predict_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]
    meta_test = meta[meta['is_train'] == 0]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['train', 'evaluate', 'predict', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_auc, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            train_data_split, valid_data_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]

            if USE_AUXILIARY_DATA:
                auxiliary = pd.read_csv(PARAMS.auxiliary_metadata_filepath)
                train_auxiliary = auxiliary[auxiliary[ID_COLUMN].isin(valid_data_split[ID_COLUMN].tolist())]
                train_data_split = pd.concat([train_data_split, train_auxiliary], axis=0)

            LOGGER.info('Started fold {}'.format(fold_id))
            auc, out_of_fold_prediction, test_prediction = fold_fit_evaluate_predict_loop(train_data_split,
                                                                                          valid_data_split,
                                                                                          meta_test,
                                                                                          fold_id)
            LOGGER.info('Fold {} AUC {}'.format(fold_id, auc))
            neptune.send_metric('Fold {} AUC'.format(fold_id), auc)

            fold_auc.append(auc)
            out_of_fold_train_predictions.append(out_of_fold_prediction)
            out_of_fold_test_predictions.append(test_prediction)

        train_ids, train_predictions = [], []
        for idx_fold, train_pred_fold in out_of_fold_train_predictions:
            train_ids.extend(idx_fold)
            train_predictions.extend(train_pred_fold)

        auc_mean, auc_std = np.mean(fold_auc), np.std(fold_auc)
        log_scores(auc_mean, auc_std)
        save_predictions(train_ids, train_predictions, meta_test, out_of_fold_test_predictions)
Example 11: train_evaluate_predict_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def train_evaluate_predict_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]
    meta_test = meta[meta['is_train'] == 0]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['train', 'evaluate', 'predict', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_iou, fold_iout, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            train_data_split, valid_data_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]

            if USE_AUXILIARY_DATA:
                auxiliary = pd.read_csv(PARAMS.auxiliary_metadata_filepath)
                train_auxiliary = auxiliary[auxiliary[ID_COLUMN].isin(valid_data_split[ID_COLUMN].tolist())]
                train_data_split = pd.concat([train_data_split, train_auxiliary], axis=0)

            LOGGER.info('Started fold {}'.format(fold_id))
            iou, iout, out_of_fold_prediction, test_prediction = fold_fit_evaluate_predict_loop(train_data_split,
                                                                                                valid_data_split,
                                                                                                meta_test,
                                                                                                fold_id)
            LOGGER.info('Fold {} IOU {}'.format(fold_id, iou))
            neptune.send_metric('Fold {} IOU'.format(fold_id), iou)
            LOGGER.info('Fold {} IOUT {}'.format(fold_id, iout))
            neptune.send_metric('Fold {} IOUT'.format(fold_id), iout)

            fold_iou.append(iou)
            fold_iout.append(iout)
            out_of_fold_train_predictions.append(out_of_fold_prediction)
            out_of_fold_test_predictions.append(test_prediction)

        train_ids, train_predictions = [], []
        for idx_fold, train_pred_fold in out_of_fold_train_predictions:
            train_ids.extend(idx_fold)
            train_predictions.extend(train_pred_fold)

        iou_mean, iou_std = np.mean(fold_iou), np.std(fold_iou)
        iout_mean, iout_std = np.mean(fold_iout), np.std(fold_iout)
        log_scores(iou_mean, iou_std, iout_mean, iout_std)
        save_predictions(train_ids, train_predictions, meta_test, out_of_fold_test_predictions)
Example 12: evaluate_predict_cv

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def evaluate_predict_cv():
    meta = pd.read_csv(PARAMS.metadata_filepath)
    if DEV_MODE:
        meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    meta_train = meta[meta['is_train'] == 1]
    meta_test = meta[meta['is_train'] == 0]

    with neptune.create_experiment(name=EXPERIMENT_NAME,
                                   params=PARAMS,
                                   tags=TAGS + ['evaluate', 'predict', 'on_cv_folds'],
                                   upload_source_files=get_filepaths(),
                                   properties={'experiment_dir': EXPERIMENT_DIR}):
        cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)

        fold_iou, fold_iout, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], [], []
        for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
            valid_data_split = meta_train.iloc[valid_idx]

            LOGGER.info('Started fold {}'.format(fold_id))
            iou, iout, out_of_fold_prediction, test_prediction = fold_evaluate_predict_loop(valid_data_split,
                                                                                            meta_test,
                                                                                            fold_id)
            LOGGER.info('Fold {} IOU {}'.format(fold_id, iou))
            neptune.send_metric('Fold {} IOU'.format(fold_id), iou)
            LOGGER.info('Fold {} IOUT {}'.format(fold_id, iout))
            neptune.send_metric('Fold {} IOUT'.format(fold_id), iout)

            fold_iou.append(iou)
            fold_iout.append(iout)
            out_of_fold_train_predictions.append(out_of_fold_prediction)
            out_of_fold_test_predictions.append(test_prediction)

        train_ids, train_predictions = [], []
        for idx_fold, train_pred_fold in out_of_fold_train_predictions:
            train_ids.extend(idx_fold)
            train_predictions.extend(train_pred_fold)

        iou_mean, iou_std = np.mean(fold_iou), np.std(fold_iou)
        iout_mean, iout_std = np.mean(fold_iout), np.std(fold_iout)
        log_scores(iou_mean, iou_std, iout_mean, iout_std)
        save_predictions(train_ids, train_predictions, meta_test, out_of_fold_test_predictions)
Example 13: __init__

# Required module: import neptune [as alias]
# or: from neptune import create_experiment [as alias]
def __init__(
    self,
    metric_names: List[str] = None,
    log_on_batch_end: bool = True,
    log_on_epoch_end: bool = True,
    offline_mode: bool = False,
    **logging_params,
):
    """
    Args:
        metric_names (List[str]): list of metric names to log,
            if none - logs everything
        log_on_batch_end (bool): logs per-batch metrics if set True
        log_on_epoch_end (bool): logs per-epoch metrics if set True
        offline_mode (bool): whether logging to Neptune server should
            be turned off. It is useful for debugging
    """
    super().__init__(
        order=CallbackOrder.logging,
        node=CallbackNode.master,
        scope=CallbackScope.experiment,
    )
    self.metrics_to_log = metric_names
    self.log_on_batch_end = log_on_batch_end
    self.log_on_epoch_end = log_on_epoch_end

    if not (self.log_on_batch_end or self.log_on_epoch_end):
        raise ValueError("You have to log something!")

    if (self.log_on_batch_end and not self.log_on_epoch_end) or (
        not self.log_on_batch_end and self.log_on_epoch_end
    ):
        self.batch_log_suffix = ""
        self.epoch_log_suffix = ""
    else:
        self.batch_log_suffix = "_batch"
        self.epoch_log_suffix = "_epoch"

    if offline_mode:
        neptune.init(
            project_qualified_name="dry-run/project",
            backend=neptune.OfflineBackend(),
        )
    else:
        neptune.init(
            api_token=logging_params["api_token"],
            project_qualified_name=logging_params["project_name"],
        )
        logging_params.pop("api_token")
        logging_params.pop("project_name")

    self.experiment = neptune.create_experiment(**logging_params)
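As in Example 1, offline_mode swaps in neptune.OfflineBackend() so nothing is sent to the server, which is convenient for debugging. A hypothetical instantiation of this callback, assuming (from the CallbackOrder/CallbackNode/CallbackScope arguments) that it is a Catalyst-style callback class here called NeptuneLogger:

# Hypothetical usage; the callback class name is assumed for illustration.
logger = NeptuneLogger(
    log_on_batch_end=False,
    log_on_epoch_end=True,
    offline_mode=True,  # dry run: metrics stay local, nothing reaches Neptune
)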