This article collects typical usage examples of the Python method scrapbook.read_notebook. If you have been wondering how exactly scrapbook.read_notebook is used in Python, or what it can be used for, the curated code examples below should help. You can also explore further usage examples of the scrapbook module that this method belongs to.
The sections below present 15 code examples of the scrapbook.read_notebook method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
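All of the examples share one pattern: papermill executes a parameterized notebook, the notebook records metrics as scrapbook "scraps" (typically via scrapbook.glue), and the test reads the executed copy back with scrapbook.read_notebook to assert on those metrics. The constants OUTPUT_NOTEBOOK, KERNEL_NAME, and ABS_TOL seen below are defined elsewhere in the original test modules. As a minimal, self-contained sketch of the pattern, assuming a hypothetical notebook example.ipynb whose last cell calls scrapbook.glue("accuracy", accuracy):

import papermill as pm
import scrapbook as sb

# Run the notebook with injected parameters and write an executed copy.
pm.execute_notebook(
    "example.ipynb",          # hypothetical input notebook
    "example.output.ipynb",   # executed copy containing the recorded scraps
    parameters=dict(QUICK_RUN=True),
)

# Read the executed copy and collect all recorded scraps as a plain dict.
result = sb.read_notebook("example.output.ipynb").scraps.data_dict
print(result.get("accuracy"))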
Example 1: test_text_classification_unified_information
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_text_classification_unified_information(notebooks, tmp):
    notebook_path = notebooks["tc_unified_information"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            DATA_FOLDER=tmp,
            BERT_CACHE_DIR=tmp,
            BATCH_SIZE=32,
            BATCH_SIZE_PRED=512,
            NUM_EPOCHS=1,
            TEST=True,
            QUICK_RUN=True,
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["accuracy"], 0.93, abs=ABS_TOL)
    assert pytest.approx(result["precision"], 0.93, abs=ABS_TOL)
    assert pytest.approx(result["recall"], 0.93, abs=ABS_TOL)
    assert pytest.approx(result["f1"], 0.93, abs=ABS_TOL)
Example 2: test_text_classification_introspective_rationale
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_text_classification_introspective_rationale(notebooks, tmp):
    notebook_path = notebooks["tc_introspective_rationale"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            DATA_FOLDER=tmp,
            CUDA=torch.cuda.is_available(),
            QUICK_RUN=False,
            MODEL_SAVE_DIR=tmp
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    print(result)
    assert pytest.approx(result["accuracy"], 0.72, abs=ABS_TOL)
    assert pytest.approx(result["anti_accuracy"], 0.69, abs=ABS_TOL)
    assert pytest.approx(result["sparsity"], 0.17, abs=ABS_TOL)
Developer: interpretml, Project: interpret-text, Lines of code: 20, Source: test_notebook_introspective_rationale_explainer.py
Example 3: assay_one_notebook
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def assay_one_notebook(notebook_name, test_values):
    """Test a single notebook.

    This uses nbformat to append `nteract-scrapbook` commands to the
    specified notebook. The content of the commands and their expected
    values are stored in the `test_values` dictionary. The keys of this
    dictionary are strings to be used as scrapbook keys. The corresponding
    value is a `ScrapSpec` tuple. The `code` member of this tuple is
    the code (as a string) to be run to generate the scrapbook value. The
    `expected` member is a Python object which is checked for equality with
    the scrapbook value.

    Makes certain assumptions about directory layout.
    """
    input_notebook = "notebooks/" + notebook_name + ".ipynb"
    processed_notebook = "./test/notebooks/" + notebook_name + ".processed.ipynb"
    output_notebook = "./test/notebooks/" + notebook_name + ".output.ipynb"
    append_scrapbook_commands(input_notebook, processed_notebook, test_values)
    pm.execute_notebook(processed_notebook, output_notebook)
    nb = sb.read_notebook(output_notebook)
    for k, v in test_values.items():
        assert nb.scraps[k].data == v.expected
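The shape of test_values follows directly from the docstring. A minimal sketch of a call, assuming ScrapSpec is a simple named tuple with code and expected fields (the field names come from the docstring; the notebook name, expression, and expected value below are hypothetical):

from collections import namedtuple

# Assumed definition consistent with the docstring above; the real project
# may define ScrapSpec elsewhere.
ScrapSpec = namedtuple("ScrapSpec", ["code", "expected"])

# Hypothetical check: run `len(df)` inside the notebook and expect 150 rows.
test_values = {
    "row_count": ScrapSpec(code="len(df)", expected=150),
}

assay_one_notebook("my_example_notebook", test_values)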
Example 4: test_unilm_abstractive_summarization
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_unilm_abstractive_summarization(notebooks, tmp):
    notebook_path = notebooks["unilm_abstractive_summarization"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            QUICK_RUN=True,
            NUM_GPUS=torch.cuda.device_count(),
            TOP_N=100,
            WARMUP_STEPS=5,
            MAX_STEPS=50,
            GRADIENT_ACCUMULATION_STEPS=1,
            TEST_PER_GPU_BATCH_SIZE=2,
            BEAM_SIZE=3,
            MODEL_DIR=tmp,
            RESULT_DIR=tmp,
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["rouge_1_f_score"], 0.2, abs=ABS_TOL)
    assert pytest.approx(result["rouge_2_f_score"], 0.07, abs=ABS_TOL)
    assert pytest.approx(result["rouge_l_f_score"], 0.16, abs=ABS_TOL)
Example 5: test_entailment_multinli_bert
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_entailment_multinli_bert(notebooks, tmp):
    notebook_path = notebooks["entailment_multinli_transformers"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "MODEL_NAME": "bert-base-uncased",
            "TO_LOWER": True,
            "TRAIN_DATA_USED_FRACTION": 0.05,
            "DEV_DATA_USED_FRACTION": 0.05,
            "NUM_EPOCHS": 1,
            "CACHE_DIR": tmp
        },
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["matched_precision"], 0.76, abs=ABS_TOL)
    assert pytest.approx(result["matched_recall"], 0.76, abs=ABS_TOL)
    assert pytest.approx(result["matched_f1"], 0.76, abs=ABS_TOL)
    assert pytest.approx(result["mismatched_precision"], 0.76, abs=ABS_TOL)
    assert pytest.approx(result["mismatched_recall"], 0.76, abs=ABS_TOL)
    assert pytest.approx(result["mismatched_f1"], 0.76, abs=ABS_TOL)
Example 6: test_tc_mnli_transformers
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_tc_mnli_transformers(notebooks, tmp):
    notebook_path = notebooks["tc_mnli_transformers"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            NUM_GPUS=1,
            DATA_FOLDER=tmp,
            CACHE_DIR=tmp,
            BATCH_SIZE=16,
            NUM_EPOCHS=1,
            TRAIN_DATA_FRACTION=0.05,
            TEST_DATA_FRACTION=0.05,
            MODEL_NAMES=["distilbert-base-uncased"],
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["accuracy"], 0.885, abs=ABS_TOL)
    assert pytest.approx(result["f1"], 0.885, abs=ABS_TOL)
Example 7: test_minilm_abstractive_summarization
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_minilm_abstractive_summarization(notebooks, tmp):
    notebook_path = notebooks["minilm_abstractive_summarization"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            QUICK_RUN=True,
            NUM_GPUS=torch.cuda.device_count(),
            TOP_N=100,
            WARMUP_STEPS=5,
            MAX_STEPS=50,
            GRADIENT_ACCUMULATION_STEPS=1,
            TEST_PER_GPU_BATCH_SIZE=2,
            BEAM_SIZE=3,
            CLEANUP_RESULTS=True,
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["rouge_1_f_score"], 0.2, abs=ABS_TOL)
    assert pytest.approx(result["rouge_2_f_score"], 0.07, abs=ABS_TOL)
    assert pytest.approx(result["rouge_l_f_score"], 0.16, abs=ABS_TOL)
Example 8: test_question_answering_squad_transformers
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_question_answering_squad_transformers(notebooks, tmp):
    notebook_path = notebooks["question_answering_squad_transformers"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "TRAIN_DATA_USED_PERCENT": 0.15,
            "DEV_DATA_USED_PERCENT": 0.15,
            "NUM_EPOCHS": 1,
            "MAX_SEQ_LENGTH": 384,
            "DOC_STRIDE": 128,
            "PER_GPU_BATCH_SIZE": 4,
            "MODEL_NAME": "distilbert-base-uncased",
            "DO_LOWER_CASE": True,
            "CACHE_DIR": tmp
        },
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["exact"], 0.55, abs=ABS_TOL)
    assert pytest.approx(result["f1"], 0.70, abs=ABS_TOL)
Example 9: test_bidaf_deep_dive
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_bidaf_deep_dive(
    notebooks, subscription_id, resource_group, workspace_name, workspace_region
):
    notebook_path = notebooks["bidaf_deep_dive"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "NUM_EPOCHS": 1,
            "config_path": None,
            "PROJECT_FOLDER": "examples/question_answering/bidaf-question-answering",
            "SQUAD_FOLDER": "examples/question_answering/squad",
            "LOGS_FOLDER": "examples/question_answering/",
            "BIDAF_CONFIG_PATH": "examples/question_answering/",
            "subscription_id": subscription_id,
            "resource_group": resource_group,
            "workspace_name": workspace_name,
            "workspace_region": workspace_region,
        },
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict["validation_EM"]
    assert result == pytest.approx(0.5, abs=ABS_TOL)
Example 10: test_extractive_summarization_cnndm_transformers
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_extractive_summarization_cnndm_transformers(notebooks, tmp):
    notebook_path = notebooks["extractive_summarization_cnndm_transformer"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            QUICK_RUN=True,
            TOP_N=100,
            CHUNK_SIZE=200,
            USE_PREPROCESSED_DATA=False,
            DATA_PATH=tmp,
            CACHE_DIR=tmp,
            BATCH_SIZE=3000,
            REPORT_EVERY=50,
            MAX_STEPS=100,
            WARMUP_STEPS=5e2,
            MODEL_NAME="distilbert-base-uncased",
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["rouge_2_f_score"], 0.1, abs=ABS_TOL)
Example 11: test_extractive_summarization_cnndm_transformers_processed
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_extractive_summarization_cnndm_transformers_processed(notebooks, tmp):
    notebook_path = notebooks["extractive_summarization_cnndm_transformer"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            QUICK_RUN=True,
            TOP_N=100,
            CHUNK_SIZE=200,
            USE_PREPROCESSED_DATA=True,
            DATA_PATH=tmp,
            CACHE_DIR=tmp,
            PROCESSED_DATA_PATH=tmp,
            BATCH_SIZE=3000,
            REPORT_EVERY=50,
            MAX_STEPS=100,
            WARMUP_STEPS=5e2,
            MODEL_NAME="distilbert-base-uncased",
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["rouge_2_f_score"], 0.1, abs=ABS_TOL)
Example 12: test_abstractive_summarization_bertsumabs_cnndm
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_abstractive_summarization_bertsumabs_cnndm(notebooks, tmp):
    notebook_path = notebooks["abstractive_summarization_bertsumabs_cnndm"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            QUICK_RUN=True,
            TOP_N=1000,
            MAX_POS=512,
            DATA_FOLDER=tmp,
            CACHE_DIR=tmp,
            BATCH_SIZE_PER_GPU=3,
            REPORT_EVERY=50,
            MAX_STEPS=100,
            MODEL_NAME="bert-base-uncased",
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert pytest.approx(result["rouge_2_f_score"], 0.01, abs=ABS_TOL)
Developer: microsoft, Project: nlp-recipes, Lines of code: 22, Source: test_notebooks_abstractive_summarization_bertsumabs.py
Example 13: test_bert_senteval
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_bert_senteval(
    notebooks, subscription_id, resource_group, workspace_name, workspace_region, tmp
):
    notebook_path = notebooks["bert_senteval"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            subscription_id=subscription_id,
            resource_group=resource_group,
            workspace_name=workspace_name,
            workspace_region=workspace_region,
            CACHE_DIR=tmp,
            LOCAL_UTILS="utils_nlp",
            LOCAL_SENTEVAL="utils_nlp/eval/SentEval",
            EXPERIMENT_NAME="test-nlp-ss-bert",
            CLUSTER_NAME="eval-gpu",
            MAX_NODES=1,
        ),
    )
    pearson = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict["pearson"]
    mse = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict["mse"]
    assert pearson == pytest.approx(0.6, abs=ABS_TOL)
    assert mse < 1.8
Example 14: test_automl_local_deployment_aci
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_automl_local_deployment_aci(
    notebooks, subscription_id, resource_group, workspace_name, workspace_region
):
    notebook_path = notebooks["automl_local_deployment_aci"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "automl_iterations": 1,
            "automl_iteration_timeout": 7,
            "config_path": None,
            "webservice_name": "aci-test-service",
            "subscription_id": subscription_id,
            "resource_group": resource_group,
            "workspace_name": workspace_name,
            "workspace_region": workspace_region,
        },
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict["pearson_correlation"]
    assert result == pytest.approx(0.5, abs=ABS_TOL)
Example 15: test_gensen_aml_deep_dive
# Required module: import scrapbook [as alias]
# Or: from scrapbook import read_notebook [as alias]
def test_gensen_aml_deep_dive(notebooks):
    notebook_path = notebooks["gensen_aml_deep_dive"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(
            CACHE_DIR="./tests/integration/temp",
            AZUREML_CONFIG_PATH="./tests/integration/.azureml",
            UTIL_NLP_PATH="./utils_nlp",
            MAX_EPOCH=1,
            TRAIN_SCRIPT="./examples/sentence_similarity/gensen_train.py",
            CONFIG_PATH="./examples/sentence_similarity/gensen_config.json",
            MAX_TOTAL_RUNS=1,
            MAX_CONCURRENT_RUNS=1,
        ),
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert result["min_val_loss"] > 5
    assert result["learning_rate"] >= 0.0001
    assert result["learning_rate"] <= 0.001