本文整理汇总了Python中config.DEFAULT_MODEL_PATH属性的典型用法代码示例。如果您正苦于以下问题:Python config.DEFAULT_MODEL_PATH属性的具体用法?Python config.DEFAULT_MODEL_PATH怎么用?Python config.DEFAULT_MODEL_PATH使用的例子?那么,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类config的用法示例。
在下文中一共展示了config.DEFAULT_MODEL_PATH属性的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Load the SavedModel predictor and the BERT tokenizer.

    Args:
        path: directory containing the TensorFlow SavedModel to serve.
    """
    logger.info('Loading model from: {}...'.format(path))
    # Parameters for inference (need to be the same values the model was trained with)
    self.max_seq_length = 512
    self.doc_stride = 128
    self.max_query_length = 64
    self.max_answer_length = 30
    # Initialize the tokenizer
    self.tokenizer = FullTokenizer(
        vocab_file='assets/vocab.txt', do_lower_case=True)
    # Bug fix: honor the `path` argument instead of always loading the
    # hard-coded DEFAULT_MODEL_PATH (behavior is unchanged for the default).
    self.predict_fn = predictor.from_saved_model(path)
    logger.info('Loaded model')
示例2: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Restore a TensorFlow SavedModel and cache its I/O tensor names."""
    logger.info('Loading model from: {}...'.format(path))
    session = tf.Session(graph=tf.Graph())
    # Restore the serving graph into the fresh session.
    graph_def = sm.loader.load(session, [sm.tag_constants.SERVING], path)
    signature = graph_def.signature_def[sm.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # Record everything inference will need later.
    self.sess = session
    self.model_graph_def = graph_def
    self.input_name = signature.inputs['input_images'].name
    self.output_name = signature.outputs['output_images'].name
    self.output_tensor = session.graph.get_tensor_by_name(self.output_name)
    logger.info('Loaded model')
示例3: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Build the inference graph and restore its weights from a checkpoint."""
    # TODO Replace this part with SavedModel
    graph = tf.Graph()
    with graph.as_default():
        wrapper = inference_wrapper.InferenceWrapper()
        restore_fn = wrapper.build_graph_from_config(
            configuration.ModelConfig(), path)
    graph.finalize()
    self.model = wrapper
    session = tf.Session(graph=graph)
    # Load the model from checkpoint.
    restore_fn(session)
    self.sess = session
示例4: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Load the Keras ResNet50 model and its class-index mapping.

    Args:
        path: directory containing 'resnet50.h5' and 'class_index.json'.
    """
    logger.info('Loading model from: {}...'.format(path))
    clear_session()
    self.model = models.load_model(
        os.path.join(path, 'resnet50.h5'))
    # this seems to be required to make Keras models play nicely with threads
    self.model._make_predict_function()
    logger.info('Loaded model: {}'.format(self.model.name))
    # Bug fix: read the class index from `path`, not the hard-coded
    # DEFAULT_MODEL_PATH, so a caller-supplied model directory is fully honored.
    with open(os.path.join(path, 'class_index.json')) as class_file:
        self.classes = json.load(class_file)
示例5: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Start the summarization worker subprocess in decode mode."""
    logger.info('Loading model from: %s...', path)
    self.log_dir = TemporaryDirectory()
    # The summarizer runs out-of-process; we talk to it over stdin/stdout pipes.
    command = [
        'python', 'core/getpoint/run_summarization.py', '--mode=decode',  # nosec - B603
        '--ckpt_dir={}'.format(ASSET_DIR),
        '--vocab_path={}'.format(DEFAULT_VOCAB_PATH),
        '--log_root={}'.format(self.log_dir.name),
    ]
    self.p_summarize = Popen(command, stdin=PIPE, stdout=PIPE)
示例6: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Load assets and the TensorFlow SavedModel, caching the I/O tensors.

    Args:
        path: directory containing the SavedModel and its assets.
    """
    logger.info('Loading model from: {}...'.format(path))
    # load assets first to enable model definition
    self._load_assets(path)
    # Loading the tf SavedModel
    self.graph = tf.Graph()
    self.sess = tf.Session(graph=self.graph)
    # Bug fix: load from `path` instead of the hard-coded DEFAULT_MODEL_PATH
    # so a caller-supplied model directory is actually used.
    tf.saved_model.loader.load(self.sess, [tag_constants.SERVING], path)
    self.word_ids_tensor = self.sess.graph.get_tensor_by_name('word_input:0')
    self.char_ids_tensor = self.sess.graph.get_tensor_by_name('char_input:0')
    self.output_tensor = self.sess.graph.get_tensor_by_name('predict_output/truediv:0')
示例7: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Stub initializer: only logs; graph loading is still to be implemented."""
    message = 'Loading model from: {}...'.format(path)
    logger.info(message)
    # Load the graph
    # Set up instance variables and required inputs for inference
    logger.info('Loaded model')
示例8: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Instantiate one SingleModelWrapper per known model name."""
    logger.info('Loading models from: {}...'.format(path))
    # Build a name -> wrapper mapping, loading each model in turn.
    self.models = {}
    for name in MODELS:
        logger.info('Loading model: {}'.format(name))
        self.models[name] = SingleModelWrapper(model=name, path=path)
    logger.info('Loaded all models')
示例9: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Instantiate the BERT model.

    Args:
        path: model directory (assumed to end with a path separator — the
            original code concatenates it directly with file names) holding
            'pytorch_model.bin', the tokenizer vocab, and the model config.
    """
    logger.info('Loading model from: {}...'.format(path))
    # Load the model
    # 1. set the appropriate parameters
    self.eval_batch_size = 64
    self.max_seq_length = 256
    self.do_lower_case = True
    # 2. Initialize the PyTorch model
    # Bug fix: use `path` consistently instead of the hard-coded
    # DEFAULT_MODEL_PATH so a caller-supplied model directory is honored.
    model_state_dict = torch.load(path + 'pytorch_model.bin', map_location='cpu')
    self.tokenizer = BertTokenizer.from_pretrained(path, do_lower_case=self.do_lower_case)
    self.model = BertForMultiLabelSequenceClassification.from_pretrained(
        path,
        num_labels=len(LABEL_LIST),
        state_dict=model_state_dict)
    # Inference is CPU-only here.
    self.device = torch.device("cpu")
    self.model.to(self.device)
    # 3. Set the layers to evaluation mode
    self.model.eval()
    logger.info('Loaded model')
示例10: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH, model_file=DEFAULT_MODEL_FILE):
    """Load a Keras model file inside a private graph, then load assets."""
    logger.info('Loading model from: {}...'.format(path))
    full_path = '{}/{}'.format(path, model_file)
    clear_session()
    # Keep the model in its own graph so inference can select it explicitly.
    self.graph = tf.Graph()
    with self.graph.as_default():
        self.model = models.load_model(full_path)
    logger.info('Loaded model: {}'.format(self.model.name))
    self._load_assets(path)
示例11: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Initialize the SRGAN controller from a checkpoint location.

    Args:
        path: checkpoint location for the super-resolution model.
    """
    logger.info('Loading model from: {}...'.format(path))
    # Initialize the SRGAN controller
    # Bug fix: pass `path` through instead of always using the hard-coded
    # DEFAULT_MODEL_PATH checkpoint (identical behavior for the default).
    self.SRGAN = SRGAN_controller(checkpoint=path)
    logger.info('Loaded model')
示例12: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Load a Keras model and append a sigmoid head for probability outputs."""
    logger.info('Loading model from: {}...'.format(path))
    self.sess = tensorflow.keras.backend.get_session()
    # Wrap the raw-logit model with a sigmoid activation layer.
    backbone = tensorflow.keras.models.load_model(path, compile=False)
    sigmoid_out = tensorflow.keras.layers.Activation('sigmoid', name="sigmoid")(backbone.output)
    self.model = tensorflow.keras.models.Model(inputs=backbone.input, outputs=sigmoid_out)
    self.input_tensor = self.model.input
    self.output_tensor = self.model.output
示例13: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH):
    """Load the BERT SavedModel, tokenizer, and label list for inference.

    Args:
        path: directory containing the SavedModel and 'vocab.txt'.
    """
    logger.info('Loading model from: {}...'.format(path))
    self.max_seq_length = 128
    self.do_lower_case = True
    # Set Logging verbosity
    tf.logging.set_verbosity(tf.logging.INFO)
    # Loading the tf Graph
    self.graph = tf.Graph()
    self.sess = tf.Session(graph=self.graph)
    # Bug fix: use `path` everywhere below instead of the hard-coded
    # DEFAULT_MODEL_PATH so a caller-supplied model directory is honored.
    tf.saved_model.loader.load(self.sess, [tag_constants.SERVING], path)
    # Validate init_checkpoint
    tokenization.validate_case_matches_checkpoint(self.do_lower_case, path)
    # Initialize the dataprocessor
    self.processor = MAXAPIProcessor()
    # Get the labels
    self.label_list = self.processor.get_labels()
    # Initialize the tokenizer
    self.tokenizer = tokenization.FullTokenizer(
        vocab_file=f'{path}/vocab.txt', do_lower_case=self.do_lower_case)
    logger.info('Loaded model')
示例14: __init__
# 需要导入模块: import config [as 别名]
# 或者: from config import DEFAULT_MODEL_PATH [as 别名]
def __init__(self, path=DEFAULT_MODEL_PATH, model_dir=DEFAULT_MODEL_DIR):
    """Load the SavedModel, its label list, and the crop-mean array.

    Args:
        path: base directory holding the model directory, 'labels.txt',
            and 'crop_mean.npy'.
        model_dir: SavedModel subdirectory name inside `path`.
    """
    logger.info('Loading model from: {}...'.format(path))
    sess = tf.Session(graph=tf.Graph())
    # load the graph
    saved_model_path = '{}/{}'.format(path, model_dir)
    model_graph_def = tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
    sig_def = model_graph_def.signature_def[tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    input_name = sig_def.inputs['inputs'].name
    output_name = sig_def.outputs['scores'].name
    # Load labels from file
    # Bug fix: close the label file deterministically (the original handle
    # was never closed — a resource leak).
    with codecs.open('./{}/labels.txt'.format(path), "r", encoding="utf-8") as label_file:
        labels = [label.strip('\n') for label in label_file.readlines()]
    self.labels = labels
    # set up instance variables and required inputs for inference
    self.sess = sess
    self.model_graph_def = model_graph_def
    self.output_tensor = sess.graph.get_tensor_by_name(output_name)
    self.input_name = input_name
    self.output_name = output_name
    # presumably per-frame normalization means — confirm shape against training
    self.means = np.load('./{}/crop_mean.npy'.format(path)).reshape(
        [NUM_FRAMES_PER_CLIP, CROP_SIZE, CROP_SIZE, CHANNELS])
    logger.info('Loaded model')