本文整理汇总了Python中tensorflow.contrib.crf.crf_log_likelihood方法的典型用法代码示例。如果您正苦于以下问题:Python crf.crf_log_likelihood方法的具体用法?Python crf.crf_log_likelihood怎么用?Python crf.crf_log_likelihood使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.contrib.crf
的用法示例。
在下文中一共展示了crf.crf_log_likelihood方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: loss_layer
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer(self, project_logits, lengths, name=None):
    """Compute the CRF negative log-likelihood loss.

    A synthetic start step is prepended and an extra "start" tag column is
    appended so the transition matrix can learn which real tags may begin a
    sequence; hence the [num_tags + 1, num_tags + 1] transition shape.

    :param project_logits: unary scores, [batch_size, num_steps, num_tags]
    :param lengths: [batch_size] int tensor of true sequence lengths
    :param name: optional variable-scope name (defaults to "crf_loss")
    :return: scalar tensor, mean negative log-likelihood over the batch
    """
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0  # effectively -inf logit: makes a tag "impossible"
        # Start step: only the extra tag (index num_tags) is viable.
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])],
            axis=-1)
        # Real steps get a "small" column for the extra tag so it is never chosen there.
        pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        logits = tf.concat([start_logits, logits], axis=1)
        # Prepend the synthetic start tag (= num_tags) to the gold labels.
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets],
            axis=-1)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)  # +1 for the added start step
        return tf.reduce_mean(-log_likelihood)
示例2: loss_layer_crf
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer_crf(self):
    """Build a plain CRF loss over self.logits and store it on self.loss.

    Unlike the padded variants elsewhere on this page, this version feeds the
    logits and gold tags to crf_log_likelihood directly, with a learned
    [nums_tags, nums_tags] transition matrix (saved on self.trans).
    """
    with tf.variable_scope("loss_layer"):
        logits = self.logits    # [batch, max_len, nums_tags] unary scores
        targets = self.targets  # [batch, max_len] gold tag indices
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.nums_tags, self.nums_tags],
            initializer=self.initializer
        )
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=self.length
        )
        # Negative mean log-likelihood = loss to minimize.
        self.loss = tf.reduce_mean(-log_likelihood)
示例3: loss_layer
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer(self, project_logits, lengths, name=None):
    """Calculate the CRF loss with an extra synthetic start tag.

    :param project_logits: unary scores, [batch_size, num_steps, num_tags]
    :param lengths: [batch_size] true sequence lengths
    :param name: optional variable-scope name (defaults to "crf_loss")
    :return: scalar loss (mean negative log-likelihood)
    """
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0  # acts as -inf: forbids a tag at a position
        # Pad logits for the CRF: a start step where only the extra tag
        # (index num_tags) is allowed, and a forbidden extra-tag column
        # for every real step.
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])],
            axis=-1)
        pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        logits = tf.concat([start_logits, logits], axis=1)
        # Gold labels gain the start tag in front.
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets],
            axis=-1)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)  # account for the start step
        return tf.reduce_mean(-log_likelihood)
示例4: loss_op
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_op(self):
    """Define self.loss: CRF negative log-likelihood when self.CRF is set,
    otherwise length-masked softmax cross-entropy. Also emits a "loss"
    summary scalar either way.
    """
    if self.CRF:
        # crf_log_likelihood creates its own transition variable here
        # (no transition_params passed) and returns it alongside the LL.
        log_likelihood, self.transition_params = crf_log_likelihood(
            inputs=self.logits, tag_indices=self.labels,
            sequence_lengths=self.sequence_lengths)
        self.loss = -tf.reduce_mean(log_likelihood)
    else:
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.labels)
        # Mask out padding positions before averaging.
        mask = tf.sequence_mask(self.sequence_lengths)
        losses = tf.boolean_mask(losses, mask)
        self.loss = tf.reduce_mean(losses)
    tf.summary.scalar("loss", self.loss)
示例5: loss_layer
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer(self, project_logits, lengths, name=None):
    """Calculate the CRF loss with an extra synthetic start tag.

    :param project_logits: unary scores, [batch_size, num_steps, num_tags]
    :param lengths: [batch_size] true sequence lengths
    :param name: optional variable-scope name (defaults to "crf_loss")
    :return: scalar loss (mean negative log-likelihood)
    """
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0  # ~ -inf logit, forbids a tag
        # Pad logits for the CRF loss: start step allows only the extra
        # tag; real steps forbid it.
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1)
        pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        logits = tf.concat([start_logits, logits], axis=1)
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets],
            axis=-1)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        # crf_log_likelihood computes the log-likelihood of the tag
        # sequences under a linear-chain CRF.
        #   inputs: [batch_size, max_seq_len, num_tags] unary potentials,
        #       typically the (projected) BiLSTM outputs.
        #   tag_indices: [batch_size, max_seq_len] gold tag indices.
        #   sequence_lengths: [batch_size] vector of sequence lengths.
        #   transition_params: [num_tags, num_tags] transition matrix.
        # Returns: (log_likelihood scalar-per-example,
        #           transition_params [num_tags, num_tags]).
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)  # +1 for the start step
        return tf.reduce_mean(-log_likelihood)
示例6: loss_layer
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer(self, project_logits, lengths, name=None):
    """Calculate the CRF loss with an extra synthetic start tag.

    :param project_logits: unary scores, [batch_size, num_steps, num_tags]
    :param lengths: [batch_size] true sequence lengths
    :param name: optional variable-scope name (defaults to "crf_loss")
    :return: scalar loss (mean negative log-likelihood)
    """
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0  # ~ -inf logit, forbids a tag
        # Pad logits for the CRF loss.
        # start_logits shape: (?, 1, num_tags + 1)
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1)
        pad_logits = tf.cast(
            small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        # project_logits: (?, ?, num_tags); pad_logits: (?, ?, 1);
        # logits after concat: (?, ?, num_tags + 1)
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        logits = tf.concat([start_logits, logits], axis=1)
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets],
            axis=-1)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        # crf_log_likelihood: log-likelihood of tag sequences under a
        # linear-chain CRF.
        #   inputs: [batch_size, max_seq_len, num_tags] unary scores
        #       (typically BiLSTM output reshaped for the CRF layer).
        #   tag_indices: [batch_size, max_seq_len] gold labels.
        #   sequence_lengths: [batch_size] lengths of each sequence.
        #   transition_params: [num_tags, num_tags] transition matrix.
        # Returns the per-example log-likelihood and the transition matrix.
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)  # +1 for the start step
        return tf.reduce_mean(-log_likelihood)
示例7: _build_loss_op
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def _build_loss_op(self):
    """Define self.loss: CRF negative log-likelihood when cfg["use_crf"],
    otherwise length-masked softmax cross-entropy; logs a "loss" summary.
    """
    if self.cfg["use_crf"]:
        # Transition params are created inside crf_log_likelihood and
        # kept on self.trans_params for decoding.
        crf_loss, self.trans_params = crf_log_likelihood(self.logits, self.tags, self.seq_len)
        self.loss = tf.reduce_mean(-crf_loss)
    else:  # using softmax
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.tags)
        # Average only over real (non-padded) positions.
        mask = tf.sequence_mask(self.seq_len)
        self.loss = tf.reduce_mean(tf.boolean_mask(losses, mask))
    tf.summary.scalar("loss", self.loss)
示例8: _build_loss_op
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def _build_loss_op(self):
    """Define self.loss: CRF or masked softmax cross-entropy, with optional
    L2 regularization over non-bias trainable variables; logs a summary.
    """
    if self.cfg["use_crf"]:
        # NOTE(review): lengths are passed as seq_len - 1 here — presumably
        # the last step (e.g. an end token) is excluded; confirm against the
        # caller before changing.
        crf_loss, self.trans_params = crf_log_likelihood(self.logits, self.tags, self.seq_len - 1)
        self.loss = tf.reduce_mean(-crf_loss)
    else:  # using softmax
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.tags)
        mask = tf.sequence_mask(self.seq_len)
        self.loss = tf.reduce_mean(tf.boolean_mask(losses, mask))
    if self.cfg["l2_reg"] is not None and self.cfg["l2_reg"] > 0.0:  # l2 regularization
        l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if "bias" not in v.name])
        self.loss += self.cfg["l2_reg"] * l2_loss
    tf.summary.scalar("loss", self.loss)
示例9: loss_layer
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def loss_layer(self, project_logits, lengths, name=None):
    """Compute the CRF loss with a synthetic start tag prepended.

    :param project_logits: unary scores, [batch_size, num_steps, num_tags]
    :param lengths: [batch_size] true sequence lengths
    :param name: optional variable-scope name (defaults to "crf_loss")
    :return: scalar loss (mean negative log-likelihood)
    """
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0  # ~ -inf logit, forbids a tag
        # Start step allows only the extra tag; real steps forbid it.
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])],
            axis=-1)
        pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        logits = tf.concat([start_logits, logits], axis=1)
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets],
            axis=-1)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)  # +1 for the start step
        return tf.reduce_mean(-log_likelihood)
示例10: __init__
# 需要导入模块: from tensorflow.contrib import crf [as 别名]
# 或者: from tensorflow.contrib.crf import crf_log_likelihood [as 别名]
def __init__(self, num_classes, max_docs, input_size, rnn_units=300,
             dropout_keep=0.9, lr=0.0001, bidirectional=True):
    """Build a (bi)GRU + CRF document-sequence tagger graph and a session.

    :param num_classes: number of output tag classes
    :param max_docs: maximum number of documents per sequence
    :param input_size: feature dimension of each document vector
    :param rnn_units: total GRU units (split in half per direction if
        bidirectional)
    :param dropout_keep: stored keep probability (fed via self.dropout)
    :param lr: Adam learning rate
    :param bidirectional: use a bidirectional GRU when True
    """
    self.max_docs = max_docs
    self.dropout_keep = dropout_keep
    self.dropout = tf.placeholder(tf.float32)
    self.rnn_units = rnn_units

    # Inputs: [batch, max_docs, input_size] features and per-batch lengths.
    self.doc_input = tf.placeholder(tf.float32, shape=[None, max_docs, input_size])
    self.num_docs = tf.placeholder(tf.int32, shape=[None])
    # Trim padding beyond the longest sequence in the batch.
    max_len = tf.reduce_max(self.num_docs)
    doc_input_reduced = self.doc_input[:, :max_len, :]
    doc_input_reduced = tf.nn.dropout(doc_input_reduced, self.dropout)

    self.labels = tf.placeholder(tf.int32, shape=[None, max_docs])
    labels_reduced = self.labels[:, :max_len]

    with tf.variable_scope('rnn', initializer=tf.contrib.layers.xavier_initializer()):
        if bidirectional:
            # Fix: integer division — GRUCell requires an int unit count
            # (rnn_units / 2 is a float under Python 3).
            [outputs_fw, outputs_bw], _ = tf.nn.bidirectional_dynamic_rnn(
                GRUCell(self.rnn_units // 2), GRUCell(self.rnn_units // 2),
                doc_input_reduced, sequence_length=self.num_docs, dtype=tf.float32)
            outputs = tf.concat((outputs_fw, outputs_bw), 2)
        else:
            outputs, _ = tf.nn.dynamic_rnn(
                GRUCell(self.rnn_units),
                doc_input_reduced, sequence_length=self.num_docs, dtype=tf.float32)
    outputs = tf.nn.dropout(outputs, self.dropout)

    # Conditional random field over per-document unary scores.
    weights = tf.get_variable("weights", [outputs.shape[2], num_classes],
                              initializer=tf.contrib.layers.xavier_initializer())
    matricized_docs = tf.reshape(outputs, [-1, outputs.shape[2]])
    matricized_unary = tf.matmul(matricized_docs, weights)
    unary_scores = tf.reshape(matricized_unary, [-1, max_len, num_classes])
    log_likelihood, transition_params = crf_log_likelihood(
        unary_scores, labels_reduced, self.num_docs)
    preds, viterbi_score = crf_decode(unary_scores, transition_params, self.num_docs)

    # Gather predictions at requested (batch, step) index pairs.
    self.doc_idx = tf.placeholder(tf.int32, shape=[None, 2])
    self.prediction = tf.gather_nd(preds, self.doc_idx)

    # Loss and training op.
    self.loss = tf.reduce_mean(-log_likelihood)
    self.optimizer = tf.train.AdamOptimizer(lr, 0.9, 0.99).minimize(self.loss)

    # Session setup (grow GPU memory on demand) and variable init.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.saver = tf.train.Saver()
    self.sess = tf.Session(config=config)
    self.sess.run(tf.global_variables_initializer())