This article collects typical usage examples of the layers.Dense method in Python. If you are wondering what exactly layers.Dense does, how it is used, or what working examples look like, the curated code samples here may help. You can also explore further usage examples of the layers module that the method belongs to.
The following presents 7 code examples of layers.Dense, sorted by popularity by default.
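Note that in every example below, layers is a project-local module (the snippets come from knowledge-graph recommendation and GraphSAGE-style codebases), not tf.keras.layers. As a rough orientation only, such a Dense layer might look like the following minimal sketch; the exact signature, initializers, and the contents of .vars vary by project, so treat every name here as an assumption:

import tensorflow as tf  # TF 1.x style, matching the examples below

class Dense(object):
    """Minimal sketch of a project-local fully-connected layer (illustrative only)."""
    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu, name='dense'):
        self.dropout = dropout
        self.act = act
        with tf.variable_scope(None, default_name=name):  # default_name auto-uniquifies the scope
            self.weight = tf.get_variable('weight', shape=[input_dim, output_dim])
            self.bias = tf.get_variable('bias', shape=[output_dim],
                                        initializer=tf.zeros_initializer())
        # exposed so callers can collect variables, e.g. for an L2 penalty
        self.vars = [self.weight]

    def __call__(self, inputs):
        x = tf.nn.dropout(inputs, keep_prob=1 - self.dropout)
        return self.act(tf.matmul(x, self.weight) + self.bias)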
Example 1: _build_low_layers
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def _build_low_layers(self, args):
self.user_emb_matrix = tf.get_variable('user_emb_matrix', [self.n_user, args.dim])
self.item_emb_matrix = tf.get_variable('item_emb_matrix', [self.n_item, args.dim])
self.entity_emb_matrix = tf.get_variable('entity_emb_matrix', [self.n_entity, args.dim])
self.relation_emb_matrix = tf.get_variable('relation_emb_matrix', [self.n_relation, args.dim])
# [batch_size, dim]
self.user_embeddings = tf.nn.embedding_lookup(self.user_emb_matrix, self.user_indices)
self.item_embeddings = tf.nn.embedding_lookup(self.item_emb_matrix, self.item_indices)
self.head_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.head_indices)
self.relation_embeddings = tf.nn.embedding_lookup(self.relation_emb_matrix, self.relation_indices)
self.tail_embeddings = tf.nn.embedding_lookup(self.entity_emb_matrix, self.tail_indices)
for _ in range(args.L):
user_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
tail_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
cc_unit = CrossCompressUnit(args.dim)
self.user_embeddings = user_mlp(self.user_embeddings)
self.item_embeddings, self.head_embeddings = cc_unit([self.item_embeddings, self.head_embeddings])
self.tail_embeddings = tail_mlp(self.tail_embeddings)
        self.vars_rs.extend(user_mlp.vars)
        self.vars_rs.extend(cc_unit.vars)
        self.vars_kge.extend(tail_mlp.vars)
        self.vars_kge.extend(cc_unit.vars)  # the cross&compress variables are shared by the RS and KGE objectives
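Example 1 pairs Dense with MKR's CrossCompressUnit. For context, here is a sketch of that unit following the cross&compress operation from the MKR paper (build the cross matrix C = v e^T, then project C and its transpose back down to dim); the variable names are assumptions:

import tensorflow as tf

class CrossCompressUnit(object):
    """Sketch of MKR's cross&compress unit (illustrative, names assumed)."""
    def __init__(self, dim, name='cc_unit'):
        self.dim = dim
        with tf.variable_scope(None, default_name=name):
            self.weight_vv = tf.get_variable('weight_vv', [dim, 1])
            self.weight_ev = tf.get_variable('weight_ev', [dim, 1])
            self.weight_ve = tf.get_variable('weight_ve', [dim, 1])
            self.weight_ee = tf.get_variable('weight_ee', [dim, 1])
            self.bias_v = tf.get_variable('bias_v', [dim], initializer=tf.zeros_initializer())
            self.bias_e = tf.get_variable('bias_e', [dim], initializer=tf.zeros_initializer())
        self.vars = [self.weight_vv, self.weight_ev, self.weight_ve, self.weight_ee]

    def __call__(self, inputs):
        v, e = inputs                      # both [batch_size, dim]
        v = tf.expand_dims(v, axis=2)      # [batch_size, dim, 1]
        e = tf.expand_dims(e, axis=1)      # [batch_size, 1, dim]
        c = tf.matmul(v, e)                # cross matrix, [batch_size, dim, dim]
        c_t = tf.transpose(c, perm=[0, 2, 1])
        c = tf.reshape(c, [-1, self.dim])  # [batch_size * dim, dim]
        c_t = tf.reshape(c_t, [-1, self.dim])
        # compress each channel back to [batch_size, dim]
        v_out = tf.reshape(tf.matmul(c, self.weight_vv) + tf.matmul(c_t, self.weight_ev),
                           [-1, self.dim]) + self.bias_v
        e_out = tf.reshape(tf.matmul(c, self.weight_ve) + tf.matmul(c_t, self.weight_ee),
                           [-1, self.dim]) + self.bias_e
        return v_out, e_out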
Example 2: __init__
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def __init__(self, args, n_users, n_items, n_entities, n_relations):
super(MKR, self).__init__()
self.n_user = n_users
self.n_item = n_items
self.n_entity = n_entities
self.n_relation = n_relations
self.L = args.L
self.H = args.H
self.dim = args.dim
    # Define the embedding matrices
self.user_emb_matrix = nn.Embedding(n_users, args.dim)
self.item_emb_matrix = nn.Embedding(n_items, args.dim)
self.entity_emb_matrix = nn.Embedding(n_entities, args.dim)
self.relation_emb_matrix = nn.Embedding(n_relations, args.dim)
    # Define the network layers; nn.ModuleList (instead of a plain Python list)
    # ensures the sub-layers' parameters are registered with the module
    self.user_mlps, self.tail_mlps = nn.ModuleList(), nn.ModuleList()
    self.cc_units = nn.ModuleList()
    self.kge_mlps = nn.ModuleList()
for _ in range(args.L):
self.user_mlps.append(Dense(args.dim, args.dim))
self.tail_mlps.append(Dense(args.dim, args.dim))
self.cc_units.append(CrossCompressUnit(args.dim))
for _ in range(args.H):
self.kge_mlps.append(Dense(args.dim * 2, args.dim * 2))
self.kge_pred_mlp = Dense(args.dim * 2, args.dim)
self.sigmoid = nn.Sigmoid()
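Example 2 is a PyTorch port of the same MKR model, so Dense here is an nn.Module rather than the TF layer above. A minimal sketch of what such a Dense might look like (the choice of activation is an assumption; the TF original defaults to ReLU):

import torch.nn as nn

class Dense(nn.Module):
    """Sketch of a PyTorch Dense: a linear map followed by a nonlinearity (assumed ReLU)."""
    def __init__(self, input_dim, output_dim):
        super(Dense, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.linear(x))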
Example 3: __init__
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def __init__(self, input_dim, output_dim, model_size="small", neigh_input_dim=None,
dropout=0., bias=True, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(MaxPoolingAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if name is not None:
name = '/' + name
else:
name = ''
    if neigh_input_dim is None:
neigh_input_dim = input_dim
    if concat:
        self.output_dim = 2 * output_dim
    else:
        self.output_dim = output_dim  # set in both branches so the bias variable below always has a defined shape
    if model_size == "small":
        hidden_dim = self.hidden_dim = 50
    elif model_size == "big":
        hidden_dim = self.hidden_dim = 50  # note: this variant uses the same hidden size for both model sizes (cf. Example 7's 512/1024)
self.mlp_layers = []
self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim, act=tf.nn.relu,
dropout=dropout, sparse_inputs=False, logging=self.logging))
with tf.variable_scope(self.name + name + '_vars'):
self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
self.input_dim = input_dim
self.output_dim = output_dim
self.neigh_input_dim = neigh_input_dim
Example 4: build
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def build(self):
samples1_list, support_sizes1_list = [], []
for r_idx in range(self.num_relations):
samples1, support_sizes1 = self.sample(self.inputs1, self.layer_infos[r_idx])
samples1_list.append(samples1)
support_sizes1_list.append(support_sizes1)
num_samples = [layer_info.num_samples for layer_info in self.layer_infos[0]]
self.outputs1_list = []
    dim_mult = 2 if self.concat else 1  # multiplier that yields the aggregated embedding width
    dim_mult = dim_mult * 2  # doubled again: the relation vector, which matches the node embedding width, is concatenated below
for r_idx in range(self.num_relations):
        outputs1, self.aggregators = self.aggregate(samples1_list[r_idx], [self.features], self.dims, num_samples,
                                                    support_sizes1_list[r_idx],  # this relation's support sizes, not the last loop value
                                                    concat=self.concat, model_size=self.model_size)
self.relation_batch = tf.tile([tf.nn.embedding_lookup(self.relation_vectors, r_idx)], [self.batch_size, 1])
outputs1 = tf.concat([outputs1, self.relation_batch], 1)
self.attention_weights = tf.matmul(outputs1, self.attention_vec)
self.attention_weights = tf.tile(self.attention_weights, [1, dim_mult*self.dims[-1]])
outputs1 = tf.multiply(self.attention_weights, outputs1)
self.outputs1_list += [outputs1]
# self.outputs1 = tf.reduce_mean(self.outputs1_list, 0)
self.outputs1 = tf.stack(self.outputs1_list, 1)
self.outputs1 = tf.reduce_sum(self.outputs1, axis=1, keepdims=False)
self.outputs1 = tf.nn.l2_normalize(self.outputs1, 1)
    self.node_pred = layers.Dense(dim_mult * self.dims[-1], self.num_classes,
                                  dropout=self.placeholders['dropout'],
                                  act=lambda x: x)  # identity activation: the layer outputs raw logits
# TF graph management
self.node_preds = self.node_pred(self.outputs1)
self._loss()
grads_and_vars = self.optimizer.compute_gradients(self.loss)
clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
for grad, var in grads_and_vars]
self.grad, _ = clipped_grads_and_vars[0]
self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
self.preds = self.predict()
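A shape note on the attention step above: the matmul with attention_vec reduces each concatenated vector to one scalar weight per example ([batch_size, 1]), and the tile broadcasts that scalar across all dim_mult * dims[-1] features, so each relation-specific embedding is scaled as a whole before the relation-wise sum. This only type-checks if the relation vectors are as wide as the aggregated node embeddings, which is what the extra doubling of dim_mult accounts for; also note the scalar weights are not softmax-normalized across relations in this snippet.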
Example 5: _build
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def _build(self):
self.layers.append(layers.Dense(input_dim=self.input_dim,
output_dim=self.dims[1],
act=tf.nn.relu,
dropout=self.placeholders['dropout'],
sparse_inputs=False,
logging=self.logging))
self.layers.append(layers.Dense(input_dim=self.dims[1],
output_dim=self.output_dim,
act=lambda x: x,
dropout=self.placeholders['dropout'],
logging=self.logging))
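The two Dense layers above form a standard two-layer MLP: a hidden ReLU layer followed by a linear output layer (act is the identity, so the model emits raw logits). The forward loop that consumes self.layers is not shown on this page; in GraphSAGE-style code it usually looks like this sketch (attribute names assumed):

def build(self):
    # Hypothetical forward pass over the stack assembled in _build()
    self._build()
    hidden = self.inputs
    for layer in self.layers:
        hidden = layer(hidden)  # each Dense applies dropout, matmul, then its activation
    self.outputs = hidden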
Example 6: _build_high_layers
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def _build_high_layers(self, args):
# RS
use_inner_product = True
if use_inner_product:
# [batch_size]
self.scores = tf.reduce_sum(self.user_embeddings * self.item_embeddings, axis=1)
else:
# [batch_size, dim * 2]
self.user_item_concat = tf.concat([self.user_embeddings, self.item_embeddings], axis=1)
for _ in range(args.H - 1):
rs_mlp = Dense(input_dim=args.dim * 2, output_dim=args.dim * 2)
# [batch_size, dim * 2]
self.user_item_concat = rs_mlp(self.user_item_concat)
self.vars_rs.extend(rs_mlp.vars)
rs_pred_mlp = Dense(input_dim=args.dim * 2, output_dim=1)
# [batch_size]
self.scores = tf.squeeze(rs_pred_mlp(self.user_item_concat))
self.vars_rs.extend(rs_pred_mlp.vars)
self.scores_normalized = tf.nn.sigmoid(self.scores)
# KGE
# [batch_size, dim * 2]
self.head_relation_concat = tf.concat([self.head_embeddings, self.relation_embeddings], axis=1)
for _ in range(args.H - 1):
kge_mlp = Dense(input_dim=args.dim * 2, output_dim=args.dim * 2)
        # [batch_size, dim * 2]
self.head_relation_concat = kge_mlp(self.head_relation_concat)
self.vars_kge.extend(kge_mlp.vars)
kge_pred_mlp = Dense(input_dim=args.dim * 2, output_dim=args.dim)
    # [batch_size, dim]
self.tail_pred = kge_pred_mlp(self.head_relation_concat)
self.vars_kge.extend(kge_pred_mlp.vars)
self.tail_pred = tf.nn.sigmoid(self.tail_pred)
self.scores_kge = tf.nn.sigmoid(tf.reduce_sum(self.tail_embeddings * self.tail_pred, axis=1))
self.rmse = tf.reduce_mean(
tf.sqrt(tf.reduce_sum(tf.square(self.tail_embeddings - self.tail_pred), axis=1) / args.dim))
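For intuition, the inner-product branch simply scores each user-item pair row-wise before the sigmoid. A toy NumPy check with invented values:

import numpy as np

user = np.array([[1.0, 0.5],
                 [0.2, 0.1]])           # [batch_size=2, dim=2]
item = np.array([[0.4, 2.0],
                 [1.0, 3.0]])
scores = np.sum(user * item, axis=1)    # [1.4, 0.5], one logit per pair
probs = 1.0 / (1.0 + np.exp(-scores))   # what scores_normalized computes in TF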
Example 7: __init__
# Required module: import layers [as alias]
# Or: from layers import Dense [as alias]
def __init__(self, input_dim, output_dim, model_size="small", neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(MaxPoolingAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
if model_size == "small":
hidden_dim = self.hidden_dim = 512
elif model_size == "big":
hidden_dim = self.hidden_dim = 1024
self.mlp_layers = []
self.mlp_layers.append(Dense(input_dim=neigh_input_dim,
output_dim=hidden_dim,
act=tf.nn.relu,
dropout=dropout,
sparse_inputs=False,
logging=self.logging))
with tf.variable_scope(self.name + name + '_vars'):
self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
name='neigh_weights')
self.vars['self_weights'] = glorot([input_dim, output_dim],
name='self_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
self.neigh_input_dim = neigh_input_dim
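The __init__ above only creates variables; the aggregation itself lives in the class's _call method, which this example does not show. In GraphSAGE-style code it typically pushes every neighbor vector through the small Dense MLP, max-pools over the neighbor axis, and combines the result with the self vector. A hedged sketch of that method:

def _call(self, inputs):
    self_vecs, neigh_vecs = inputs            # [batch, d], [batch, num_samples, d]
    batch_size = tf.shape(neigh_vecs)[0]
    h = tf.reshape(neigh_vecs, (-1, self.neigh_input_dim))
    for mlp in self.mlp_layers:               # Dense: dropout + matmul + ReLU
        h = mlp(h)
    h = tf.reshape(h, (batch_size, -1, self.hidden_dim))
    neigh_h = tf.reduce_max(h, axis=1)        # element-wise max over neighbors
    from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
    from_self = tf.matmul(self_vecs, self.vars['self_weights'])
    if self.concat:
        output = tf.concat([from_self, from_neighs], axis=1)
    else:
        output = tf.add_n([from_self, from_neighs])
    if self.bias:
        output += self.vars['bias']
    return self.act(output)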