This article collects typical usage examples of the Python function tensorflow.sparse_placeholder. If you are wondering what sparse_placeholder does, how to call it, or what real code using it looks like, the curated examples below should help.
Fifteen code examples are shown, ordered by popularity. All of them target the TensorFlow 1.x graph API, which is where sparse_placeholder is defined.
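Every snippet below shares one pattern: a sparse_placeholder is declared while building the graph, and at session.run time it is fed a tf.SparseTensorValue holding indices, values, and a dense shape. A minimal, self-contained sketch of that pattern (the shapes and values here are illustrative, not taken from any example below):

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph API

# Declare a sparse placeholder and a dense view of it.
sp = tf.sparse_placeholder(tf.float32, shape=[3, 4])
dense = tf.sparse_tensor_to_dense(sp)

# Two nonzeros of a 3x4 matrix, in coordinate (COO) form.
feed = tf.SparseTensorValue(
    indices=np.array([[0, 1], [2, 3]], dtype=np.int64),
    values=np.array([1.0, 2.0], dtype=np.float32),
    dense_shape=np.array([3, 4], dtype=np.int64))

with tf.Session() as sess:
    print(sess.run(dense, feed_dict={sp: feed}))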
Example 1: __init__
def __init__(self, sess, n_nodes, args):
self.sess = sess
self.result_dir = args.result_dir
self.dataset_name = args.dataset_name
self.n_nodes = n_nodes
self.n_hidden = args.n_hidden
self.n_embedding = args.n_embedding
self.dropout = args.dropout
self.learning_rate = args.learning_rate
self.max_iteration = args.max_iteration
self.shape = np.array([self.n_nodes, self.n_nodes])
self.adjacency = tf.sparse_placeholder(tf.float32, shape=self.shape, name='adjacency')
self.norm_adj_mat = tf.sparse_placeholder(tf.float32, shape=self.shape, name='norm_adj_mat')
self.keep_prob = tf.placeholder(tf.float32)
self.W_0_mu = None
self.W_1_mu = None
self.W_0_sigma = None
self.W_1_sigma = None
self.mu_np = []
self.sigma_np = []
self._build_VGAE()
Example 2: __init__
def __init__(self, **hparam):
'''
Supported hparam keys: vocab_size, emb_size, enc_func, dec_func,
is_tied_params, lambda_w, learning_rate, type_of_opt,
rho (adadelta), beta1/beta2/epsilon (adam).
'''
self.vocab_size = hparam.get('vocab_size', 100000)
self.emb_size = hparam.get('emb_size', 64)
self.is_tied_params = hparam.get('is_tied_params', False)
self.init_value = hparam.get('init_value', 0.01)
self.lambda_w = hparam.get('lambda_w', 0.001)
self.lr = hparam.get('learning_rate', 0.001)
self.opt = hparam.get('type_of_opt', 'adam')
self.rho = hparam.get('rho', 0.95)
self.epsilon = hparam.get('epsilon', 1e-8)
self.beta1 = hparam.get('beta1', 0.9)
self.beta2 = hparam.get('beta2', 0.999)
self.enc_func = self.get_activation_func(hparam.get('enc_func', 'tanh'))
self.dec_func = self.get_activation_func(hparam.get('dec_func', 'tanh'))
self.summary_path = hparam.get('tf_summary_file', 'log_tmp_path')
self.saver = None
self.X = tf.sparse_placeholder(tf.float32)
self.Y = tf.sparse_placeholder(tf.float32)
self.mask = tf.sparse_placeholder(tf.float32)
self.params = {}
self.W = tf.Variable(
tf.truncated_normal([self.vocab_size, self.emb_size], stddev=self.init_value / math.sqrt(float(self.emb_size)), mean=0),
name='encoder_W' , dtype=tf.float32
)
self.b = tf.Variable(tf.truncated_normal([self.emb_size], stddev=self.init_value * 0.001, mean=0), name='encoder_bias', dtype=tf.float32 )
self.params['W'] = self.W
self.params['b'] = self.b
if not self.is_tied_params:
self.W_prime = tf.Variable(
tf.truncated_normal([self.emb_size, self.vocab_size], stddev=self.init_value / math.sqrt(float(self.emb_size)), mean=0),
name='decoder_W' , dtype=tf.float32
)
self.params['W_prime'] = self.W_prime
else:
self.W_prime = tf.transpose(self.W)
self.b_prime = tf.Variable(tf.truncated_normal([self.vocab_size], stddev=self.init_value * 0.001, mean=0), name='decoder_bias', dtype=tf.float32)
self.params['b_prime'] = self.b_prime
self.encoded_values, self.decoded_values, self.masked_decoded_values, self.error, self.loss, self.train_step, self.summary = self.build_model()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.log_writer = tf.summary.FileWriter(self.summary_path, graph=self.sess.graph)
self._glo_ite_counter = 0
Example 3: test_edit_distance
def test_edit_distance():
graph = tf.Graph()
with graph.as_default():
truth = tf.sparse_placeholder(tf.int32)
hyp = tf.sparse_placeholder(tf.int32)
editDist = tf.edit_distance(hyp, truth, normalize=False)
with tf.Session(graph=graph) as session:
truthTest = sparse_tensor_feed([[0,1,2], [0,1,2,3,4]])
hypTest = sparse_tensor_feed([[3,4,5], [0,1,2,2]])
feedDict = {truth: truthTest, hyp: hypTest}
dist = session.run([editDist], feed_dict=feedDict)
print(dist)
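The sparse_tensor_feed helper used above is not part of this listing. A plausible implementation, assuming it only needs to pack a list of ragged integer sequences into the tf.SparseTensorValue expected by the two placeholders, might look like this:

import numpy as np
import tensorflow as tf

def sparse_tensor_feed(sequences):
    # Hypothetical helper: flatten ragged int sequences into the
    # (indices, values, dense_shape) triple fed to a sparse_placeholder.
    indices = [[row, col] for row, seq in enumerate(sequences)
               for col in range(len(seq))]
    values = [v for seq in sequences for v in seq]
    dense_shape = [len(sequences), max(len(seq) for seq in sequences)]
    return tf.SparseTensorValue(np.asarray(indices, dtype=np.int64),
                                np.asarray(values, dtype=np.int32),
                                np.asarray(dense_shape, dtype=np.int64))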
Example 4: __init__
def __init__(self,args):
super(seqMLP, self).__init__()
self.args = args
self.batch_size=args.batch_size
self.input_data = tf.placeholder(tf.float32,[self.args.batch_size,self.args.sentence_length,self.args.word_dim],name='inputdata')
self.output_data = tf.sparse_placeholder(tf.float32, name='outputdata') #[None, 114]
self.dense_outputdata = tf.sparse_tensor_to_dense(self.output_data)
self.keep_prob = tf.placeholder(tf.float32,name='keep_prob_NER')
self.entMentIndex = tf.placeholder(tf.int32,[None,5],name='ent_mention_index')
self.entCtxLeftIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxleft_index')
self.entCtxRightIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxright_index')
self.pos_f1 = tf.placeholder(tf.float32,[None,5,1])
self.pos_f2 = tf.placeholder(tf.float32,[None,10,1])
self.pos_f3 = tf.placeholder(tf.float32,[None,10,1])
self.figerHier = np.asarray(cPickle.load(open('data/figer/figerhierarchical.p','rb')),np.float32) #add the hierarchy features
self.layers={}
self.layers['fullyConnect'] = layers_lib.FullyConnection(self.args.class_size)
used = tf.sign(tf.reduce_max(tf.abs(self.input_data),reduction_indices=2))
self.length = tf.cast(tf.reduce_sum(used,reduction_indices=1),tf.int32)
with tf.device('/gpu:0'):
self.prediction,self.loss_lm = self.cl_loss_from_embedding(self.input_data)
print('self.loss_lm:', self.loss_lm)
_, self.adv_loss = self.adversarial_loss()
print('self.adv_loss:', self.adv_loss)
self.loss = tf.add(self.loss_lm,self.adv_loss)
Example 5: __init__
def __init__(self, field_sizes=None, embed_size=10, filter_sizes=None, layer_acts=None, drop_out=None,
init_path=None, opt_algo='gd', learning_rate=1e-2, random_seed=None):
Model.__init__(self)
init_vars = []
num_inputs = len(field_sizes)
for i in range(num_inputs):
init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
init_vars.append(('f1', [embed_size, filter_sizes[0], 1, 2], 'xavier', dtype))
init_vars.append(('f2', [embed_size, filter_sizes[1], 2, 2], 'xavier', dtype))
init_vars.append(('w1', [2 * 3 * embed_size, 1], 'xavier', dtype))
init_vars.append(('b1', [1], 'zero', dtype))
self.graph = tf.Graph()
with self.graph.as_default():
if random_seed is not None:
tf.set_random_seed(random_seed)
self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
self.y = tf.placeholder(dtype)
self.keep_prob_train = 1 - np.array(drop_out)
self.keep_prob_test = np.ones_like(drop_out)
self.layer_keeps = tf.placeholder(dtype)
self.vars = utils.init_var_map(init_vars, init_path)
w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)], 1)
l = xw
l = tf.transpose(tf.reshape(l, [-1, num_inputs, embed_size, 1]), [0, 2, 1, 3])
f1 = self.vars['f1']
l = tf.nn.conv2d(l, f1, [1, 1, 1, 1], 'SAME')
l = tf.transpose(
utils.max_pool_4d(
tf.transpose(l, [0, 1, 3, 2]),
int(num_inputs / 2)),
[0, 1, 3, 2])
f2 = self.vars['f2']
l = tf.nn.conv2d(l, f2, [1, 1, 1, 1], 'SAME')
l = tf.transpose(
utils.max_pool_4d(
tf.transpose(l, [0, 1, 3, 2]), 3),
[0, 1, 3, 2])
l = tf.nn.dropout(
utils.activate(
tf.reshape(l, [-1, embed_size * 3 * 2]),
layer_acts[0]),
self.layer_keeps[0])
w1 = self.vars['w1']
b1 = self.vars['b1']
l = tf.matmul(l, w1) + b1
l = tf.squeeze(l)
self.y_prob = tf.sigmoid(l)
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
tf.global_variables_initializer().run(session=self.sess)
Example 6: __init__
def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd', learning_rate=1e-2, l2_weight=0,
random_seed=None):
Model.__init__(self)
init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
('b', [output_dim], 'zero', dtype)]
self.graph = tf.Graph()
with self.graph.as_default():
if random_seed is not None:
tf.set_random_seed(random_seed)
self.X = tf.sparse_placeholder(dtype)
self.y = tf.placeholder(dtype)
self.vars = utils.init_var_map(init_vars, init_path)  # initialize the variables w and b
w = self.vars['w']
b = self.vars['b']
xw = tf.sparse_tensor_dense_matmul(self.X, w)
logits = tf.reshape(xw + b, [-1])
self.y_prob = tf.sigmoid(logits)
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
l2_weight * tf.nn.l2_loss(xw)
self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
tf.global_variables_initializer().run(session=self.sess)
Example 7: testMatchingTensorInfoProtoMaps
def testMatchingTensorInfoProtoMaps(self):
sig1 = _make_signature({
"x": tf.placeholder(tf.int32, [2]),
}, {
"x": tf.placeholder(tf.int32, [2]),
})
sig2 = _make_signature({
"x": tf.placeholder(tf.int32, [2]),
}, {
"x": tf.sparse_placeholder(tf.int64, [2]),
})
self.assertTrue(
tensor_info.tensor_info_proto_maps_match(sig1.inputs, sig2.inputs))
self.assertFalse(
tensor_info.tensor_info_proto_maps_match(sig1.outputs, sig2.outputs))
sig3 = _make_signature({
"x": tf.placeholder(tf.int32, [None]),
}, {
"x": tf.placeholder(tf.int32, [2]),
})
self.assertFalse(
tensor_info.tensor_info_proto_maps_match(sig1.inputs, sig3.inputs))
self.assertTrue(
tensor_info.tensor_info_proto_maps_match(sig1.outputs, sig3.outputs))
Example 8: add_placeholders
def add_placeholders(self):
# the batch_size and max_stepsize vary at each step.
self.input_tensor = tf.placeholder(tf.float32, [None, None, n_input + (2 * n_input * n_context)],
name='input') # audio log filter bank or MFCC features
self.text = tf.sparse_placeholder(tf.int32, name='text') # transcript text
self.seq_length = tf.placeholder(tf.int32, [None], name='seq_length') # sequence lengths
self.keep_dropout = tf.placeholder(tf.float32)
Example 9: _run_test_als
def _run_test_als(self, use_factors_weights_cache):
with self.test_session():
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors1 = [x.eval() for x in als_model.row_factors]
wals_model = factorization_ops.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=0,
col_weights=0,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors2 = [x.eval() for x in wals_model.row_factors]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
# Here we test partial column updates.
sp_c = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0],
shuffle=True).eval()
sp_feeder = tf.sparse_placeholder(tf.float32)
feed_dict = {sp_feeder: sp_c}
als_model.col_update_prep_gramian_op.run()
als_model.initialize_col_update_op.run()
process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors1 = [x.eval() for x in als_model.col_factors]
feed_dict = {sp_feeder: sp_c}
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors2 = [x.eval() for x in wals_model.col_factors]
for c1, c2 in zip(col_factors1, col_factors2):
self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
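Note that np_matrix_to_tf_sparse(...).eval() yields a tf.SparseTensorValue, which can then be fed straight back through sp_feeder. The same round trip in miniature (the 2x2 matrix is illustrative):

import tensorflow as tf

sp_feeder = tf.sparse_placeholder(tf.float32)
st = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
with tf.Session() as sess:
    sp_value = sess.run(st)  # evaluating a SparseTensor returns a SparseTensorValue
    dense = sess.run(tf.sparse_tensor_to_dense(sp_feeder), feed_dict={sp_feeder: sp_value})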
Example 10: __init__
def __init__(self, mode):
self.mode = mode
# image
self.inputs = tf.placeholder(tf.float32, [None, FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
# SparseTensor required by ctc_loss op
self.labels = tf.sparse_placeholder(tf.int32)
# 1d array of size [batch_size]
# self.seq_len = tf.placeholder(tf.int32, [None])
# l2
self._extra_train_ops = []
Example 11: get_train_model
def get_train_model():
# Has size [batch_size, max_stepsize, num_features], but the
# batch_size and max_stepsize can vary along each step
inputs, features = convolutional_layers()
# print features.get_shape()
# inputs = tf.placeholder(tf.float32, [None, None, common.OUTPUT_SHAPE[0]])
# Here we use sparse_placeholder that will generate a
# SparseTensor required by ctc_loss op.
targets = tf.sparse_placeholder(tf.int32)
# 1d array of size [batch_size]
seq_len = tf.placeholder(tf.int32, [None])
# Defining the cell
# Can be:
# tf.nn.rnn_cell.RNNCell
# tf.nn.rnn_cell.GRUCell
# cell = tf.contrib.rnn.LSTMCell(common.num_hidden, state_is_tuple=True)
# Stacking rnn cells
stack = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(0, common.num_layers)],
state_is_tuple=True)
# The second output is the last state, which we will not use
outputs, _ = tf.nn.dynamic_rnn(stack, features, seq_len, dtype=tf.float32)
shape = tf.shape(features)
batch_s, max_timesteps = shape[0], shape[1]
# Reshaping to apply the same weights over the timesteps
outputs = tf.reshape(outputs, [-1, common.num_hidden])
# Truncated normal with mean 0 and stdev=0.1
# Tip: Try another initialization
# see https://www.tensorflow.org/versions/r0.9/api_docs/python/contrib.layers.html#initializers
W = tf.Variable(tf.truncated_normal([common.num_hidden,
common.num_classes],
stddev=0.1), name="W")
# Zero initialization
# Tip: Is tf.zeros_initializer the same?
b = tf.Variable(tf.constant(0., shape=[common.num_classes]), name="b")
# Doing the affine projection
logits = tf.matmul(outputs, W) + b
# Reshaping back to the original shape
logits = tf.reshape(logits, [batch_s, -1, common.num_classes])
# Time major
logits = tf.transpose(logits, (1, 0, 2))
return logits, inputs, targets, seq_len, W, b
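As the comment notes, targets is a sparse placeholder because tf.nn.ctc_loss requires its labels as a SparseTensor. A sketch of how the returned tensors might be wired into a training step; the optimizer choice and learning rate are assumptions, not part of the original:

logits, inputs, targets, seq_len, W, b = get_train_model()

# ctc_loss takes time-major logits and sparse integer labels.
loss = tf.reduce_mean(tf.nn.ctc_loss(labels=targets, inputs=logits, sequence_length=seq_len))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)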
Example 12: testBuildInputMap
def testBuildInputMap(self):
x = tf.placeholder(tf.int32, [2])
y = tf.sparse_placeholder(tf.string, [None])
sig = _make_signature({"x": x, "y": y}, {})
input_map = tensor_info.build_input_map(sig.inputs, {"x": x, "y": y})
self.assertEqual(len(input_map), 4)
self.assertEqual(input_map[x.name], x)
self.assertEqual(input_map[y.indices.name], y.indices)
self.assertEqual(input_map[y.values.name], y.values)
self.assertEqual(input_map[y.dense_shape.name], y.dense_shape)
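The map has four entries for two signature inputs because a tf.sparse_placeholder is backed by three component tensors rather than a single placeholder op. A quick way to see this:

import tensorflow as tf

y = tf.sparse_placeholder(tf.string, [None])
print(type(y).__name__)  # SparseTensor, not a plain Tensor
print(y.indices.name, y.values.name, y.dense_shape.name)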
Example 13: testBuildOutputMap
def testBuildOutputMap(self):
x = tf.placeholder(tf.int32, [2])
y = tf.sparse_placeholder(tf.string, [None])
sig = _make_signature({}, {"x": x, "y": y})
def _get_tensor(name):
return tf.get_default_graph().get_tensor_by_name(name)
output_map = tensor_info.build_output_map(sig.outputs, _get_tensor)
self.assertEqual(len(output_map), 2)
self.assertEqual(output_map["x"], x)
self.assertEqual(output_map["y"].indices, y.indices)
self.assertEqual(output_map["y"].values, y.values)
self.assertEqual(output_map["y"].dense_shape, y.dense_shape)
Example 14: _run_test_als_transposed
def _run_test_als_transposed(self, use_factors_weights_cache):
with self.test_session():
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
wals_model = factorization_ops.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=[0] * 5,
col_weights=[0] * 7,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
sp_feeder = tf.sparse_placeholder(tf.float32)
# Here test partial row update with identical inputs but with transposed
# input for als.
sp_r_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1],
transpose=True).eval()
sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()
feed_dict = {sp_feeder: sp_r_t}
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(sp_input=sp_feeder,
transpose_input=True)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors1 = [als_model.row_factors[0].eval()[1],
als_model.row_factors[0].eval()[3]]
feed_dict = {sp_feeder: sp_r}
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors2 = [wals_model.row_factors[0].eval()[1],
wals_model.row_factors[0].eval()[3]]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
Example 15: __init__
def __init__(self,args):
'''
@time: 2016/12/20
@editor: wujs
@function: also returns the LSTM representations of the candidate entity mentions
'''
super(seqCNN, self).__init__()
self.args = args
self.batch_size=args.batch_size
self.input_data = tf.placeholder(tf.float32,[self.args.batch_size,self.args.sentence_length,self.args.word_dim],name='inputdata')
print('self.input_data:', self.input_data)
self.output_data = tf.sparse_placeholder(tf.float32, name='outputdata')
self.keep_prob = tf.placeholder(tf.float32,name='keep_prob_NER')
self.pos_f1 = tf.placeholder(tf.float32,[None,5,1])
self.pos_f2 = tf.placeholder(tf.float32,[None,10,1])
self.pos_f3 = tf.placeholder(tf.float32,[None,10,1])
self.entMentIndex = tf.placeholder(tf.int32,[None,5],name='ent_mention_index')
self.entCtxLeftIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxleft_index')
self.entCtxRightIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxright_index')
if args.datasets == 'figer':
self.hier = np.asarray(cPickle.load(open('data/figer/figerhierarchical.p','rb')),np.float32) #add the hierarchy features
else:
self.hier = np.asarray(cPickle.load(open('data/OntoNotes/OntoNoteshierarchical.p','rb')),np.float32)
self.pred_bias = tf.Variable(tf.zeros([self.args.class_size]), name="pred_bias")
self.layers={}
self.layers['CNN'] = layers_lib.CNN(filters=[1,2,3,4,5],word_embedding_size=self.args.word_dim+1,num_filters=5)
self.layers['fullyConnect_ment'] = layers_lib.FullyConnection(self.args.class_size,name='FullyConnection_ment') # 90 is the number of rows in the type hierarchy
self.layers['fullyConnect_ctx'] = layers_lib.FullyConnection(self.args.class_size,name='FullyConnection_ctx')
#self.layers['fullyConnect_ctx'] = layers_lib.FullyConnection(np.shape(self.hier)[0],name='FullyConnection_ctx')
self.dense_outputdata = tf.sparse_tensor_to_dense(self.output_data)
print('self.dense_outputdata:', self.dense_outputdata)
self.prediction,self.loss_lm = self.cl_loss_from_embedding(self.input_data)
print('self.loss_lm:', self.loss_lm)
_,self.adv_loss = self.adversarial_loss()
print('self.adv_loss:', self.adv_loss)
self.loss = tf.add(self.loss_lm,self.adv_loss)