This article collects typical usage examples of the Python method tensorflow.sparse_placeholder. If you have been wondering how exactly tensorflow.sparse_placeholder is used in practice, the curated examples below should help; you can also explore further usage examples from the tensorflow module.
The following presents 15 code examples of tensorflow.sparse_placeholder, sorted by popularity by default.
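Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the basic pattern: tf.sparse_placeholder declares a SparseTensor input whose indices, values, and dense shape are supplied at run time, either as a tf.SparseTensorValue or as a plain (indices, values, dense_shape) tuple.

import numpy as np
import tensorflow as tf

x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)  # any sparse-aware op works here

with tf.Session() as sess:
    indices = np.array([[0, 0], [1, 2]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    dense_shape = np.array([3, 4], dtype=np.int64)
    # Feed a tf.SparseTensorValue ...
    print(sess.run(y, feed_dict={x: tf.SparseTensorValue(indices, values, dense_shape)}))  # 3.0
    # ... or a plain (indices, values, dense_shape) tuple.
    print(sess.run(y, feed_dict={x: (indices, values, dense_shape)}))  # 3.0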
Example 1: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def __init__(self, input_size, batch_size, data_generator_creator, max_steps=None):
    super().__init__(input_size)
    self.batch_size = batch_size
    self.data_generator_creator = data_generator_creator
    self.steps_left = max_steps

    with tf.device("/cpu:0"):
        # Define input and label placeholders
        # inputs is of dimension [batch_size, max_time, input_size]
        self.inputs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='inputs')
        self.sequence_lengths = tf.placeholder(tf.int32, [batch_size], name='sequence_lengths')
        self.labels = tf.sparse_placeholder(tf.int32, name='labels')

        # Queue for inputs and labels
        self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                  capacity=100)

        # queues do not support sparse tensors yet, so we need to serialize the labels
        serialized_labels = tf.serialize_many_sparse(self.labels)

        self.enqueue_op = self.queue.enqueue([self.inputs,
                                              self.sequence_lengths,
                                              serialized_labels])
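A hypothetical sketch of the matching consumer side (not part of the original snippet): after a dequeue, tf.deserialize_many_sparse restores the SparseTensor that was serialized before enqueueing.

# Assumed continuation: `self.queue` is the FIFOQueue built above.
inputs, sequence_lengths, serialized_labels = self.queue.dequeue()
labels = tf.deserialize_many_sparse(serialized_labels, dtype=tf.int32)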
Example 2: add_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def add_placeholders(self):
    """
    Defines the placeholders required for the model
    """
    self.input_x = tf.placeholder(tf.int32, shape=[None, None], name='input_data')    # Words in a document (batch_size x max_words)
    self.input_y = tf.placeholder(tf.int32, shape=[None, None], name='input_labels')  # Actual document creation year of the document
    self.x_len = tf.placeholder(tf.int32, shape=[None], name='input_len')             # Number of words in each document in a batch
    self.et_idx = tf.placeholder(tf.int32, shape=[None, None], name='et_idx')         # Indices of tokens which are events/time expressions
    self.et_mask = tf.placeholder(tf.float32, shape=[None, None], name='et_mask')

    # Array of batch_size dictionaries, each mapping a label to a sparse_placeholder [Temporal graph]
    self.de_adj_mat = [{lbl: tf.sparse_placeholder(tf.float32, shape=[None, None]) for lbl in range(self.num_deLabel)} for _ in range(self.p.batch_size)]
    # Array of batch_size dictionaries, each mapping a label to a sparse_placeholder [Syntactic graph]
    self.et_adj_mat = [{lbl: tf.sparse_placeholder(tf.float32, shape=[None, None]) for lbl in range(self.num_etLabel)} for _ in range(self.p.batch_size)]

    self.seq_len = tf.placeholder(tf.int32, shape=(), name='seq_len')  # Maximum number of words in documents of a batch
    self.max_et = tf.placeholder(tf.int32, shape=(), name='max_et')    # Maximum number of events/time expressions in documents of a batch
    self.dropout = tf.placeholder_with_default(self.p.dropout, shape=(), name='dropout')              # Dropout used in the GCN layer
    self.rec_dropout = tf.placeholder_with_default(self.p.rec_dropout, shape=(), name='rec_dropout')  # Dropout used in the Bi-LSTM
Example 3: get_edit_distance
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def get_edit_distance(hyp_arr, truth_arr, normalize, level):
    '''Calculate edit distance.
    This is fully generic: it works for both char-level and phone-level labels.
    '''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.reduce_sum(tf.edit_distance(hyp, truth, normalize=normalize))

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr, level)
        hypTest = list_to_sparse_tensor(hyp_arr, level)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)

    return dist
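list_to_sparse_tensor is defined elsewhere in that project; a plausible minimal stand-in (an assumption — the real helper also uses the level argument to map labels) converts a list of label sequences into the (indices, values, dense_shape) triple a sparse_placeholder accepts.

import numpy as np

def list_to_sparse_tensor(target_list, level=None):  # hypothetical stand-in; `level` handling omitted
    indices, values = [], []
    for row, seq in enumerate(target_list):
        for col, label in enumerate(seq):
            indices.append([row, col])
            values.append(label)
    dense_shape = [len(target_list), max(len(seq) for seq in target_list)]
    return (np.array(indices, dtype=np.int64),
            np.array(values, dtype=np.int32),
            np.array(dense_shape, dtype=np.int64))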
Example 4: _create_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def _create_placeholders(self):
    """Create placeholders."""
    with tf.name_scope('input'):
        self.placeholders = {
            'adj_train': tf.sparse_placeholder(tf.float32),  # normalized
            'node_labels': tf.placeholder(tf.float32, shape=[None, self.n_hidden[-1]]),
            'node_mask': tf.placeholder(tf.float32, shape=[None]),
            'is_training': tf.placeholder(tf.bool),
        }
        if self.sparse_features:
            self.placeholders['features'] = tf.sparse_placeholder(tf.float32)
        else:
            self.placeholders['features'] = tf.placeholder(
                tf.float32, shape=[None, self.input_dim])
Example 5: get_architecture
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def get_architecture():
    inputs_ph = tf.placeholder(
        dtype=tf.float32, shape=[None, FLAGS.features_dim], name="features_")
    support_ph = tf.sparse_placeholder(
        dtype=tf.float32, shape=[None, None], name="support_")

    tf.logging.info("Reordering indices of support - this is extremely "
                    "important as sparse operations assume sparse indices have "
                    "been ordered.")
    support_reorder = tf.sparse_reorder(support_ph)

    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)
    outputs = rgat_layer(inputs=inputs_ph, support=support_reorder)

    return inputs_ph, support_ph, outputs
Example 6: build_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def build_placeholders(self):
    num_supports = 1
    self.placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(self.features[2], dtype=tf.int64)),
        'labels': tf.placeholder(tf.float32, shape=(None, self.labels.shape[1])),
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        # helper variable for sparse dropout
        'num_features_nonzero': tf.placeholder(tf.int32),
    }
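A sketch of how placeholders like these are typically fed (an assumption modeled on common GCN training loops, not code from this project; `features` and `support` are (coords, values, shape) tuples produced during preprocessing):

feed_dict = {
    self.placeholders['features']: features,   # (coords, values, shape) tuple
    self.placeholders['support'][0]: support,  # same sparse tuple format
    self.placeholders['labels']: labels,
    self.placeholders['labels_mask']: mask,
    self.placeholders['dropout']: 0.5,
    self.placeholders['num_features_nonzero']: features[1].shape,  # count of nonzero feature values
}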
Example 7: _setup_variables
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def _setup_variables(self):
    """
    Create TensorFlow variables and placeholders.
    """
    self.node_embedding = tf.random_uniform([self.node_count, self.args.node_embedding_dimensions], -1.0, 1.0)
    self.node_embedding = tf.Variable(self.node_embedding, dtype=tf.float32)

    self.feature_embedding = tf.random_uniform([self.feature_count, self.args.feature_embedding_dimensions], -1.0, 1.0)
    self.feature_embedding = tf.Variable(self.feature_embedding, dtype=tf.float32)

    self.combined_dimensions = self.args.node_embedding_dimensions + self.args.feature_embedding_dimensions

    self.noise_embedding = tf.Variable(tf.truncated_normal([self.node_count, self.combined_dimensions],
                                                           stddev=1.0/math.sqrt(self.combined_dimensions)),
                                       dtype=tf.float32)
    self.noise_bias = tf.Variable(tf.zeros([self.node_count]), dtype=tf.float32)

    self.left_nodes = tf.placeholder(tf.int32, shape=[None])
    self.node_features = tf.sparse_placeholder(tf.float32,
                                               shape=[None, self.feature_count])
    self.right_nodes = tf.placeholder(tf.int32,
                                      shape=[None, 1])
Example 8: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd', learning_rate=1e-2, l2_weight=0,
             random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        w = self.vars['w']
        b = self.vars['b']
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
            l2_weight * tf.nn.l2_loss(xw)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Example 9: __setup_inductive
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def __setup_inductive(self, A, X, p_nodes):
    N = A.shape[0]
    nodes_rnd = np.random.permutation(N)
    n_hide = int(N * p_nodes)
    nodes_hide = nodes_rnd[:n_hide]

    A_hidden = A.copy().tolil()
    A_hidden[nodes_hide] = 0
    A_hidden[:, nodes_hide] = 0

    # additionally add any dangling nodes to the hidden ones since we can't learn from them
    nodes_dangling = np.where(A_hidden.sum(0).A1 + A_hidden.sum(1).A1 == 0)[0]
    if len(nodes_dangling) > 0:
        nodes_hide = np.concatenate((nodes_hide, nodes_dangling))
    nodes_keep = np.setdiff1d(np.arange(N), nodes_hide)

    self.X = tf.sparse_placeholder(tf.float32)
    self.feed_dict = {self.X: sparse_feeder(X[nodes_keep])}

    self.ind_pairs = batch_pairs_sample(A, nodes_hide)
    self.ind_ground_truth = A[self.ind_pairs[:, 0], self.ind_pairs[:, 1]].A1
    self.ind_feed_dict = {self.X: sparse_feeder(X)}

    A = A[nodes_keep][:, nodes_keep]

    return A
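sparse_feeder comes from the project's utilities; a plausible stand-in (an assumption, not the project's actual code) converts a scipy.sparse matrix into the (indices, values, shape) triple fed to the sparse_placeholder.

import numpy as np
import scipy.sparse as sp

def sparse_feeder(M):  # hypothetical stand-in for the project's helper
    M = sp.coo_matrix(M, dtype=np.float32)
    indices = np.vstack((M.row, M.col)).T
    return indices, M.data, M.shape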
Example 10: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None, opt_algo='gd', learning_rate=1e-2,
             l2_w=0, l2_v=0, random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        X_square = tf.SparseTensor(self.X.indices, tf.square(self.X.values), tf.to_int64(tf.shape(self.X)))
        xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, self.vars['v']))
        p = 0.5 * tf.reshape(
            tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(self.vars['v'])), 1),
            [-1, output_dim])
        xw = tf.sparse_tensor_dense_matmul(self.X, self.vars['w'])
        logits = tf.reshape(xw + self.vars['b'] + p, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(xw) + \
            l2_v * tf.nn.l2_loss(xv)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Example 11: placeholder
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiates a placeholder tensor and returns it.

    # Arguments
        shape: Shape of the placeholder
            (integer tuple, may include `None` entries).
        ndim: Number of axes of the tensor.
            At least one of {`shape`, `ndim`} must be specified.
            If both are specified, `shape` is used.
        dtype: Placeholder type.
        sparse: Boolean, whether the placeholder should have a sparse type.
        name: Optional name string for the placeholder.

    # Returns
        Tensor instance (with Keras metadata included).

    # Examples
    ```python
        >>> from keras import backend as K
        >>> input_ph = K.placeholder(shape=(2, 4, 5))
        >>> input_ph._keras_shape
        (2, 4, 5)
        >>> input_ph
        <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
    ```
    """
    if dtype is None:
        dtype = floatx()
    if not shape:
        if ndim:
            shape = tuple([None for _ in range(ndim)])
    if sparse:
        x = tf.sparse_placeholder(dtype, shape=shape, name=name)
    else:
        x = tf.placeholder(dtype, shape=shape, name=name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
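For completeness, a small usage sketch of the sparse branch (not from the original source): with sparse=True the function returns a SparseTensor rather than a dense placeholder, so downstream code must use sparse-aware ops.

from keras import backend as K

dense_ph = K.placeholder(shape=(None, 1000))                # regular tf.placeholder
sparse_ph = K.placeholder(shape=(None, 1000), sparse=True)  # tf.sparse_placeholder under the hood
print(K.is_sparse(dense_ph))   # False
print(K.is_sparse(sparse_ph))  # True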
Example 12: add_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def add_placeholders(self):
    # input tensor for log filter bank or MFCC features
    self.input_tensor = tf.placeholder(tf.float32,
                                       [None, None, self.n_dim],
                                       name='input')
    self.text = tf.sparse_placeholder(tf.int32, name='text')
    self.seq_length = tf.placeholder(tf.int32, [None], name='seq_length')
    self.keep_dropout = tf.placeholder(tf.float32)
Example 13: load_model
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def load_model(self, model: str, model_path: str, max_seq_length: int):
    spm_path_info = None
    g = tf.Graph()
    with g.as_default():
        hub_module = hub.Module(model_path)
        if model == 'use_transformer_lite':
            self.input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])
            self.use_outputs = hub_module(
                inputs=dict(
                    values=self.input_placeholder.values,
                    indices=self.input_placeholder.indices,
                    dense_shape=self.input_placeholder.dense_shape)
            )
            spm_path_info = hub_module(signature="spm_path")
        else:
            self.sentences = tf.placeholder(tf.string, shape=[None])
            self.use_outputs = hub_module(self.sentences, as_dict=True)
        init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
    g.finalize()

    self.sess = tf.Session(graph=g)
    self.sess.run(init_op)

    if model == 'use_transformer_lite':
        spm_path = self.sess.run(spm_path_info)
        self.sp_model.Load(spm_path)

    self.model_name = model
    self.max_seq_length = max_seq_length
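The lite variant consumes SentencePiece ids in sparse form; below is a sketch of the encode-and-feed step (the helper name is an assumption, modeled on the public universal-sentence-encoder-lite usage example).

def _to_sparse_ids(sp_model, sentences):  # hypothetical helper
    ids = [sp_model.EncodeAsIds(s) for s in sentences]
    values = [i for seq in ids for i in seq]
    indices = [[row, col] for row, seq in enumerate(ids) for col in range(len(seq))]
    dense_shape = (len(ids), max(len(seq) for seq in ids))
    return values, indices, dense_shape

values, indices, dense_shape = _to_sparse_ids(self.sp_model, ["hello world"])
embeddings = self.sess.run(
    self.use_outputs,
    feed_dict={self.input_placeholder.values: values,
               self.input_placeholder.indices: indices,
               self.input_placeholder.dense_shape: dense_shape})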
Example 14: construct_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def construct_placeholders(num_classes):
    placeholders = {
        'labels': tf.placeholder(DTYPE, shape=(None, num_classes), name='labels'),
        'node_subgraph': tf.placeholder(tf.int32, shape=(None), name='node_subgraph'),
        'dropout': tf.placeholder(DTYPE, shape=(None), name='dropout'),
        'adj_subgraph': tf.sparse_placeholder(DTYPE, name='adj_subgraph', shape=(None, None)),
        'adj_subgraph_0': tf.sparse_placeholder(DTYPE, name='adj_subgraph_0'),
        'adj_subgraph_1': tf.sparse_placeholder(DTYPE, name='adj_subgraph_1'),
        'adj_subgraph_2': tf.sparse_placeholder(DTYPE, name='adj_subgraph_2'),
        'adj_subgraph_3': tf.sparse_placeholder(DTYPE, name='adj_subgraph_3'),
        'adj_subgraph_4': tf.sparse_placeholder(DTYPE, name='adj_subgraph_4'),
        'adj_subgraph_5': tf.sparse_placeholder(DTYPE, name='adj_subgraph_5'),
        'adj_subgraph_6': tf.sparse_placeholder(DTYPE, name='adj_subgraph_6'),
        'adj_subgraph_7': tf.sparse_placeholder(DTYPE, name='adj_subgraph_7'),
        'dim0_adj_sub': tf.placeholder(tf.int64, shape=(None), name='dim0_adj_sub'),
        'norm_loss': tf.placeholder(DTYPE, shape=(None), name='norm_loss'),
        'is_train': tf.placeholder(tf.bool, shape=(None), name='is_train'),
    }
    return placeholders
#########
# TRAIN #
#########
Example 15: setup_network_and_graph
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_placeholder [as alias]
def setup_network_and_graph(self):
    # e.g. log filter bank or MFCC features
    # shape = [batch_size, max_stepsize, n_input + (2 * n_input * n_context)]
    # the batch_size and max_stepsize can vary along each step
    self.input_tensor = tf.placeholder(
        tf.float32, [None, None, self.n_input + (2 * self.n_input * self.n_context)], name='input')

    # Use sparse_placeholder; it generates a SparseTensor, as required by the ctc_loss op.
    self.targets = tf.sparse_placeholder(tf.int32, name='targets')

    # 1-D array of size [batch_size]
    self.seq_length = tf.placeholder(tf.int32, [None], name='seq_length')
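The sparse targets placeholder exists because tf.nn.ctc_loss requires its labels as a SparseTensor; a sketch of how the graph would plausibly continue (`logits` is an assumed time-major output of the network, not part of the original snippet):

# Hypothetical continuation: `logits` has shape [max_time, batch_size, num_classes].
loss = tf.reduce_mean(tf.nn.ctc_loss(labels=self.targets,
                                     inputs=logits,
                                     sequence_length=self.seq_length))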