This page collects typical usage examples of the Python method tensorflow.placeholders. If you are wondering how tensorflow.placeholders works, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module in which the method is defined.
The following 12 code examples of tensorflow.placeholders are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: rnn_placeholders
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def rnn_placeholders(state):
    """
    Given nested [multilayer] RNN state tensor, infers and returns state placeholders.

    Args:
        state: tf.nn.lstm zero-state tuple.

    Returns:
        tuple of placeholders
    """
    if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
        c, h = state
        c = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(c.get_shape()[1:]), c.op.name + '_c_pl')
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return tf.contrib.rnn.LSTMStateTuple(c, h)
    elif isinstance(state, tf.Tensor):
        h = state
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return h
    else:
        structure = [rnn_placeholders(x) for x in state]
        return tuple(structure)
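A minimal usage sketch for rnn_placeholders, assuming TensorFlow 1.x with tf.contrib available; the cell size and batch size below are illustrative:

import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(num_units=64)
zero_state = cell.zero_state(batch_size=1, dtype=tf.float32)  # LSTMStateTuple(c, h)
# The returned placeholders mirror the zero-state structure but leave the
# batch dimension free (shape [None, 64] instead of [1, 64]):
state_ph = rnn_placeholders(zero_state)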
Example 2: nested_placeholders
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def nested_placeholders(ob_space, batch_dim=None, name='nested'):
    """
    Given nested observation space as dictionary of shape tuples,
    returns nested state batch-wise placeholders.

    Args:
        ob_space:   [nested] dict of shapes
        name:       name scope
        batch_dim:  batch dimension

    Returns:
        nested dictionary of placeholders
    """
    if isinstance(ob_space, dict):
        out = {key: nested_placeholders(value, batch_dim, name + '_' + key) for key, value in ob_space.items()}
        return out
    else:
        out = tf.placeholder(tf.float32, [batch_dim] + list(ob_space), name + '_pl')
        return out
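A short usage sketch for nested_placeholders; the observation space below is hypothetical:

# Nested observation space given as a dict of shape tuples.
ob_space = {'external': {'camera': (64, 64, 3)}, 'internal': (4,)}
obs_ph = nested_placeholders(ob_space, batch_dim=None, name='obs')
# obs_ph mirrors ob_space: obs_ph['external']['camera'] is a float32
# placeholder of shape (?, 64, 64, 3); obs_ph['internal'] has shape (?, 4).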
Example 3: build_graph
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
    """
    Sets the objective function and target weights for the optimize function

    Args:
        constraint_obj (tf_op) : constraint objective
        target (Policy) : Policy whose values we are optimizing over
        input_val_dict (dict) : dict of tf.placeholders for input data which may be subsampled.
            The first dimension corresponds to the number of data points
        reg_coeff (float) : regularization coefficient
    """
    self._target = target
    self.reg_coeff = reg_coeff
    self._input_ph_dict = input_val_dict

    params = list(target.get_params().values())
    constraint_grads = tf.gradients(constraint_obj, xs=params)
    for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
        if grad is None:
            constraint_grads[idx] = tf.zeros_like(param)

    constraint_gradient = tf.concat([tf.reshape(grad, [-1]) for grad in constraint_grads], axis=0)
    self._constraint_gradient = constraint_gradient
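The flatten-and-concatenate idiom at the end of build_graph can be exercised on its own; the variables below are illustrative and not part of the original class:

a = tf.Variable(tf.zeros([3, 2]))
b = tf.Variable(tf.zeros([5]))
grads = tf.gradients(tf.reduce_sum(a) + tf.reduce_sum(b), xs=[a, b])
# Any missing gradient would first be replaced by zeros_like(param), as above;
# each gradient is then reshaped to a vector and concatenated:
flat_grad = tf.concat([tf.reshape(g, [-1]) for g in grads], axis=0)  # shape (11,)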
Example 4: likelihood_ratio_sym
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def likelihood_ratio_sym(self, obs, action, dist_info_old, policy_params):
    """
    Computes the likelihood ratio p_new(action|obs) / p_old(action|obs) between
    the new and the old policy distributions.

    Args:
        obs (tf.Tensor): symbolic variable for observations
        action (tf.Tensor): symbolic variable for actions
        dist_info_old (dict): dictionary of tf.placeholders with old policy information
        policy_params (dict): dictionary of the policy parameters (each value is a tf.Tensor)

    Returns:
        (tf.Tensor) : likelihood ratio
    """
    distribution_info_new = self.distribution_info_sym(obs, params=policy_params)
    likelihood_ratio = self._dist.likelihood_ratio_sym(action, dist_info_old, distribution_info_new)
    return likelihood_ratio
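The actual ratio computation is delegated to self._dist, which is not shown here. For a diagonal Gaussian policy the ratio is typically computed in log space; a rough, illustrative sketch (not the library's implementation):

import numpy as np

def _gaussian_likelihood_ratio(action, dist_info_old, dist_info_new):
    def log_prob(info):
        mean, log_std = info['mean'], info['log_std']
        z = (action - mean) / tf.exp(log_std)
        return -0.5 * tf.reduce_sum(tf.square(z) + 2.0 * log_std + np.log(2.0 * np.pi), axis=-1)
    # exp(log p_new - log p_old) = p_new / p_old
    return tf.exp(log_prob(dist_info_new) - log_prob(dist_info_old))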
Example 5: flat_placeholders
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def flat_placeholders(ob_space, batch_dim=None, name='flt'):
    """
    Given nested observation space as dictionary of shape tuples,
    returns flattened dictionary of batch-wise placeholders.

    Args:
        ob_space:   [nested dict] of tuples
        name:       name_scope
        batch_dim:  batch dimension

    Returns:
        flat dictionary of tf.placeholders
    """
    return flatten_nested(nested_placeholders(ob_space, batch_dim=batch_dim, name=name))
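flat_placeholders simply composes nested_placeholders with flatten_nested (defined elsewhere in the same module). A usage sketch with a hypothetical observation space:

flat_ph = flat_placeholders({'img': (32, 32, 3), 'speed': (1,)}, name='obs')
# flat_ph is a flat dict of placeholders; the exact key naming scheme is
# determined by flatten_nested, which is not shown here.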
Example 6: feed_dict_from_nested
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def feed_dict_from_nested(placeholder, value, expand_batch=False):
    """
    Zips a flat feed dictionary from nested dictionaries of placeholders and values.

    Args:
        placeholder:    nested dictionary of placeholders
        value:          nested dictionary of values
        expand_batch:   if True, add a fake batch dimension to values

    Returns:
        flat feed_dict
    """
    assert_same_structure(placeholder, value, check_types=True)
    return _flat_from_nested(placeholder, value, expand_batch)
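A sketch of how feed_dict_from_nested might be called; assert_same_structure and _flat_from_nested come from the surrounding module and are not shown, and the observation values below are illustrative:

import numpy as np

obs_ph = nested_placeholders({'camera': (8, 8, 3)}, name='obs')
obs_value = {'camera': np.zeros((8, 8, 3), np.float32)}
# expand_batch=True prepends a fake batch dimension to each value array:
feed = feed_dict_from_nested(obs_ph, obs_value, expand_batch=True)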
Example 7: feed_dict_rnn_context
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def feed_dict_rnn_context(placeholders, values):
    """
    Creates tf.feed_dict for flat placeholders and nested values.

    Args:
        placeholders:   flat structure of placeholders
        values:         nested structure of values

    Returns:
        flat feed dictionary
    """
    return {key: value for key, value in zip(placeholders, flatten_nested(values))}
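A usage sketch for feed_dict_rnn_context with hand-built placeholders and numpy values; the shapes are illustrative, and flatten_nested is the same helper used elsewhere in this module:

import numpy as np

c_pl = tf.placeholder(tf.float32, [None, 64], name='c_pl')
h_pl = tf.placeholder(tf.float32, [None, 64], name='h_pl')
init_state = (np.zeros((1, 64), np.float32), np.zeros((1, 64), np.float32))
# Flat placeholders are zipped, in order, with the flattened nested values:
feed = feed_dict_rnn_context([c_pl, h_pl], init_state)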
Example 8: _get_placeholder_list
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def _get_placeholder_list(name, length, dtype=tf.int32):
    """
    Args:
        name:   prefix of the name of each tf.placeholder list item, where the i'th name is [name]i.
        length: number of items (tf.placeholders) in the returned list.

    Returns:
        list of tensorflow placeholders of dtype=tf.int32 and shape [None].
    """
    return [tf.placeholder(dtype, shape=[None], name=name + str(i)) for i in range(length)]
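A usage sketch; the prefix and count below are illustrative:

# Returns three int32 placeholders of shape [None], named length0, length1, length2.
length_phs = _get_placeholder_list('length', 3)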
Example 9: loss
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def loss(self, net_out):
    m = self.meta
    loss_type = self.meta['type']
    assert loss_type in _LOSS_TYPE, \
        'Loss type {} not implemented'.format(loss_type)

    out = net_out
    out_shape = out.get_shape()
    out_dtype = out.dtype.base_dtype
    _truth = tf.placeholder(out_dtype, out_shape)

    self.placeholders = dict({
        'truth': _truth
    })

    diff = _truth - out
    if loss_type in ['sse', 'l2']:
        loss = tf.nn.l2_loss(diff)
    elif loss_type == 'smooth':
        small = tf.cast(diff < 1, tf.float32)
        large = 1. - small
        l1_loss = tf.nn.l1_loss(tf.multiply(diff, large))
        l2_loss = tf.nn.l2_loss(tf.multiply(diff, small))
        loss = l1_loss + l2_loss
    elif loss_type in ['sparse', 'l1']:
        loss = l1_loss(diff)
    elif loss_type == 'softmax':
        loss = tf.nn.softmax_cross_entropy_with_logits(logits, y)
        loss = tf.reduce_mean(loss)
    elif loss_type == 'svm':
        assert 'train_size' in m, \
            'Must specify'
        size = m['train_size']
        self.nu = tf.Variable(tf.ones([train_size, num_classes]))
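If this loss were used for training, the 'truth' placeholder created above would be fed with ground-truth labels matching the network output; a minimal sketch, where model, truth_batch, sess and train_op are illustrative names for the surrounding training code:

# Feed ground-truth labels into the placeholder built by loss():
feed = {model.placeholders['truth']: truth_batch}
sess.run(train_op, feed_dict=feed)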
Example 10: loss
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def loss(self, net_out):
    m = self.meta
    loss_type = self.meta['type']
    assert loss_type in _LOSS_TYPE, \
        'Loss type {} not implemented'.format(loss_type)

    out = net_out
    out_shape = out.get_shape()
    out_dtype = out.dtype.base_dtype
    _truth = tf.placeholder(out_dtype, out_shape)

    self.placeholders = dict({
        'truth': _truth
    })

    diff = _truth - out
    if loss_type in ['sse', 'l2']:
        loss = tf.nn.l2_loss(diff)
    elif loss_type == 'smooth':
        small = tf.cast(diff < 1, tf.float32)
        large = 1. - small
        l1_loss = tf.nn.l1_loss(tf.multiply(diff, large))
        l2_loss = tf.nn.l2_loss(tf.multiply(diff, small))
        loss = l1_loss + l2_loss
    elif loss_type in ['sparse', 'l1']:
        loss = l1_loss(diff)
    elif loss_type == 'softmax':
        loss = tf.nn.softmax_cross_entropy_with_logits(logits, y)
        loss = tf.reduce_mean(loss)
    elif loss_type == 'svm':
        assert 'train_size' in m, \
            'Must specify'
        size = m['train_size']
        self.nu = tf.Variable(tf.ones([train_size, num_classes]))
Example 11: distribution_info_sym
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def distribution_info_sym(self, obs_var, params=None):
    """
    Return the symbolic distribution information about the actions.

    Args:
        obs_var (placeholder) : symbolic variable for observations
        params (None or dict) : a dictionary of placeholders that contains information about the
            state of the policy at the time it received the observation

    Returns:
        (dict) : a dictionary of tf placeholders for the policy output distribution
    """
    raise NotImplementedError
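Concrete policy classes override this method. A minimal sketch of what such an override could look like for a diagonal Gaussian policy, assuming TensorFlow 1.x; the layer sizes and the self.action_dim attribute are illustrative, not the library's actual implementation:

def distribution_info_sym(self, obs_var, params=None):
    # For brevity this sketch ignores `params`; a full implementation would
    # build the network from the supplied parameter tensors when params is given.
    hidden = tf.layers.dense(obs_var, 64, activation=tf.nn.tanh)
    mean = tf.layers.dense(hidden, self.action_dim)
    log_std = tf.get_variable('log_std', shape=[self.action_dim],
                              initializer=tf.zeros_initializer())
    return dict(mean=mean, log_std=log_std)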
Example 12: distribution_info_keys
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import placeholders [as alias]
def distribution_info_keys(self, obs, state_infos):
    """
    Args:
        obs (placeholder) : symbolic variable for observations
        state_infos (dict) : a dictionary of placeholders that contains information about the
            state of the policy at the time it received the observation

    Returns:
        (dict) : a dictionary of tf placeholders for the policy output distribution
    """
    raise NotImplementedError