This page collects typical code examples of the Python method my.tensorflow.reconstruct. If you have been wondering what exactly tensorflow.reconstruct does, how to call it, or what real usage looks like, the hand-picked examples below may help. You can also browse further usage examples from the enclosing module, my.tensorflow.
The page lists 12 code examples of tensorflow.reconstruct, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
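Every example below follows the same flatten → op → reconstruct pattern: collapse the extra leading dimensions of an arbitrary-rank tensor so that a rank-limited op (an RNN, a matmul, a softmax) can run on it, then restore the original leading dimensions from a reference tensor. The two helpers themselves are not listed on this page; the following is a minimal sketch of plausible implementations, inferred from how they are used below (the actual my.tensorflow code may differ):

import tensorflow as tf
from functools import reduce
from operator import mul

def flatten(tensor, keep):
    # Collapse all leading dims into one, keeping the last `keep` dims.
    fixed_shape = tensor.get_shape().as_list()
    start = len(fixed_shape) - keep
    # Prefer static sizes where known; fall back to dynamic shapes.
    left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
    out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i]
                          for i in range(start, len(fixed_shape))]
    return tf.reshape(tensor, out_shape)

def reconstruct(tensor, ref, keep):
    # Inverse of flatten: restore the leading dims of `ref`, keeping the last
    # `keep` dims of `tensor` (which may differ from `ref`'s, e.g. a new
    # hidden size after an RNN).
    ref_shape = ref.get_shape().as_list()
    tensor_shape = tensor.get_shape().as_list()
    ref_stop = len(ref_shape) - keep
    tensor_start = len(tensor_shape) - keep
    pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
    keep_shape = [tensor_shape[i] or tf.shape(tensor)[i]
                  for i in range(tensor_start, len(tensor_shape))]
    return tf.reshape(tensor, pre_shape + keep_shape)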
Example 1: bw_dynamic_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    # Reverse the time axis so a forward RNN effectively runs backward.
    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations,
                                             swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    # Reverse back so outputs line up with the original time order.
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
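A hypothetical usage sketch (the cell, placeholders, and sizes are illustrative, not from the source). Note that tf.reverse_sequence above receives the unflattened sequence_length, so when lengths are supplied this wrapper is most safely called with inputs whose batch dimension is already flat; higher-rank inputs are unproblematic when sequence_length is None:

cell = tf.nn.rnn_cell.GRUCell(100)
x = tf.placeholder(tf.float32, [None, 30, 50])  # [batch, J, d]
x_len = tf.placeholder(tf.int64, [None])        # one valid length per row
# The RNN runs backward over the J axis; reconstruct restores x's leading dims.
outputs, final_state = bw_dynamic_rnn(cell, x, sequence_length=x_len, dtype=tf.float32)
# outputs: [batch, 30, 100]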
Example 2: bidirectional_dynamic_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
    assert not time_major
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                   initial_state_fw=initial_state_fw,
                                   initial_state_bw=initial_state_bw,
                                   dtype=dtype, parallel_iterations=parallel_iterations,
                                   swap_memory=swap_memory,
                                   time_major=time_major, scope=scope)
    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state
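Here only the flattened flat_len reaches the underlying RNN call, so higher-rank inputs work as intended. A hypothetical sketch with a rank-4 tensor, concatenating the two directions afterwards (all names and sizes are illustrative):

cell_fw = tf.nn.rnn_cell.LSTMCell(75)
cell_bw = tf.nn.rnn_cell.LSTMCell(75)
x = tf.placeholder(tf.float32, [None, 10, 30, 50])  # [batch, M, J, d]
x_len = tf.placeholder(tf.int64, [None, 10])        # [batch, M]
(fw, bw), _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, x,
                                        sequence_length=x_len, dtype=tf.float32)
h = tf.concat([fw, bw], 3)  # [batch, M, J, 150]; per the FIXME, final_state stays flat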
Example 3: bidirectional_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_rnn(cell_fw, cell_bw, inputs,
                      initial_state_fw=None, initial_state_bw=None,
                      dtype=None, sequence_length=None, scope=None):
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    (flat_fw_outputs, flat_bw_outputs), final_state = \
        tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                        initial_state_fw=initial_state_fw,
                                        initial_state_bw=initial_state_bw,
                                        dtype=dtype, scope=scope)
    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state
Example 4: linear
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    with tf.variable_scope(scope or 'Linear'):
        flat_out = _linear(flat_args, output_size, bias,
                           bias_initializer=tf.constant_initializer(bias_start))
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
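A hypothetical sketch of calling this linear on two rank-3 tensors at once (_linear concatenates the arguments along the last axis); squeeze=True drops the trailing size-1 dimension, which is convenient for producing attention logits. Names and sizes are illustrative:

h = tf.placeholder(tf.float32, [None, 30, 100])
u = tf.placeholder(tf.float32, [None, 30, 100])
is_train = tf.placeholder(tf.bool, [])
# One scalar logit per position: reconstruct gives [batch, 30, 1],
# then squeeze removes the last axis -> [batch, 30].
logits = linear([h, u], 1, bias=True, squeeze=True, scope='att_logit',
                input_keep_prob=0.8, is_train=is_train)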
Example 5: linear
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    with tf.variable_scope(scope or 'Linear'):
        flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
Example 6: dynamic_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    print("dynamic rnn input")
    print(inputs.get_shape())
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    print("dynamic rnn flatten shape")
    print(flat_inputs.get_shape())
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations,
                                             swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    print("flat_outputs shape")
    print(flat_outputs.get_shape())
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
Example 7: linear
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    with tf.variable_scope(scope or "linear"):
        if args is None or (nest.is_sequence(args) and not args):
            raise ValueError("`args` must be specified")
        if not nest.is_sequence(args):
            args = [args]
        flat_args = [flatten(arg, 1) for arg in args]
        # if input_keep_prob < 1.0:
        # Note: the guard above is commented out in this variant, so dropout
        # is wired in unconditionally and is_train is always required.
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
        flat_out = _linear(flat_args, output_size, bias)
        out = reconstruct(flat_out, args[0], 1)
        if squeeze:
            out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
        if wd:
            add_wd(wd)
        return out
Example 8: bidirectional_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_rnn(cell_fw, cell_bw, inputs,
                      initial_state_fw=None, initial_state_bw=None,
                      dtype=None, sequence_length=None, scope=None):
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                           initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
                           dtype=dtype, scope=scope)
    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state
Example 9: linear
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
Example 10: dynamic_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations,
                                             swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
Example 11: bidirectional_dynamic_rnn
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
    assert not time_major
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                   initial_state_fw=initial_state_fw,
                                   initial_state_bw=initial_state_bw,
                                   dtype=dtype, parallel_iterations=parallel_iterations,
                                   swap_memory=swap_memory,
                                   time_major=time_major, scope=scope)
    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state

# def bidirectional_rnn(cell_fw, cell_bw, inputs,
#                       initial_state_fw=None, initial_state_bw=None,
#                       dtype=None, sequence_length=None, scope=None):
#     flat_inputs = flatten(inputs, 2)  # [-1, J, d]
#     flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
#     (flat_fw_outputs, flat_bw_outputs), final_state = \
#         _bidirectional_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
#                            initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
#                            dtype=dtype, scope=scope)
#     fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
#     bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
#     # FIXME : final state is not reshaped!
#     return (fw_outputs, bw_outputs), final_state
Example 12: softmax
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def softmax(logits, mask=None, scope=None):
    with tf.name_scope(scope or "Softmax"):
        if mask is not None:
            logits = exp_mask(logits, mask)
        flat_logits = flatten(logits, 1)
        flat_out = tf.nn.softmax(flat_logits)
        out = reconstruct(flat_out, logits, 1)
        return out
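exp_mask is another my.tensorflow helper not listed on this page. The usual trick, and presumably what it does here, is to push masked-out logits toward negative infinity so they receive near-zero probability after the softmax. A hedged sketch plus a usage example (the constant and placeholder names are assumptions, not from the source):

VERY_NEGATIVE_NUMBER = -1e30

def exp_mask(val, mask):
    # Assumed behavior: keep logits where mask is True, drown the rest.
    return val + (1.0 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER

logits = tf.placeholder(tf.float32, [None, 10, 30])
mask = tf.placeholder(tf.bool, [None, 10, 30])
p = softmax(logits, mask=mask)  # [batch, 10, 30]; masked positions get ~0 probability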