

Python tensorflow.reconstruct Method Code Examples

This article collects typical usage examples of the my.tensorflow.reconstruct method in Python. If you are unsure what tensorflow.reconstruct does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore other usage examples from the my.tensorflow module.


Twelve code examples of the tensorflow.reconstruct method are shown below, sorted by popularity by default.
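Every example on this page pairs reconstruct with a flatten helper from the same my.tensorflow module: flatten collapses all leading dimensions of a tensor into a single batch axis so that ops expecting low-rank input (such as tf.nn.dynamic_rnn) can run, and reconstruct reshapes the flat result back to the reference tensor's leading dimensions. Neither helper is reproduced on this page, so the following is a minimal sketch of how the pair is typically implemented in these BiDAF-style TensorFlow 1.x codebases; treat it as orientation, not as the exact source.

from functools import reduce
from operator import mul

import tensorflow as tf


def flatten(tensor, keep):
    # Collapse all but the last `keep` dimensions of `tensor` into one axis.
    fixed_shape = tensor.get_shape().as_list()
    start = len(fixed_shape) - keep
    # Static dims are Python ints; dynamic dims (None) fall back to tf.shape at runtime.
    left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
    out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i]
                          for i in range(start, len(fixed_shape))]
    return tf.reshape(tensor, out_shape)


def reconstruct(tensor, ref, keep):
    # Restore the leading dimensions of `ref` while keeping the trailing
    # `keep` dimensions of `tensor` (e.g. the new hidden size) unchanged.
    ref_shape = ref.get_shape().as_list()
    tensor_shape = tensor.get_shape().as_list()
    ref_stop = len(ref_shape) - keep
    tensor_start = len(tensor_shape) - keep
    pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
    keep_shape = [tensor_shape[i] or tf.shape(tensor)[i]
                  for i in range(tensor_start, len(tensor_shape))]
    return tf.reshape(tensor, pre_shape + keep_shape)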

Example 1: bw_dynamic_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines of code: 21, Source file: rnn.py

Example 2: bidirectional_dynamic_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
    assert not time_major

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                   initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
                                   dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                   time_major=time_major, scope=scope)

    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines of code: 21, Source file: rnn.py
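For orientation, a hypothetical call to the wrapper above might look as follows; the cell sizes, placeholder shapes, and the final concatenation are illustrative, not taken from the source project. The point is that a rank-4 input works even though tf.nn.bidirectional_dynamic_rnn itself only accepts a single batch dimension.

# Hypothetical usage sketch (TensorFlow 1.x); shapes and cells are illustrative.
cell_fw = tf.nn.rnn_cell.GRUCell(75)
cell_bw = tf.nn.rnn_cell.GRUCell(75)
x = tf.placeholder(tf.float32, [None, 4, 20, 100])  # [batch, sentences, time, dim]
x_len = tf.placeholder(tf.int32, [None, 4])         # true length of each sentence

(fw, bw), _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, x,
                                        sequence_length=x_len, dtype=tf.float32)
# flatten merged [batch, sentences] into one axis for the RNN;
# reconstruct restored it, so fw and bw are [batch, 4, 20, 75] each.
h = tf.concat([fw, bw], axis=3)  # [batch, 4, 20, 150]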

Example 3: bidirectional_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_rnn(cell_fw, cell_bw, inputs,
                      initial_state_fw=None, initial_state_bw=None,
                      dtype=None, sequence_length=None, scope=None):

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    (flat_fw_outputs, flat_bw_outputs), final_state = \
        tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                        initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
                                        dtype=dtype, scope=scope)

    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines of code: 18, Source file: rnn.py

Example 4: linear

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    with tf.variable_scope(scope or 'Linear'):
        flat_out = _linear(flat_args, output_size, bias, bias_initializer=tf.constant_initializer(bias_start))
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines of code: 23, Source file: nn.py
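A hypothetical call to this linear wrapper, with illustrative names and shapes (not from the source project), shows why flatten/reconstruct are useful here: _linear only accepts rank-2 input, yet the wrapper applies the projection to a rank-3 tensor directly.

# Hypothetical usage sketch (TensorFlow 1.x); names and shapes are illustrative.
x = tf.placeholder(tf.float32, [None, 30, 100])  # [batch, time, dim]
is_train = tf.placeholder(tf.bool, [])

# Projects the last axis from 100 to 64; flatten/reconstruct hide the
# rank-2 reshape that _linear requires internally.
h = linear([x], 64, True, scope='proj', input_keep_prob=0.8, is_train=is_train)
# h: [batch, 30, 64]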

Example 5: linear

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    with tf.variable_scope(scope or 'Linear'):
        flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out 
Developer: wenwei202, Project: iss-rnns, Lines of code: 23, Source file: nn.py

Example 6: dynamic_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    print("dynamic rnn input")
    print(inputs.get_shape())
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    print("dynamic rnn flatten shape")
    print(flat_inputs.get_shape())
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    print("flat_outputs shape")
    print(flat_outputs.get_shape())
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines of code: 21, Source file: rnn.py

Example 7: linear

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    with tf.variable_scope(scope or "linear"):
        if args is None or (nest.is_sequence(args) and not args):
            raise ValueError("`args` must be specified")
        if not nest.is_sequence(args):
            args = [args]

        flat_args = [flatten(arg, 1) for arg in args]
        # if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                         for arg in flat_args]
        flat_out = _linear(flat_args, output_size, bias)
        out = reconstruct(flat_out, args[0], 1)
        if squeeze:
            out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
        if wd:
            add_wd(wd)

    return out 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines of code: 23, Source file: nn.py

Example 8: bidirectional_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_rnn(cell_fw, cell_bw, inputs,
                      initial_state_fw=None, initial_state_bw=None,
                      dtype=None, sequence_length=None, scope=None):

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                           initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
                           dtype=dtype, scope=scope)

    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state 
Developer: sld, Project: convai-bot-1337, Lines of code: 18, Source file: rnn.py

Example 9: linear

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out 
Developer: sld, Project: convai-bot-1337, Lines of code: 22, Source file: nn.py

Example 10: dynamic_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines of code: 16, Source file: rnn.py
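A hypothetical call to this wrapper (TensorFlow 1.x, illustrative shapes) makes the reshaping explicit: tf.nn.dynamic_rnn accepts only [batch, time, dim], so the wrapper flattens the two leading axes, runs the RNN, and reconstructs the original leading dimensions on the way out.

# Hypothetical usage sketch (TensorFlow 1.x); shapes and cell are illustrative.
cell = tf.nn.rnn_cell.BasicLSTMCell(50)
x = tf.placeholder(tf.float32, [None, 4, 20, 100])  # [batch, sentences, time, dim]
x_len = tf.placeholder(tf.int32, [None, 4])         # true length of each sentence

outputs, final_state = dynamic_rnn(cell, x, sequence_length=x_len, dtype=tf.float32)
# outputs: [batch, 4, 20, 50] -- leading dimensions restored by reconstruct.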

Example 11: bidirectional_dynamic_rnn

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
    assert not time_major

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    (flat_fw_outputs, flat_bw_outputs), final_state = \
        _bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
                                   initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
                                   dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                   time_major=time_major, scope=scope)

    fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
    bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
    # FIXME : final state is not reshaped!
    return (fw_outputs, bw_outputs), final_state


# def bidirectional_rnn(cell_fw, cell_bw, inputs,
#                       initial_state_fw=None, initial_state_bw=None,
#                       dtype=None, sequence_length=None, scope=None):

#     flat_inputs = flatten(inputs, 2)  # [-1, J, d]
#     flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

#     (flat_fw_outputs, flat_bw_outputs), final_state = \
#         _bidirectional_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len,
#                            initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,
#                            dtype=dtype, scope=scope)

#     fw_outputs = reconstruct(flat_fw_outputs, inputs, 2)
#     bw_outputs = reconstruct(flat_bw_outputs, inputs, 2)
#     # FIXME : final state is not reshaped!
#     return (fw_outputs, bw_outputs), final_state 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines of code: 39, Source file: rnn.py

Example 12: softmax

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import reconstruct [as alias]
def softmax(logits, mask=None, scope=None):
    with tf.name_scope(scope or "Softmax"):
        if mask is not None:
            logits = exp_mask(logits, mask)
        flat_logits = flatten(logits, 1)
        flat_out = tf.nn.softmax(flat_logits)
        out = reconstruct(flat_out, logits, 1)

        return out 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines of code: 11, Source file: nn.py
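The exp_mask helper used above comes from the same my.tensorflow module and is not reproduced on this page. In BiDAF-style codebases it adds a very large negative number to masked-out logits so they receive near-zero probability after the softmax; the sketch below assumes that behavior (the constant's exact value is an assumption), followed by a hypothetical call.

VERY_NEGATIVE_NUMBER = -1e30  # assumed value; the original module defines its own constant

def exp_mask(val, mask):
    # Push masked positions toward -inf so softmax assigns them ~0 probability.
    return val + (1.0 - tf.cast(mask, 'float32')) * VERY_NEGATIVE_NUMBER

# Hypothetical usage: a padded batch of logits (shapes are illustrative).
logits = tf.placeholder(tf.float32, [None, 30])
mask = tf.placeholder(tf.bool, [None, 30])  # True at real tokens, False at padding
probs = softmax(logits, mask=mask)          # padding positions get ~0 probability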


Note: The my.tensorflow.reconstruct method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright in the source code belongs to the original authors, and any use or distribution should follow the corresponding project's license. Please do not repost without permission.