

Python tensorflow.exp_mask Method Code Examples

This article collects typical usage examples of the Python method my.tensorflow.exp_mask. If you are unsure what exactly tensorflow.exp_mask does and how to use it, the curated code examples below may help. You can also explore further usage examples of the my.tensorflow module that the method belongs to.


Seven code examples of the tensorflow.exp_mask method are shown below, sorted by popularity by default.
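
Before the examples, here is a minimal sketch of what an exp_mask helper typically looks like, reconstructed from how it is called in the examples below (the exact implementation in my.tensorflow may differ, and the constant name and value here are assumptions): it adds a very large negative number wherever the mask is 0/False, so that a subsequent softmax assigns those positions essentially zero probability.

import tensorflow as tf  # TensorFlow 1.x style, matching the examples on this page

VERY_NEGATIVE_NUMBER = -1e30  # assumed constant; large enough to zero out softmax weights


def exp_mask(val, mask, name=None):
    """Push positions where `mask` is 0/False toward -inf so exp(.) of them is ~0."""
    with tf.name_scope(name or "exp_mask"):
        return val + (1.0 - tf.cast(mask, tf.float32)) * VERY_NEGATIVE_NUMBER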

Example 1: __call__

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def __call__(self, inputs, state, scope=None):
        """
        :param inputs: [N*B, I + B]
        :param state: [N*B, d]
        :param scope:
        :return: [N*B, d]
        """
        with tf.variable_scope(scope or self.__class__.__name__):
            d = self.state_size
            x = tf.slice(inputs, [0, 0], [-1, self._input_size])  # [N*B, I]
            mask = tf.slice(inputs, [0, self._input_size], [-1, -1])  # [N*B, B]
            B = tf.shape(mask)[1]
            prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1)  # [N, B, d] -> [N, 1, B, d]
            mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d])  # [N, B, B, d]
            # prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
            prev_state = self._reduce_func(exp_mask(prev_state, mask), 2)  # [N, B, d]
            prev_state = tf.reshape(prev_state, [-1, d])  # [N*B, d]
            return self._cell(x, prev_state) 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 20, Source: rnn_cell.py

Example 2: __call__

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def __call__(self, inputs, state, scope=None):
        """

        :param inputs: [N, d + JQ + JQ * d]
        :param state: [N, d]
        :param scope:
        :return:
        """
        with tf.variable_scope(scope or self.__class__.__name__):
            c_prev, h_prev = state
            x = tf.slice(inputs, [0, 0], [-1, self._input_size])
            q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len])  # [N, JQ]
            qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
            qs = tf.reshape(qs, [-1, self._q_len, self._input_size])  # [N, JQ, d]
            x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1])  # [N, JQ, d]
            h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1])  # [N, JQ, d]
            f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f'))  # [N, JQ, d]
            a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask))  # [N, JQ]
            q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
            z = tf.concat(1, [x, q])  # [N, 2d]; note: pre-TF-1.0 argument order (axis comes first)
            return self._cell(z, state) 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines: 23, Source: rnn_cell.py

Example 3: softmax

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def softmax(logits, mask=None, scope=None):
    with tf.name_scope(scope or "Softmax"):
        if mask is not None:
            logits = exp_mask(logits, mask)
        flat_logits = flatten(logits, 1)
        flat_out = tf.nn.softmax(flat_logits)
        out = reconstruct(flat_out, logits, 1)

        return out 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 11, Source: nn.py
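
As a quick, hypothetical usage sketch for the masked softmax above (tensor names and shapes are illustrative, and the flatten/reconstruct helpers are assumed to come from the same my.tensorflow package): the mask marks the real tokens in a padded batch, so padded positions end up with numerically zero attention weight.

# Hypothetical usage of the softmax helper above (TF 1.x style).
scores = tf.random_normal([2, 6])              # raw attention logits  [batch, seq_len]
values = tf.random_normal([2, 6, 8])           # per-token vectors     [batch, seq_len, d]
seq_mask = tf.sequence_mask([4, 6], maxlen=6)  # True for real tokens  [batch, seq_len]

attn = softmax(scores, mask=seq_mask, scope="attn")            # padded positions get ~0 weight
context = tf.reduce_sum(values * tf.expand_dims(attn, -1), 1)  # attended summary [batch, d]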

Example 4: double_linear_logits

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def double_linear_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
    with tf.variable_scope(scope or "Double_Linear_Logits"):
        first = tf.tanh(linear(args, size, bias, bias_start=bias_start, scope='first',
                               wd=wd, input_keep_prob=input_keep_prob, is_train=is_train))
        second = linear(first, 1, bias, bias_start=bias_start, squeeze=True, scope='second',
                        wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        if mask is not None:
            second = exp_mask(second, mask)
        return second 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 11, Source: nn.py

Example 5: linear_logits

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def linear_logits(args, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
    with tf.variable_scope(scope or "Linear_Logits"):
        logits = linear(args, 1, bias, bias_start=bias_start, squeeze=True, scope='first',
                        wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        if mask is not None:
            logits = exp_mask(logits, mask)
        return logits 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 9, Source: nn.py

Example 6: sum_logits

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def sum_logits(args, mask=None, name=None):
    with tf.name_scope(name or "sum_logits"):
        if args is None or (nest.is_sequence(args) and not args):
            raise ValueError("`args` must be specified")
        if not nest.is_sequence(args):
            args = [args]
        rank = len(args[0].get_shape())
        logits = sum(tf.reduce_sum(arg, rank-1) for arg in args)
        if mask is not None:
            logits = exp_mask(logits, mask)
        return logits 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 13, Source: nn.py
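
A small, hypothetical usage sketch for sum_logits (names and shapes are illustrative): each argument is summed over its last axis to give one logit per position, and the optional mask pushes padded positions toward -inf before any later softmax.

# Hypothetical usage of sum_logits above (TF 1.x style).
word_scores = tf.random_normal([4, 10, 8])               # [batch, seq_len, features]
token_mask = tf.sequence_mask([7, 10, 3, 5], maxlen=10)  # [batch, seq_len], True for real tokens

token_logits = sum_logits([word_scores], mask=token_mask)  # [batch, seq_len]
token_probs = tf.nn.softmax(token_logits)                  # padded tokens get ~0 probability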

Example 7: dense_logits_softmax_features

# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import exp_mask [as alias]
def dense_logits_softmax_features(config, dense_logit_feature, collection, ph_mask, switch, scope=None):
    with tf.variable_scope(scope or "dense_logits_softmax_features"):
        # assert p_mask != None 
        # assert h_mask != None 
        # PL = dense_logit.get_shape().as_list()[1]
        # HL = dense_logit.get_shape().as_list()[2]

        # p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, HL, 1]), tf.bool), axis=3)
        # h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
        # ph_mask = p_mask_aug & h_mask_aug #[N, PL, HL]

        # ph_mask_d = tf.tile(tf.expand_dims(ph_mask, 3), [1,1,1,config.dense_logit_features_num])
        dense_logit_with_exp_mask = exp_mask(dense_logit_feature, ph_mask) #[N, PL, HL, 20]
        dense_logit_softmax_col = None 
        dense_logit_softmax_row = None 
        dense_logit_with_exp_mask = tf.expand_dims(dense_logit_with_exp_mask, axis=3)

        if switch[0]:
            print("dense logit with exp mask size")
            print(dense_logit_with_exp_mask.get_shape().as_list())
            dense_logit_softmax_row = tf.nn.softmax(dense_logit_with_exp_mask, dim=2, name='softmax_row')

        if switch[1]:
            dense_logit_softmax_col = tf.nn.softmax(dense_logit_with_exp_mask, dim=1, name='softmax_col')

        mask = tf.expand_dims(tf.cast(ph_mask, tf.float32), axis=3)
        if dense_logit_softmax_row is not None:
            dense_logit_softmax_row = mask * dense_logit_softmax_row
            print("mask shape")
            print(mask.get_shape().as_list())
            print("single layer feature")
            print(dense_logit_softmax_row.get_shape().as_list())
            collection.append(dense_logit_softmax_row)   
        if dense_logit_softmax_col is not None:
            dense_logit_softmax_col = mask * dense_logit_softmax_col
            collection.append(dense_logit_softmax_col)
        
        # return tf.concat([dense_logit, dense_logit_softmax_col, dense_logit_softmax_row], axis=3) 
Developer: YichenGong, Project: Densely-Interactive-Inference-Network, Lines: 43, Source: attmix_CNN.py


Note: The my.tensorflow.exp_mask examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; please consult the corresponding project's license before distributing or using it, and do not republish without permission.