

Python ops.linear Method Code Examples

This article collects typical usage examples of the Python ops.linear method. If you are wondering how exactly to use ops.linear, what it does, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the ops module that this method belongs to.


The following shows 15 code examples of the ops.linear method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
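Note that each project ships its own ops module, so the signature of ops.linear varies across the examples: the mac-network version takes explicit input and output dimensions, the adagan/wae versions take an opts dict as the first argument, and the MLDG example uses a PyTorch functional variant. As a rough orientation only, here is a minimal hypothetical sketch of a TF1-style linear op (not taken from any of the projects below): a fully connected layer that creates a weight matrix and bias inside a variable scope and returns input_ @ W + b.

import tensorflow as tf  # TF1-style graph API (tf.compat.v1 under TF2)

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0):
    """Hypothetical minimal fully connected layer: input_ @ W + b."""
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, matrix) + bias

With a definition along these lines, a call such as linear(h, 1, 'd_h3_lin') in the MMD examples below would apply a single fully connected layer producing one logit under the given variable scope.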

Example 1: baselineAttLayer

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def baselineAttLayer(self, images, memory, inDim, hDim, name = "", reuse = None):
        with tf.variable_scope("attLayer" + name, reuse = reuse):         
            # projImages = ops.linear(images, inDim, hDim, name = "projImage")
            # projMemory = tf.expand_dims(ops.linear(memory, inDim, hDim, name = "projMemory"), axis = -2)       
            # if config.saMultiplicative:
            #     interactions = projImages * projMemory
            # else:
            #     interactions = tf.tanh(projImages + projMemory) 
            interactions, _ = ops.mul(images, memory, inDim, proj = {"dim": hDim, "shared": False}, 
                interMod = config.baselineAttType)
            
            attention = ops.inter2att(interactions, hDim)
            summary = ops.att2Smry(attention, images)            
            newMemory = memory + summary
        
        return newMemory 
Developer: stanfordnlp, Project: mac-network, Lines: 18, Source: model.py

Example 2: baseline

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def baseline(self, vecQuestions, questionDim, images, imageDim, hDim):
        with tf.variable_scope("baseline"):
            if config.baselineAtt:  
                memory = self.linear(vecQuestions, questionDim, hDim, name = "qProj")
                images = self.linear(images, imageDim, hDim, name = "iProj")

                for i in range(config.baselineAttNumLayers):
                    memory = self.baselineAttLayer(images, memory, hDim, hDim, 
                        name = "baseline%d" % i)
                memDim = hDim
            else:      
                images, imagesDim = ops.linearizeFeatures(images, self.H, self.W, 
                    imageDim, projDim = config.baselineProjDim)
                if config.baselineLSTM and config.baselineCNN:
                    memory = tf.concat([vecQuestions, images], axis = -1)
                    memDim = questionDim + imageDim
                elif config.baselineLSTM:
                    memory = vecQuestions
                    memDim = questionDim
                else: # config.baselineCNN
                    memory = images
                    memDim = imageDim 
                
        return memory, memDim 
Developer: stanfordnlp, Project: mac-network, Lines: 26, Source: model.py

Example 3: outputOp

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def outputOp(self, memory, vecQuestions, images, imageInDim):
        with tf.variable_scope("outputUnit"):            
            features = memory
            dim = config.memDim

            if config.outQuestion:
                eVecQuestions = ops.linear(vecQuestions, config.ctrlDim, config.memDim, name = "outQuestion") 
                features, dim = ops.concat(features, eVecQuestions, config.memDim, mul = config.outQuestionMul)
            
            if config.outImage:
                images, imagesDim = ops.linearizeFeatures(images, self.H, self.W, self.imageInDim, 
                    outputDim = config.outImageDim)
                images = ops.linear(images, config.memDim, config.outImageDim, name = "outImage")
                features = tf.concat([features, images], axis = -1)
                dim += config.outImageDim

        return features, dim 
Developer: stanfordnlp, Project: mac-network, Lines: 19, Source: model.py

Example 4: generator

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def generator(self, opts, noise, reuse=False):
        """Generator function, suitable for simple toy experiments.

        Args:
            noise: [num_points, dim] array, where dim is dimensionality of the
                latent noise space.
        Returns:
            [num_points, dim1, dim2, dim3] array, where the first coordinate
            indexes the points, which all are of the shape (dim1, dim2, dim3).
        """
        output_shape = self._data.data_shape
        num_filters = opts['g_num_filters']

        with tf.variable_scope("GENERATOR", reuse=reuse):
            h0 = ops.linear(opts, noise, num_filters, 'h0_lin')
            h0 = tf.nn.relu(h0)
            h1 = ops.linear(opts, h0, num_filters, 'h1_lin')
            h1 = tf.nn.relu(h1)
            h2 = ops.linear(opts, h1, np.prod(output_shape), 'h2_lin')
            h2 = tf.reshape(h2, [-1] + list(output_shape))

        if opts['input_normalize_sym']:
            return tf.nn.tanh(h2)
        else:
            return h2 
Developer: tolstikhin, Project: adagan, Lines: 27, Source: gan.py

Example 5: discriminator

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator(self, opts, input_,
                      prefix='DISCRIMINATOR', reuse=False):
        """Discriminator function, suitable for simple toy experiments.

        """
        shape = input_.get_shape().as_list()
        num_filters = opts['d_num_filters']
        assert len(shape) > 0, 'No inputs to discriminate.'

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.linear(opts, input_, num_filters, 'h0_lin')
            h0 = tf.nn.relu(h0)
            h1 = ops.linear(opts, h0, num_filters, 'h1_lin')
            h1 = tf.nn.relu(h1)
            h2 = ops.linear(opts, h1, 1, 'h2_lin')

        return h2 
Developer: tolstikhin, Project: adagan, Lines: 19, Source: gan.py

Example 6: discriminator

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Encoder function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters / 8, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = tf.nn.relu(h0)
            h1 = ops.conv2d(opts, h0, num_filters / 4, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = tf.nn.relu(h1)
            h2 = ops.conv2d(opts, h1, num_filters / 2, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = tf.nn.relu(h2)
            h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
            h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
            h3 = tf.nn.relu(h3)
            # Already has NaNs!!
            latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
            log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')

        return latent_mean, log_latent_sigmas 
Developer: tolstikhin, Project: adagan, Lines: 27, Source: vae.py

Example 7: discriminator

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0 
Developer: djsutherland, Project: opt-mmd, Lines: 23, Source: model_mmd.py

Example 8: discriminator_k

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator_k(self, image, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        #1024, 512, 128
        h0 = tf.nn.sigmoid(linear(image, 512, 'dk_h0_lin', stddev=self.config.init))
        h1 = tf.nn.sigmoid(linear(h0, 256, 'dk_h1_lin', stddev=self.config.init))
        h2 = tf.nn.sigmoid(linear(h1, 256, 'dk_h2_lin', stddev=self.config.init))
        h3 = tf.nn.sigmoid(linear(h2, 128, 'dk_h3_lin', stddev=self.config.init))
        h4 = tf.nn.relu(linear(h3, 64, 'dk_h4_lin', stddev=self.config.init))
        if self.config.use_gan:
          h5 = linear(h4, 1, 'dk_h5_lin', stddev=self.config.init)
          return image, h0, h1, h2, h3, h4, h5
        elif self.config.use_layer_kernel:
          return image, h0, h1, h2, h3, h4
        elif self.config.use_scale_kernel:
          return tf.concat(1, [image, (28.0 * 28.0/512.0) * h0, (28 * 28.0/256.0) * h1, (28.0 * 28.0/256.0) * h2,
                               (28.0 * 28.0/128.0) * h3, (28.0 * 28.0/64.0) * h4])

        else:
          return tf.concat(1, [image, h0, h1, h2, h3, h4]) 
Developer: djsutherland, Project: opt-mmd, Lines: 22, Source: model_mmd_fm.py

Example 9: discriminator_labeler

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables 
Developer: mkocaoglu, Project: CausalGAN, Lines: 19, Source: models.py

Example 10: discriminator_gen_labeler

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits,variables 
Developer: mkocaoglu, Project: CausalGAN, Lines: 19, Source: models.py

Example 11: discriminator_on_z

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def discriminator_on_z(image, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels,variables 
Developer: mkocaoglu, Project: CausalGAN, Lines: 19, Source: models.py

Example 12: _create_discriminator

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def _create_discriminator(self, x, train=True, reuse=False, name="discriminator"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            h = x
            for i in range(self.num_conv_layers):
                h = lrelu(batch_norm(conv2d(h, self.num_dis_feature_maps * (2 ** i),
                                            stddev=0.02, name="d_h{}_conv".format(i)),
                                     is_training=train,
                                     scope="d_bn{}".format(i)))

            dim = h.get_shape()[1:].num_elements()
            h = tf.reshape(h, [-1, dim])
            d_bin_logits = linear(h, 1, scope='d_bin_logits')
            d_mul_logits = linear(h, self.num_gens, scope='d_mul_logits')
        return d_bin_logits, d_mul_logits 
Developer: qhoangdl, Project: MGAN, Lines: 19, Source: models.py

Example 13: forward

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def forward(self, x, meta_loss=None, meta_step_size=None, stop_gradient=False):

        x = linear(inputs=x,
                   weight=self.fc1.weight,
                   bias=self.fc1.bias,
                   meta_loss=meta_loss,
                   meta_step_size=meta_step_size,
                   stop_gradient=stop_gradient)

        x = F.relu(x, inplace=True)

        x = linear(inputs=x,
                   weight=self.fc2.weight,
                   bias=self.fc2.bias,
                   meta_loss=meta_loss,
                   meta_step_size=meta_step_size,
                   stop_gradient=stop_gradient)

        end_points = {'Predictions': F.softmax(input=x, dim=-1)}

        return x, end_points 
Developer: HAHA-DL, Project: MLDG, Lines: 23, Source: mlp.py
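The linear used in the example above is not a plain fully connected op but a PyTorch functional wrapper from the MLDG-style ops module that can take an inner-loop meta-gradient step on the parameters before applying them. Its exact implementation is not shown here; the following is only a rough sketch, under the assumption that it follows the usual MAML/MLDG pattern.

import torch.nn.functional as F
from torch.autograd import grad

def linear(inputs, weight, bias, meta_loss=None, meta_step_size=None, stop_gradient=False):
    # Sketch only: if a meta loss is given, take one gradient step on the
    # parameters (a MAML-style inner update) before the affine transform.
    if meta_loss is not None:
        grad_w, grad_b = grad(meta_loss, [weight, bias],
                              create_graph=not stop_gradient, retain_graph=True)
        if stop_gradient:
            grad_w, grad_b = grad_w.detach(), grad_b.detach()
        weight = weight - meta_step_size * grad_w
        bias = bias - meta_step_size * grad_b
    return F.linear(inputs, weight, bias)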

Example 14: dcgan_encoder

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def dcgan_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = inputs
    for i in xrange(num_layers):
        scale = 2**(num_layers - i - 1)
        layer_x = ops.conv2d(opts, layer_x, num_units / scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hfinal_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts, layer_x,
                                opts['zdim'], scope='log_sigmas_lin')
        return mean, log_sigmas 
Developer: tolstikhin, Project: wae, Lines: 22, Source: models.py

Example 15: memAutoEnc

# Required module: import ops [as alias]
# Or: from ops import linear [as alias]
def memAutoEnc(self, newMemory, info, control, name = "", reuse = None):
        with tf.variable_scope("memAutoEnc" + name, reuse = reuse):
            # inputs to auto encoder
            features = info if config.autoEncMemInputs == "INFO" else newMemory
            features = ops.linear(features, config.memDim, config.ctrlDim, 
                act = config.autoEncMemAct, name = "aeMem")

            # reconstruct control
            if config.autoEncMemLoss == "CONT":
                loss = tf.reduce_mean(tf.squared_difference(control, features))
            else:                    
                interactions, dim = ops.mul(self.questionCntxWords, features, config.ctrlDim, 
                    concat = {"x": config.autoEncMemCnct}, mulBias = config.mulBias, name = "aeMem")
                
                logits = ops.inter2logits(interactions, dim)
                logits = self.expMask(logits, self.questionLengths)

                # reconstruct word attentions
                if config.autoEncMemLoss == "PROB":
                    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                        labels = self.attentions["question"][-1], logits = logits))
                
                # reconstruct control through words attentions
                else:
                    attention = tf.nn.softmax(logits)
                    summary = ops.att2Smry(attention, self.questionCntxWords)
                    loss = tf.reduce_mean(tf.squared_difference(control, summary))
        
        return loss 
Developer: stanfordnlp, Project: mac-network, Lines: 31, Source: mac_cell.py


Note: The ops.linear method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this article without permission.