

Python tensorflow.matmul Method Code Examples

This article collects typical usage examples of the Python method tensorflow.matmul. If you are wondering what tensorflow.matmul does, how to call it, or how it is used in real code, the curated examples below should help. You can also browse other usage examples from the tensorflow module.


Fifteen code examples of tensorflow.matmul are shown below, sorted by popularity by default.
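
Before the collected examples, here is a minimal sketch of the call itself (TF 1.x style, matching the snippets below; the shapes and values are illustrative):

import numpy as np
import tensorflow as tf

# tf.matmul contracts the last axis of `a` with the second-to-last axis of
# `b`: [m, k] x [k, n] -> [m, n]. transpose_a / transpose_b transpose an
# argument before multiplying, avoiding a separate tf.transpose op.
a = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
b = tf.constant(np.ones((3, 4), dtype=np.float32))

c = tf.matmul(a, b)                                      # shape [2, 4]
d = tf.matmul(b, a, transpose_a=True, transpose_b=True)  # [4, 3] x [3, 2] -> [4, 2]

with tf.Session() as sess:
    print(sess.run(c).shape)  # (2, 4)
    print(sess.run(d).shape)  # (4, 2)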

Example 1: test_fgm_gradient_max

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def test_fgm_gradient_max(self):
        input_dim = 2
        num_classes = 3
        batch_size = 4
        rng = np.random.RandomState([2017, 8, 23])
        x = tf.placeholder(tf.float32, [batch_size, input_dim])
        weights = tf.placeholder(tf.float32, [input_dim, num_classes])
        logits = tf.matmul(x, weights)
        probs = tf.nn.softmax(logits)
        adv_x = fgm(x, probs)
        random_example = rng.randint(batch_size)
        random_feature = rng.randint(input_dim)
        output = tf.slice(adv_x, [random_example, random_feature], [1, 1])
        dx, = tf.gradients(output, x)
        # The following line catches GitHub issue #243
        self.assertIsNotNone(dx)
        dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights]))
        ground_truth = np.zeros((batch_size, input_dim))
        ground_truth[random_example, random_feature] = 1.
        self.assertClose(dx, ground_truth) 
Developer: StephanZheng | Project: neural-fingerprinting | Lines: 22 | Source: test_attacks_tf.py

Example 2: fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def fprop(self, x):

        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Developer: StephanZheng | Project: neural-fingerprinting | Lines: 27 | Source: madry_mnist_model.py

Example 3: nn_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    # Group everything belonging to one layer under a single name scope
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Weights, plus summaries to monitor them
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries(weights, layer_name+'/weights')

        with tf.name_scope('biases'):
            # Biases, plus summaries to monitor them
            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))
            variable_summaries(biases, layer_name + '/biases')

        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            # Record the distribution of layer outputs before the activation
            tf.summary.histogram(layer_name + '/pre_activations', preactivate)
        
        activations = act(preactivate, name='activation')
        # Record the distribution of layer outputs after the activation
        tf.summary.histogram(layer_name + '/activations', activations)
        return activations 
Developer: wdxtub | Project: deep-learning-note | Lines: 24 | Source: mnist_histogram.py
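
A hypothetical call site for nn_layer, stacking a ReLU hidden layer and a linear output layer (the layer sizes are illustrative; variable_summaries is assumed to be defined in the same file):

x = tf.placeholder(tf.float32, [None, 784], name='x-input')
hidden1 = nn_layer(x, 784, 500, 'layer1')                       # ReLU by default
logits = nn_layer(hidden1, 500, 10, 'layer2', act=tf.identity)  # linear output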

Example 4: createLinearModel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def createLinearModel(dimension):
    np.random.seed(1024)
    # Define the placeholders x and y
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    # Writing the model in matrix form speeds up computation considerably
    y = tf.placeholder(tf.float64, shape=[None, 1], name='y')
    # Define the parameter estimate and the prediction
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name='y_pred')
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {
        'loss_function': loss,
        'independent_variable': x,
        'dependent_variable': y,
        'prediction': yPred,
        'model_params': betaPred
    }
    return model 
Developer: wdxtub | Project: deep-learning-note | Lines: 21 | Source: 2_tf_linear.py
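
A possible training loop around the returned model dict (the optimizer, learning rate, and synthetic data are illustrative assumptions, not part of the original script):

import numpy as np
import tensorflow as tf

model = createLinearModel(dimension=3)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(model['loss_function'])

# Synthetic data drawn from a known linear model, y = X beta + noise.
X = np.random.random((100, 3))
Y = X.dot(np.array([[1.0], [2.0], [3.0]])) + 0.01 * np.random.randn(100, 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={model['independent_variable']: X,
                                      model['dependent_variable']: Y})
    print(sess.run(model['model_params']))  # should approach [[1], [2], [3]]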

Example 5: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def __call__(self, inputs, state, scope=None):
    """GRU cell with layer normalization."""
    input_dim = inputs.get_shape().as_list()[1]
    num_units = self._num_units

    with tf.variable_scope(scope or "gru_cell"):
      with tf.variable_scope("gates"):
        w_h = tf.get_variable(
            "w_h", [num_units, 2 * num_units],
            initializer=self._w_h_initializer())
        w_x = tf.get_variable(
            "w_x", [input_dim, 2 * num_units],
            initializer=self._w_x_initializer(input_dim))
        z_and_r = (_layer_norm(tf.matmul(state, w_h), scope="layer_norm/w_h") +
                   _layer_norm(tf.matmul(inputs, w_x), scope="layer_norm/w_x"))
        z, r = tf.split(tf.sigmoid(z_and_r), 2, 1)
      with tf.variable_scope("candidate"):
        w = tf.get_variable(
            "w", [input_dim, num_units], initializer=self._w_initializer)
        u = tf.get_variable(
            "u", [num_units, num_units], initializer=self._u_initializer)
        h_hat = (r * _layer_norm(tf.matmul(state, u), scope="layer_norm/u") +
                 _layer_norm(tf.matmul(inputs, w), scope="layer_norm/w"))
      new_h = (1 - z) * state + z * self._activation(h_hat)
    return new_h, new_h 
Developer: ringringyi | Project: DOTA_models | Lines: 27 | Source: gru_cell.py
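
The snippet references a private _layer_norm helper and per-weight initializers that are not shown. A plausible minimal _layer_norm (my assumption about its shape, not the project's actual code):

def _layer_norm(x, scope=None):
  """Normalize each row of x to zero mean / unit variance, then scale and shift."""
  with tf.variable_scope(scope or "layer_norm"):
    dim = x.get_shape().as_list()[1]
    gamma = tf.get_variable("gamma", [dim], initializer=tf.ones_initializer())
    beta = tf.get_variable("beta", [dim], initializer=tf.zeros_initializer())
    mean, variance = tf.nn.moments(x, [1], keep_dims=True)
    return gamma * (x - mean) / tf.sqrt(variance + 1e-6) + beta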

Example 6: get_hint_pool_idxs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def get_hint_pool_idxs(self, normalized_query):
    """Get small set of idxs to compute nearest neighbor queries on.

    This is an expensive look-up on the whole memory that is used to
    avoid more expensive operations later on.

    Args:
      normalized_query: A Tensor of shape [None, key_dim].

    Returns:
      A Tensor of shape [None, choose_k] of indices in memory
      that are closest to the queries.

    """
    # look up in large memory, no gradients
    with tf.device(self.nn_device):
      similarities = tf.matmul(tf.stop_gradient(normalized_query),
                               self.mem_keys, transpose_b=True, name='nn_mmul')
    _, hint_pool_idxs = tf.nn.top_k(
        tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
    return hint_pool_idxs 
Developer: ringringyi | Project: DOTA_models | Lines: 23 | Source: memory.py
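
The pattern above -- matmul with transpose_b=True over unit-normalized vectors, then top_k -- is the standard batched cosine-similarity nearest-neighbor lookup. A standalone sketch with illustrative shapes:

queries = tf.nn.l2_normalize(tf.random_normal([5, 16]), 1)      # [num_queries, key_dim]
mem_keys = tf.nn.l2_normalize(tf.random_normal([1000, 16]), 1)  # [memory_size, key_dim]

# Row i holds the cosine similarity of query i to every memory key.
similarities = tf.matmul(queries, mem_keys, transpose_b=True)   # [5, 1000]
_, idxs = tf.nn.top_k(similarities, k=10)                       # [5, 10]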

Example 7: combine_setup

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neurons=None,
                  num_goal_neurons=None):
  with tf.name_scope(name + '_' + combine_type):
    if combine_type == 'add':
      # Element-wise addition of the goal and image features
      out = embed_img + embed_goal

    elif combine_type == 'multiply':
      # Reshape the image embedding into [-1, k, num_goal_neurons] and
      # batch-multiply with the goal embedding (integer division keeps the
      # reshape dimension an int under Python 3)
      re_embed_img = tf.reshape(
          embed_img, shape=[-1, num_img_neurons // num_goal_neurons,
                            num_goal_neurons])
      re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1])
      x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False)
      out = slim.flatten(x)
    elif combine_type == 'none' or combine_type == 'imgonly':
      out = embed_img
    elif combine_type == 'goalonly':
      out = embed_goal
    else:
      logging.fatal('Undefined combine_type: %s', combine_type)
  return out 
Developer: ringringyi | Project: DOTA_models | Lines: 24 | Source: vision_baseline_lstm.py

Example 8: pass_through_embedding_matrix

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def pass_through_embedding_matrix(act_block, embedding_matrix, step_idx):
  """Passes the activations through the embedding_matrix.

  Takes care to handle out of bounds lookups.

  Args:
    act_block: matrix of activations.
    embedding_matrix: matrix of weights.
    step_idx: vector containing step indices, with -1 indicating out of bounds.

  Returns:
    the embedded activations.
  """
  # Indicator vector for out of bounds lookups.
  step_idx_mask = tf.expand_dims(tf.equal(step_idx, -1), -1)

  # Pad the last column of the activation vectors with the indicator.
  act_block = tf.concat([act_block, tf.to_float(step_idx_mask)], 1)
  return tf.matmul(act_block, embedding_matrix) 
Developer: ringringyi | Project: DOTA_models | Lines: 21 | Source: network_units.py
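
To make the out-of-bounds handling concrete: appending the indicator column means embedding_matrix needs one extra row, which is added into the product exactly when step_idx == -1. A toy check (the values are illustrative, not from the project):

import tensorflow as tf

act_block = tf.constant([[1.0, 2.0], [3.0, 4.0]])
embedding_matrix = tf.constant([[1.0, 0.0],   # weight row for activation 0
                                [0.0, 1.0],   # weight row for activation 1
                                [9.0, 9.0]])  # extra row, used only when out of bounds
step_idx = tf.constant([0, -1])

embedded = pass_through_embedding_matrix(act_block, embedding_matrix, step_idx)
with tf.Session() as sess:
    print(sess.run(embedded))  # [[1., 2.], [12., 13.]] -- row 1 picked up the extra row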

Example 9: get_cell

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def get_cell(self):
    self.cell_input_dim = self.internal_dim

    def mlp(cell_input, prev_internal_state):
      w1 = tf.get_variable('w1', [self.cell_input_dim, self.internal_dim])
      b1 = tf.get_variable('b1', [self.internal_dim])

      w2 = tf.get_variable('w2', [self.internal_dim, self.internal_dim])
      b2 = tf.get_variable('b2', [self.internal_dim])

      w3 = tf.get_variable('w3', [self.internal_dim, self.internal_dim])
      b3 = tf.get_variable('b3', [self.internal_dim])

      proj = tf.get_variable(
          'proj', [self.internal_dim, self.output_dim])

      # Two tanh hidden layers followed by a linear output projection
      # (w3 and b3 are declared above but unused in this snippet).
      hidden = cell_input
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w1), b1))
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w2), b2))

      output = tf.matmul(hidden, proj)

      return output, hidden

    return mlp 
Developer: ringringyi | Project: DOTA_models | Lines: 27 | Source: policy.py

Example 10: gaussian_kernel_matrix

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def gaussian_kernel_matrix(x, y, sigmas):
  r"""Computes a Guassian Radial Basis Kernel between the samples of x and y.

  We create a sum of multiple gaussian kernels each having a width sigma_i.

  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    sigmas: a tensor of floats which denote the widths of each of the
      gaussians in the kernel.
  Returns:
    A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel.
  """
  beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))

  dist = compute_pairwise_distances(x, y)

  s = tf.matmul(beta, tf.reshape(dist, (1, -1)))

  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) 
Developer: ringringyi | Project: DOTA_models | Lines: 22 | Source: utils.py
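
The snippet depends on a compute_pairwise_distances helper that is not reproduced here. A plausible stand-in using the usual squared-distance expansion, plus a usage sketch (both are assumptions, not the project's code):

def compute_pairwise_distances(x, y):
  """Squared Euclidean distance between every row of x and every row of y."""
  # ||x_i - y_j||^2 = ||x_i||^2 - 2 <x_i, y_j> + ||y_j||^2
  x_sq = tf.reduce_sum(tf.square(x), 1, keepdims=True)   # [num_x, 1]
  y_sq = tf.reduce_sum(tf.square(y), 1, keepdims=True)   # [num_y, 1]
  return x_sq - 2.0 * tf.matmul(x, y, transpose_b=True) + tf.transpose(y_sq)

x = tf.random_normal([8, 4])
y = tf.random_normal([6, 4])
sigmas = tf.constant([1.0, 5.0, 10.0])
kernel = gaussian_kernel_matrix(x, y, sigmas)            # shape [8, 6]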

Example 11: fc

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    input_shape = inputs.get_shape().as_list()
    if len(input_shape) == 4:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[1] * input_shape[2] * input_shape[3], output_size], dtype=tf.float32,
                             stddev=stddev),
            name='weights')
        inputs = tf.reshape(inputs, [-1, fc_weights.get_shape().as_list()[0]])
    else:
        fc_weights = tf.Variable(tf.random_normal([input_shape[-1], output_size], dtype=tf.float32, stddev=stddev),
                                 name='weights')

    fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32), name='biases')
    fc_layer = tf.matmul(inputs, fc_weights)
    fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
    if activation_func:
        fc_layer = activation_func(fc_layer)
    return fc_layer 
Developer: jireh-father | Project: tensorflow-alexnet | Lines: 20 | Source: ops.py

Example 12: vq_nearest_neighbor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Developer: akzaidi | Project: fine-lm | Lines: 21 | Source: transformer_nat.py
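
The dist line above relies on the identity ||x - m||^2 = ||x||^2 - 2<x, m> + ||m||^2, batching the cross term through one matmul instead of materializing all pairwise differences. A quick NumPy check of the expansion (standalone, not project code):

import numpy as np

x = np.random.randn(4, 8).astype(np.float32)        # stand-in for encoder outputs
means = np.random.randn(16, 8).astype(np.float32)   # stand-in for the codebook

expanded = (np.sum(x ** 2, -1, keepdims=True)
            + np.sum(means ** 2, -1) - 2.0 * x.dot(means.T))
direct = np.sum((x[:, None, :] - means[None, :, :]) ** 2, -1)
assert np.allclose(expanded, direct, atol=1e-4)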

Example 13: rank_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Developer: akzaidi | Project: fine-lm | Lines: 20 | Source: slicenet.py
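
A minimal usage sketch for rank_loss with toy embeddings (shapes are illustrative; the inputs need not be pre-normalized, since the function normalizes internally):

sentence_emb = tf.random_normal([32, 128])
image_emb = tf.random_normal([32, 128])
loss = rank_loss(sentence_emb, image_emb, margin=0.2)  # scalar training loss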

Example 14: compute_mfcc

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. Window into overlapping frames of 400 samples with a 160-sample hop
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Developer: rtaori | Project: Black-Box-Audio | Lines: 42 | Source: tf_logits.py

Example 15: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import matmul [as alias]
def call(self, inputs, mask=None, **kwargs):

        if self.supports_masking:
            if mask is None:
                raise ValueError(
                    "When supports_masking=True,input must support masking")
            queries, keys = inputs
            key_masks = tf.expand_dims(mask[-1], axis=1)

        else:

            queries, keys, keys_length = inputs
            hist_len = keys.get_shape()[1]
            key_masks = tf.sequence_mask(keys_length, hist_len)

        attention_score = LocalActivationUnit(
            self.hidden_size, self.activation, 0, 1, False, 1024,)([queries, keys])

        outputs = tf.transpose(attention_score, (0, 2, 1))

        if self.weight_normalization:
            paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
        else:
            paddings = tf.zeros_like(outputs)

        outputs = tf.where(key_masks, outputs, paddings)

        if self.weight_normalization:
            outputs = tf.nn.softmax(outputs)

        outputs = tf.matmul(outputs, keys)

        return outputs 
Developer: ShenDezhou | Project: icme2019 | Lines: 35 | Source: sequence.py
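
The masking idiom above -- overwrite padded positions with a large negative number before softmax so they receive near-zero weight -- in a standalone sketch (toy values, not project code):

import tensorflow as tf

scores = tf.constant([[2.0, 1.0, 0.5, 0.1]])       # [batch, seq_len]
mask = tf.sequence_mask([2], maxlen=4)             # [[True, True, False, False]]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)

masked = tf.where(mask, scores, paddings)
weights = tf.nn.softmax(masked)                    # masked positions get ~0 weight

with tf.Session() as sess:
    print(sess.run(weights))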


Note: The tensorflow.matmul examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use of the code should follow the corresponding project's License. Do not reproduce this article without permission.