

Python tensorflow.stack Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.stack method in Python. If you are wondering how tensorflow.stack is used in practice, what it is for, or what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The sections below show 15 code examples of tensorflow.stack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
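
Before the project examples, here is a minimal, self-contained sketch of the basic behavior of tf.stack. It is written against the TensorFlow 1.x API used throughout the examples below; the values and shapes are purely illustrative.

import tensorflow as tf

# tf.stack joins a list of same-shaped tensors along a new axis.
a = tf.constant([1, 2, 3])
b = tf.constant([4, 5, 6])

stacked_rows = tf.stack([a, b], axis=0)  # shape (2, 3)
stacked_cols = tf.stack([a, b], axis=1)  # shape (3, 2)

with tf.Session() as sess:  # TF 1.x session; in TF 2.x eager mode no session is needed
    print(sess.run(stacked_rows))  # [[1 2 3] [4 5 6]]
    print(sess.run(stacked_cols))  # [[1 4] [2 5] [3 6]]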

Example 1: _create_autosummary_var

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
# Also used below: import numpy as np
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Developer: zalandoresearch | Project: disentangling_conditional_gans | Lines: 24 | Source: tfutil.py

Example 2: softmax_unet

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
# Also used below: Softmax and Multiply (e.g. from tensorflow.keras.layers) and the project's apply_unet
def softmax_unet(input_tensor, instruments, params={}):
    """ Apply softmax to multitrack unet in order to have mask suming to one.

    :param input_tensor: Tensor to apply blstm to.
    :param instruments: Iterable that provides a collection of instruments.
    :param params: (Optional) dict of BLSTM parameters.
    :returns: Created output tensor dict.
    """
    logit_mask_list = []
    for instrument in instruments:
        out_name = f'{instrument}_spectrogram'
        logit_mask_list.append(
            apply_unet(
                input_tensor,
                output_name=out_name,
                params=params,
                output_mask_logit=True))
    masks = Softmax(axis=4)(tf.stack(logit_mask_list, axis=4))
    output_dict = {}
    for i, instrument in enumerate(instruments):
        out_name = f'{instrument}_spectrogram'
        output_dict[out_name] = Multiply(name=out_name)([
            masks[..., i],
            input_tensor])
    return output_dict 
Developer: deezer | Project: spleeter | Lines: 27 | Source: unet.py

Example 3: loop_decode

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def loop_decode(self):
        # decoder_initial_state: Tuple Tensor (c,h) of size [batch_size x cell.state_size]
        # decoder_first_input: Tensor [batch_size x cell.state_size]

        # Loop the decoding process and collect results
        s,i = self.decoder_initial_state,  tf.cast(self.decoder_first_input,tf.float32)
        for step in range(self.seq_length):
            s, i = self.decode(s,i,step)

        # Return to start
        self.positions.append(self.first_city)

        # Stack visited indices
        self.positions=tf.stack(self.positions,axis=1)  # [Batch,seq_length+1]

        # Sum log_softmax over output steps
        self.log_softmax=tf.add_n(self.log_softmax)  # [Batch,seq_length]

        # Stack attending & pointing distribution
        self.attending=tf.stack(self.attending,axis=1) # [Batch,seq_length,seq_length]
        self.pointing=tf.stack(self.pointing,axis=1) # [Batch,seq_length,seq_length]
        
        # Return stacked lists of visited_indices and log_softmax for backprop
        return self.positions,self.log_softmax 
Developer: MichelDeudon | Project: neural-combinatorial-optimization-rl-tensorflow | Lines: 26 | Source: decoder.py

Example 4: images_to_sequence

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def images_to_sequence(tensor):
  """Convert a batch of images into a batch of sequences.

  Args:
    tensor: a (num_images, height, width, depth) tensor

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  transposed = tf.transpose(tensor, [2, 0, 1, 3])

  shapeT = tf.shape(transposed)
  shapeL = transposed.get_shape().as_list()
  # Calculate the output size of the reshaped tensor
  n_shape = tf.stack([
      shapeT[0],
      shapeT[1]*shapeT[2],
      shapeL[3]
  ])
  reshaped = tf.reshape(transposed, n_shape)
  return reshaped 
Developer: TobiasGruening | Project: ARU-Net | Lines: 23 | Source: layers.py
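
A small usage sketch for images_to_sequence as defined above, only to make the shape transformation concrete. The input shape is an illustrative assumption, and a TensorFlow 1.x session is assumed as in the rest of this page.

import tensorflow as tf

# Illustrative batch: 2 images of height 4, width 5, depth 3.
images = tf.zeros([2, 4, 5, 3])
seq = images_to_sequence(images)  # uses the function defined above

with tf.Session() as sess:
    print(sess.run(tf.shape(seq)))  # expected [5, 8, 3], i.e. (width, num_images*height, depth)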

Example 5: sequence_to_images

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def sequence_to_images(tensor, num_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batchesRNN, depth) sequence tensor
    num_batches: the number of image batches

  Returns:
    (num_batches, height, width, depth) tensor
  """

  shapeT = tf.shape(tensor)
  shapeL = tensor.get_shape().as_list()
  # Calculate the output size of the reshaped tensor
  height = tf.to_int32(shapeT[1] / num_batches)
  n_shape = tf.stack([
      shapeT[0],
      num_batches,
      height,
      shapeL[2]
  ])

  reshaped = tf.reshape(tensor, n_shape)
  return tf.transpose(reshaped, [1, 2, 0, 3]) 
Developer: TobiasGruening | Project: ARU-Net | Lines: 26 | Source: layers.py

Example 6: lstm_online

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def lstm_online(cell_fn, num_steps, inputs, state, varscope):
  # inputs is B x num_steps x C, C channels.
  # state is 2 tuple with B x 1 x C1, B x 1 x C2 
  # Output state is always B x 1 x C
  inputs = tf.unstack(inputs, axis=1, num=num_steps)
  state = tf.unstack(state, axis=1, num=1)[0]
  outputs = [] 
  
  if num_steps > 1: 
    varscope.reuse_variables()
  
  for s in range(num_steps):
    output, state = cell_fn(inputs[s], state)
    outputs.append(output)
  outputs = tf.stack(outputs, axis=1)
  state = tf.stack([state], axis=1)
  return outputs, state 
Developer: ringringyi | Project: DOTA_models | Lines: 19 | Source: vision_baseline_lstm.py

Example 7: convert_network_state_tensorarray

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def convert_network_state_tensorarray(tensorarray):
  """Converts a source TensorArray to a source Tensor.

  Performs a permutation between the steps * [stride, D] shape of a
  source TensorArray and the (flattened) [stride * steps, D] shape of
  a source Tensor.

  The TensorArrays used during recurrence have an additional zeroth step that
  needs to be removed.

  Args:
    tensorarray: TensorArray object to be converted.

  Returns:
    Tensor object after conversion.
  """
  tensor = tensorarray.stack()  # Results in a [steps, stride, D] tensor.
  tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1])  # Lop off the 0th step.
  tensor = tf.transpose(tensor, [1, 0, 2])  # Switch steps and stride.
  return tf.reshape(tensor, [-1, tf.shape(tensor)[2]]) 
Developer: ringringyi | Project: DOTA_models | Lines: 22 | Source: network_units.py
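
A rough usage sketch for convert_network_state_tensorarray as defined above. The TensorArray contents are invented for illustration, and TensorFlow 1.x is assumed.

import tensorflow as tf

# Three steps of shape [stride=2, D=4]; step 0 is the extra zeroth step that gets dropped.
ta = tf.TensorArray(tf.float32, size=3)
ta = ta.write(0, tf.zeros([2, 4]))
ta = ta.write(1, tf.ones([2, 4]))
ta = ta.write(2, 2.0 * tf.ones([2, 4]))

flat = convert_network_state_tensorarray(ta)  # expected shape [stride * steps, D] = [4, 4]

with tf.Session() as sess:
    print(sess.run(tf.shape(flat)))  # [4 4]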

Example 8: one_hot_encoding

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
Developer: ringringyi | Project: DOTA_models | Lines: 21 | Source: ops.py
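
Side note, not from the original project: tf.stack([batch_size, num_classes]) above only builds the dense output shape for tf.sparse_to_dense. In later TensorFlow versions the same encoding is usually obtained with tf.one_hot; a rough sketch for comparison (TensorFlow 1.x session assumed):

import tensorflow as tf

labels = tf.constant([0, 2, 1])  # illustrative labels
onehot = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0)

with tf.Session() as sess:
    print(sess.run(onehot))  # [[1. 0. 0.] [0. 0. 1.] [0. 1. 0.]]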

Example 9: test_batch_decode

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def test_batch_decode(self):
    mock_anchor_corners = tf.constant(
        [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
    mock_anchors = box_list.BoxList(mock_anchor_corners)
    mock_box_coder = MockBoxCoder()

    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]

    encoded_boxes_list = [mock_box_coder.encode(
        box_list.BoxList(tf.constant(boxes)), mock_anchors)
                          for boxes in expected_boxes]
    encoded_boxes = tf.stack(encoded_boxes_list)
    decoded_boxes = box_coder.batch_decode(
        encoded_boxes, mock_box_coder, mock_anchors)

    with self.test_session() as sess:
      decoded_boxes_result = sess.run(decoded_boxes)
      self.assertAllClose(expected_boxes, decoded_boxes_result) 
Developer: ringringyi | Project: DOTA_models | Lines: 21 | Source: box_coder_test.py

Example 10: __init__

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def __init__(self, x_bxu, z_size, name, var_min=0.0):
    """Create an input dependent diagonal Gaussian distribution.

    Args:
      x_bxu: The input tensor from which the mean and variance are computed,
        via a linear transformation of x.  I.e.
          mu = Wx + b, log(var) = Mx + c
      z_size: The size of the distribution.
      name:  The name to prefix to learned variables.
      var_min (optional): Minimal variance allowed.  This is an additional
        way to control the amount of information getting through the stochastic
        layer.
    """
    size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
    self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean"))
    logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar"))
    if var_min > 0.0:
      logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
    self.logvar_bxn = logvar_bxn

    self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
    self.noise_bxn.set_shape([None, z_size])
    self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn 
Developer: ringringyi | Project: DOTA_models | Lines: 25 | Source: distributions.py

Example 11: combine

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Developer: akzaidi | Project: fine-lm | Lines: 26 | Source: expert_utils.py

Example 12: select_dim_value

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]
        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])
        # then index as ([1,2,3,...,size], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t 
Developer: distillpub | Project: post--memorization-in-rnns | Lines: 22 | Source: select_dim_value.py
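
A minimal usage sketch for select_dim_value as defined above; the example tensors are made up and TensorFlow 1.x is assumed.

import tensorflow as tf

# x has shape (2, 3); indices pick one entry along the last dimension of each row.
x = tf.constant([[10., 11., 12.],
                 [20., 21., 22.]])
indices = tf.constant([2, 0])

picked = select_dim_value(x, indices)  # expected [12., 20.]

with tf.Session() as sess:
    print(sess.run(picked))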

Example 13: compute_mfcc

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
# Also used below: import numpy as np
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. Window into overlapping frames of 400 samples with a 160-sample hop
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Developer: rtaori | Project: Black-Box-Audio | Lines: 42 | Source: tf_logits.py

Example 14: get_logits

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
# Also used below: import numpy as np and the project's DeepSpeech module
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFCC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Developer: rtaori | Project: Black-Box-Audio | Lines: 42 | Source: tf_logits.py

Example 15: call

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import stack [as alias]
# Also used below: the Keras backend as K (e.g. from tensorflow.keras import backend as K)
def call(self, inputs, **kwargs):
        if K.ndim(inputs) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

        querys = tf.tensordot(inputs, self.W_Query,
                              axes=(-1, 0))  # None F D*head_num
        keys = tf.tensordot(inputs, self.W_key, axes=(-1, 0))
        values = tf.tensordot(inputs, self.W_Value, axes=(-1, 0))

        # head_num None F D
        querys = tf.stack(tf.split(querys, self.head_num, axis=2))
        keys = tf.stack(tf.split(keys, self.head_num, axis=2))
        values = tf.stack(tf.split(values, self.head_num, axis=2))

        inner_product = tf.matmul(
            querys, keys, transpose_b=True)  # head_num None F F
        self.normalized_att_scores = tf.nn.softmax(inner_product)

        result = tf.matmul(self.normalized_att_scores,
                           values)  # head_num None F D
        result = tf.concat(tf.split(result, self.head_num, ), axis=-1)
        result = tf.squeeze(result, axis=0)  # None F D*head_num

        if self.use_res:
            result += tf.tensordot(inputs, self.W_Res, axes=(-1, 0))
        result = tf.nn.relu(result)

        return result 
Developer: ShenDezhou | Project: icme2019 | Lines: 31 | Source: interaction.py


Note: The tensorflow.stack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code, and do not reproduce this compilation without permission.