

Python tensorflow.reshape Method Code Examples

This article collects typical usage examples of the tensorflow.reshape method in Python. If you are wondering what exactly tensorflow.reshape does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples from the tensorflow module that the method belongs to.


The following presents 15 code examples of the tensorflow.reshape method, ordered by popularity by default.
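Before diving into the examples, here is a minimal, self-contained sketch of the method itself (assuming TensorFlow 1.x, which all of the examples below use): tf.reshape rearranges a tensor into a new shape without changing its data, and a -1 entry lets TensorFlow infer that dimension.

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(12), dtype=tf.float32)   # shape (12,)
matrix = tf.reshape(x, [3, 4])                      # shape (3, 4)
flat = tf.reshape(matrix, [-1])                     # -1 infers the size: shape (12,)

with tf.Session() as sess:
    print(sess.run(tf.shape(matrix)))   # [3 4]
    print(sess.run(tf.shape(flat)))     # [12]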

Example 1: _forward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def _forward(self):
        inp = self.inp.out
        shape = inp.get_shape().as_list()
        _, h, w, c = shape
        s = self.lay.stride
        out = list()
        for i in range(int(h/s)):
            row_i = list()
            for j in range(int(w/s)):
                si, sj = s * i, s * j
                boxij = inp[:, si: si+s, sj: sj+s,:]
                flatij = tf.reshape(boxij, [-1,1,1,c*s*s])
                row_i += [flatij]
            out += [tf.concat(row_i, 2)]

        self.out = tf.concat(out, 1) 
Author: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines of code: 18, Source file: convolution.py
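The loop above rearranges each s×s spatial block into the channel dimension (a YOLO-style "reorg" layer). As a side note, the same transformation can also be expressed with the built-in tf.space_to_depth, whose output channel ordering matches the row-major flattening used above. A minimal sketch with illustrative shapes (not taken from the original project):

import tensorflow as tf

inp = tf.placeholder(tf.float32, [None, 26, 26, 64])
reorg = tf.space_to_depth(inp, block_size=2)   # -> [None, 13, 13, 256]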

Example 2: train_lr_rfeinman

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Author: StephanZheng, Project: neural-fingerprinting, Lines of code: 27, Source file: util.py
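The function itself only needs NumPy and scikit-learn (numpy as np and sklearn.linear_model.LogisticRegressionCV must be imported alongside it). A hypothetical call with synthetic arrays (sizes and values are made up purely for illustration) shows the expected 1-D inputs and how the fitted model can be reused:

import numpy as np
from sklearn.linear_model import LogisticRegressionCV

densities_pos = np.random.randn(100) + 1.0   # e.g. scores for positive samples
densities_neg = np.random.randn(100)         # e.g. scores for negative samples
uncerts_pos = np.random.rand(100)
uncerts_neg = np.random.rand(100)

values, labels, lr = train_lr_rfeinman(
    densities_pos, densities_neg, uncerts_pos, uncerts_neg)
scores = lr.predict_proba(values)[:, 1]       # probability of the positive class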

Example 3: fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def fprop(self, x):

        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Author: StephanZheng, Project: neural-fingerprinting, Lines of code: 27, Source file: madry_mnist_model.py

Example 4: preprocess_batch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def preprocess_batch(images_batch, preproc_func=None):
    """
    Creates a preprocessing graph for a batch given a function that processes
    a single image.

    :param images_batch: A tensor for an image batch.
    :param preproc_func: (optional function) A function that takes in a
        tensor and returns a preprocessed input.
    """
    if preproc_func is None:
        return images_batch

    with tf.variable_scope('preprocess'):
        images_list = tf.split(images_batch, int(images_batch.shape[0]))
        result_list = []
        for img in images_list:
            reshaped_img = tf.reshape(img, img.shape[1:])
            processed_img = preproc_func(reshaped_img)
            result_list.append(tf.expand_dims(processed_img, axis=0))
        result_images = tf.concat(result_list, axis=0)
    return result_images 
Author: StephanZheng, Project: neural-fingerprinting, Lines of code: 23, Source file: utils.py
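A short usage sketch: with a statically-shaped batch (tf.split needs a concrete batch size here), any single-image function can be plugged in. The choice of tf.image.per_image_standardization below is a hypothetical example, not something the original project prescribes.

import tensorflow as tf

images = tf.placeholder(tf.float32, [8, 32, 32, 3])   # batch size must be static
standardized = preprocess_batch(
    images, preproc_func=tf.image.per_image_standardization)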

Example 5: pad_and_reshape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec:
    :param frame_length:
    :param F:
    :returns:
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = (frame_length) // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([
        [old_shape[0] * old_shape[1]],
        old_shape[2:]],
        axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec 
Author: deezer, Project: spleeter, Lines of code: 21, Source file: tensor.py
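A usage sketch with illustrative values (the shapes below are assumptions, not taken from the project): with F=1024 and frame_length=4096, the frequency axis is padded up to 4096 // 2 + 1 = 2049 bins before the first two axes are merged.

import tensorflow as tf

spec = tf.placeholder(tf.float32, [None, 2, 1024, 2])       # frequency on axis 2
padded = pad_and_reshape(spec, frame_length=4096, F=1024)   # frequency axis becomes 2049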

Example 6: _inverse_stft

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def _inverse_stft(self, stft_t, time_crop=None):
        """ Inverse and reshape the given STFT

        :param stft_t: input STFT
        :returns: inverse STFT (waveform)
        """
        inversed = inverse_stft(
            tf.transpose(stft_t, perm=[2, 0, 1]),
            self._frame_length,
            self._frame_step,
            window_fn=lambda frame_length, dtype: (
                hann_window(frame_length, periodic=True, dtype=dtype))
        ) * self.WINDOW_COMPENSATION_FACTOR
        reshaped = tf.transpose(inversed)
        if time_crop is None:
            time_crop = tf.shape(self._features['waveform'])[0]
        return reshaped[:time_crop, :] 
Author: deezer, Project: spleeter, Lines of code: 19, Source file: __init__.py

Example 7: read_from_tfrecord

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
        'label': tf.FixedLenFeature([],tf.int64),
        'shape': tf.FixedLenFeature([],tf.string),
        'image': tf.FixedLenFeature([],tf.string),
    }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)

    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image 
Author: wdxtub, Project: deep-learning-note, Lines of code: 19, Source file: 18_basic_tfrecord.py
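Because this example uses the TF1 queue-based input pipeline, a consumer has to start the queue runners before evaluating the tensors. A minimal, hypothetical driver (the file name is a placeholder):

import tensorflow as tf

label, shape, image = read_from_tfrecord(['images.tfrecord'])

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    label_val, image_val = sess.run([label, image])
    coord.request_stop()
    coord.join(threads)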

Example 8: images_to_sequence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def images_to_sequence(tensor):
  """Convert a batch of images into a batch of sequences.

  Args:
    tensor: a (num_images, height, width, depth) tensor

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  transposed = tf.transpose(tensor, [2, 0, 1, 3])

  shapeT = tf.shape(transposed)
  shapeL = transposed.get_shape().as_list()
  # Compute the shape of the resulting sequence tensor
  n_shape = tf.stack([
      shapeT[0],
      shapeT[1]*shapeT[2],
      shapeL[3]
  ])
  reshaped = tf.reshape(transposed, n_shape)
  return reshaped 
Author: TobiasGruening, Project: ARU-Net, Lines of code: 23, Source file: layers.py

Example 9: sequence_to_images

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def sequence_to_images(tensor, num_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batchesRNN, depth) sequence tensor
    num_batches: the number of image batches

  Returns:
    (num_batches, height, width, depth) tensor
  """

  shapeT = tf.shape(tensor)
  shapeL = tensor.get_shape().as_list()
  # Compute the shape used to fold the sequence back into image batches
  height = tf.to_int32(shapeT[1] / num_batches)
  n_shape = tf.stack([
      shapeT[0],
      num_batches,
      height,
      shapeL[2]
  ])

  reshaped = tf.reshape(tensor, n_shape)
  return tf.transpose(reshaped, [1, 2, 0, 3]) 
Author: TobiasGruening, Project: ARU-Net, Lines of code: 26, Source file: layers.py
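Examples 8 and 9 are inverses of each other: images_to_sequence folds the height dimension into the batch axis and puts width on the step axis, and sequence_to_images undoes that given the original number of images. A small round-trip sketch (the shapes are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, [4, 8, 16, 32])   # (num_images, height, width, depth)
seq = images_to_sequence(images)                       # -> (16, 4*8, 32)
back = sequence_to_images(seq, num_batches=4)          # -> (4, 8, 16, 32)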

Example 10: create_test_input

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
Author: ringringyi, Project: DOTA_models, Lines of code: 26, Source file: resnet_v2_test.py

Example 11: depool_2x2

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def depool_2x2(input_, stride=2):
    """Depooling."""
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
    res = tf.concat(
        axis=2, values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
    res = tf.concat(axis=4, values=[
        res, tf.zeros([batch_size, height, stride, width, stride - 1, channels])
    ])
    res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])

    return res


# random flip on a batch of images 
Author: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: real_nvp_utils.py
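A quick shape check (values are illustrative): the two zero-padded concatenations interleave zeros after every row and column, so each spatial dimension grows by the stride factor.

import tensorflow as tf

x = tf.placeholder(tf.float32, [2, 4, 4, 8])
y = depool_2x2(x)   # -> shape [2, 8, 8, 8]; the inserted entries are zeros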

Example 12: batch_random_flip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def batch_random_flip(input_):
    """Simultaneous horizontal random flip."""
    if isinstance(input_, (float, int)):
        return input_
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)
    res = [elem[0, :, :, :] for elem in res]
    res = [tf.image.random_flip_left_right(elem) for elem in res]
    res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]
    res = tf.concat(axis=0, values=res)

    return res


# build a one hot representation corresponding to the integer tensor
# the one-hot dimension is appended to the integer tensor shape 
Author: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: real_nvp_utils.py

Example 13: get_image_feature

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def get_image_feature(self, char_index):
    """Returns a subset of image features for a character.

    Args:
      char_index: an index of a character.

    Returns:
      A tensor with shape [batch_size, ?]. The output depth depends on the
      depth of input net.
    """
    batch_size, features_num, _ = [d.value for d in self._net.get_shape()]
    slice_len = int(features_num / self._params.seq_length)
    # In case when features_num != seq_length, we just pick a subset of image
    # features, this choice is arbitrary and there is no intuitive geometrical
    # interpretation. If features_num is not divisible by seq_length there will
    # be unused image features.
    net_slice = self._net[:, char_index:char_index + slice_len, :]
    feature = tf.reshape(net_slice, [batch_size, -1])
    logging.debug('Image feature: %s', feature)
    return feature 
Author: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: sequence_layers.py

Example 14: compute_mfcc

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into overlapping frames of 400 samples, with a 160-sample step
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Author: rtaori, Project: Black-Box-Audio, Lines of code: 42, Source file: tf_logits.py

Example 15: get_logits

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reshape [as alias]
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFCC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Author: rtaori, Project: Black-Box-Audio, Lines of code: 42, Source file: tf_logits.py


Note: The tensorflow.reshape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; when sharing or using the code, please follow the corresponding project's license. Do not reproduce without permission.