

Python numpy.clip Code Examples

This article collects typical usage examples of the numpy.clip method in Python. If you have been wondering what exactly numpy.clip does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the numpy module it belongs to.


The following presents 15 code examples of the numpy.clip method, drawn from open-source projects and sorted by popularity by default.
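Before turning to the project examples, here is a minimal, self-contained sketch of numpy.clip's core behavior; the array values and bounds are illustrative only and do not come from any of the projects below.

import numpy as np

a = np.array([-3.0, 0.5, 7.0, 255.9])
print(np.clip(a, 0, 255))      # [  0.    0.5   7.  255. ] -- out-of-range values saturate at the bounds
print(np.clip(a, None, 1.0))   # [-3.   0.5  1.   1. ] -- pass None to disable the lower (or upper) bound
out = np.empty_like(a)
np.clip(a, 0.0, 1.0, out=out)  # the out= argument writes the result into a preallocated array
print(out)                     # [0.  0.5 1.  1. ]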

Example 1: draw_heatmap

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
Author: aleju, Project: cat-bbs, Lines: 25, Source: common.py

Example 2: add_image

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def add_image(self, img):
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
            sys.stdout.flush()
        if self.shape is None:
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines: 27, Source: dataset_tool.py

Example 3: crop

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def crop(self, bbox):
        """See :func:`BaseInstanceMasks.crop`."""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = np.empty((0, h, w), dtype=np.uint8)
        else:
            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
        return BitmapMasks(cropped_masks, h, w) 
Author: open-mmlab, Project: mmdetection, Lines: 20, Source: structures.py

Example 4: perturb

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 20, Source: pgd_cw_whitebox.py

Example 5: cleverhans_attack_wrapper

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 19, Source: utils.py

Example 6: Chainer2PIL

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def Chainer2PIL(data, rescale=True):
    data = np.array(data)
    if rescale:
        data *= 256
        # data += 128
    if data.dtype != np.uint8:
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
    if data.shape[0] == 1:
        buf = data.astype(np.uint8).reshape((data.shape[1], data.shape[2]))
    else:
        buf = np.zeros((data.shape[1], data.shape[2], data.shape[0]), dtype=np.uint8)
        for i in range(3):
            a = data[i,:,:]
            buf[:,:,i] = a
    img = Image.fromarray(buf)
    return img 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines: 19, Source: image.py

Example 7: wave2input_image

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def wave2input_image(wave, window, pos=0, pad=0):
    # NOTE: `sride`, `dif`, `height`, `bias`, and `scale` are module-level
    # constants defined elsewhere in the original dataset.py.
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines: 19, Source: dataset.py

Example 8: visual

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def visual(title, X, activation):
    '''Create a grid of images and save it as a single image.
    title : grid image name
    X : array of images in NCHW layout
    activation : 'sigmoid' or 'tanh'; selects how X is rescaled to [0, 255]
    '''
    assert len(X.shape) == 4

    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff) 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: vaegan_mxnet.py

Example 9: play

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def play(self, a):
        assert not self.episode_terminate,\
            "Warning, the episode seems to have terminated. " \
            "We need to call either game.begin_episode(max_episode_step) to continue a new " \
            "episode or game.start() to force restart."
        self.episode_step += 1
        reward = 0.0
        action = self.action_set[a]
        for i in range(self.frame_skip):
            reward += self.ale.act(action)
            self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
        self.total_reward += reward
        self.episode_reward += reward
        ob = self.get_observation()
        terminate_flag = self.episode_terminate
        self.replay_memory.append(ob, a, numpy.clip(reward, -1, 1), terminate_flag)
        return reward, terminate_flag 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: atari_game.py

Example 10: spectrogram2wav

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def spectrogram2wav(mag):
    '''Generate a waveform from a normalized magnitude spectrogram.'''
    # transpose
    mag = mag.T

    # de-normalize
    mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db

    # to amplitude
    mag = np.power(10.0, mag * 0.05)

    # wav reconstruction
    wav = griffin_lim(mag)

    # de-preemphasis
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)

    # trim
    wav, _ = librosa.effects.trim(wav)

    return wav.astype(np.float32) 
Author: KinglittleQ, Project: GST-Tacotron, Lines: 23, Source: utils.py

Example 11: convert_to_torque

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def convert_to_torque(self, motor_commands, current_motor_angle,
                        current_motor_velocity):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      current_motor_angle: The motor angle at the current time step.
      current_motor_velocity: The motor velocity at the current time step.
    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    if self._torque_control_enabled:
      pwm = motor_commands
    else:
      pwm = (-self._kp * (current_motor_angle - motor_commands)
             - self._kd * current_motor_velocity)
    pwm = np.clip(pwm, -1.0, 1.0)
    return self._convert_to_torque_from_pwm(pwm, current_motor_velocity) 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 22, Source: motor.py

Example 12: augment_hsv

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32)  # random gains
    img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed


# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):  # original version
#     # SV augmentation by 50%
#     img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # hue, sat, val
#
#     S = img_hsv[:, :, 1].astype(np.float32)  # saturation
#     V = img_hsv[:, :, 2].astype(np.float32)  # value
#
#     a = random.uniform(-1, 1) * sgain + 1
#     b = random.uniform(-1, 1) * vgain + 1
#     S *= a
#     V *= b
#
#     img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
#     img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
#     cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed 
Author: zbyuan, Project: pruning_yolov3, Lines: 23, Source: datasets.py

Example 13: save_wav

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def save_wav(audio, output_wav_file):
    # `wav` is scipy.io.wavfile and `db` a decibel helper, both brought in
    # elsewhere in run_audio_attack.py; samples are rounded and clipped to
    # the valid int16 range before writing at 16 kHz.
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
Author: rtaori, Project: Black-Box-Audio, Lines: 5, Source: run_audio_attack.py

Example 14: setup_graph

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Author: rtaori, Project: Black-Box-Audio, Lines: 35, Source: run_audio_attack.py

Example 15: getctcloss

# Required import: import numpy [as alias]
# Alternatively: from numpy import clip [as alias]
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)

        if decode:
            return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
        else:
            return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len) 
Author: rtaori, Project: Black-Box-Audio, Lines: 16, Source: run_audio_attack.py


Note: The numpy.clip examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms; the snippets were selected from open-source projects contributed by many developers. Copyright of the code remains with the original authors; consult the corresponding project's License before redistributing or using it, and do not republish without permission.