

Python misc.toimage Method Code Examples

This article collects typical usage examples of the scipy.misc.toimage method in Python. If you are wondering how exactly to use misc.toimage, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples from the containing module, scipy.misc.


Below are 15 code examples of the misc.toimage method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
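For context, here is a minimal sketch of the basic call pattern that the examples below build on: misc.toimage converts a NumPy array into a PIL Image, optionally rescaling the values into the [low, high] range. The array contents and output file name are placeholders for illustration. Note that scipy.misc.toimage was deprecated in SciPy 1.0 and removed in SciPy 1.2, so these examples require an older SciPy; on newer versions, Pillow's Image.fromarray is the usual replacement.

import numpy as np
from scipy import misc  # misc.toimage exists only in SciPy < 1.2

# Illustrative grayscale data in [0, 1].
arr = np.random.rand(64, 64)

# Convert the array to a PIL Image, scaling values into [0, 255].
img = misc.toimage(arr, high=255, low=0)
img.save('example.png')

# On SciPy >= 1.2, an equivalent conversion uses Pillow directly:
# from PIL import Image
# Image.fromarray((arr * 255).astype(np.uint8)).save('example.png')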

Example 1: setup

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def setup(self, pre_encode=False):

        target_path = self.root + '/combined_annotations/'
        if not os.path.exists(target_path):
            os.makedirs(target_path)

        if pre_encode:
            print("Pre-encoding segmentation masks...")
            for i in tqdm(self.sbd_train_list):
                lbl_path = self.sbd_path + 'dataset/cls/' + i + '.mat'
                lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
                lbl = m.toimage(lbl, high=self.ignore_index, low=0)
                m.imsave(target_path + i + '.png', lbl)
            for i in tqdm(self.sbd_val_list):
                lbl_path = self.sbd_path + 'dataset/cls/' + i + '.mat'
                lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
                lbl = m.toimage(lbl, high=self.ignore_index, low=0)
                m.imsave(target_path + i + '.png', lbl)
            for i in tqdm(self.files['trainval']):
                lbl_path = self.voc_path + 'SegmentationClass/' + i + '.png'
                lbl = self.encode_segmap(m.imread(lbl_path))
                lbl = m.toimage(lbl, high=self.ignore_index, low=0)
                m.imsave(target_path + i + '.png', lbl) 
Developer: shahsohil, Project: sunets, Lines: 25, Source: pascal_voc_loader.py

Example 2: _render_devset

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def _render_devset(self):
        sys.stdout.write('\tRendering devset\n')
        file_index = 1
        for file in self.devset.files[:5]:
            sys.stdout.write(
                "\t\t" + str(file_index) + "/" + str(len(self.devset.files)) + " processing file " + file + " \n")
            sys.stdout.flush()
            file_index += 1
            mgc_file = file + ".mgc.npy"
            mgc = np.load(mgc_file)
            # print mgc.shape
            output_file = 'data/output/' + file[file.rfind('/') + 1:] + '.png'
            bitmap = np.zeros((mgc.shape[1], mgc.shape[0], 3), dtype=np.uint8)
            for x in range(mgc.shape[0]):
                for y in range(mgc.shape[1]):
                    val = mgc[x, y]
                    color = val * 255
                    bitmap[y, x] = [color, color, color]
            import scipy.misc as smp
            img = smp.toimage(bitmap)
            img.save(output_file) 
Developer: tiberiu44, Project: TTS-Cube, Lines: 23, Source: vocoder.py

Example 3: DeepQPredictBestAction

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def DeepQPredictBestAction(qstate):
    qs_a = qstate
    #print("qstate.shape",qstate.shape)
    predX = np.zeros(shape=(1,IMG_DIM,IMG_DIM*3))
    predX[0] = qs_a
    #print("predX[0].shape",predX[0].shape)
    #print("qs_a",qs_a.shape)
    #img = predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3)
    #toimage(img[0][0]).show()
    #print("trying to predict reward at qs_a", predX[0])
    pred = Qmodel.predict(predX[0].reshape(1,IMG_DIM,IMG_DIM*3,1))
    remembered_total_reward = pred[0]
    return remembered_total_reward




#Play the game 500 times 
Developer: FitMachineLearning, Project: FitML, Lines: 20, Source: Main_Gym_Channel_Last.py

Example 4: DeepQPredictBestAction

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def DeepQPredictBestAction(qstate):
    qs_a = qstate
    predX = np.zeros(shape=(1,IMG_DIM,IMG_DIM*3))
    predX[0] = qs_a
    #print("qs_a",qs_a.shape)
    #img = predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3)
    #toimage(img[0][0]).show()
    #print("trying to predict reward at qs_a", predX[0])
    pred = Qmodel.predict(predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3))
    remembered_total_reward = pred[0]
    return remembered_total_reward




#Play the game 500 times 
Developer: FitMachineLearning, Project: FitML, Lines: 18, Source: Main_Gym.py

Example 5: DeepQPredictBestAction

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def DeepQPredictBestAction(qstate,is_noisy_game = False):
    qs_a = qstate
    predX = np.zeros(shape=(1,IMG_DIM,IMG_DIM*3))
    predX[0] = qs_a
    #print("qs_a",qs_a.shape)
    #img = predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3)
    #toimage(img[0][0]).show()
    #print("trying to predict reward at qs_a", predX[0])
    if is_noisy_game:
        pred = Qmodel.predict(predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3))
    else:
        pred = noisyModel.predict(predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3))

    remembered_total_reward = pred[0]
    return remembered_total_reward




#Play the game 500 times 
Developer: FitMachineLearning, Project: FitML, Lines: 22, Source: Main_Gym_Param_Noise.py

Example 6: DeepQPredictBestAction

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def DeepQPredictBestAction(qstate):
    qs_a = qstate
    predX = np.zeros(shape=(1,IMG_DIM,IMG_DIM*3))
    predX[0] = qs_a
    #print("qs_a",qs_a.shape)
    #img = predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3)
    #toimage(img[0][0]).show()
    #print("trying to predict reward at qs_a", predX[0])
    inputX = predX[0].reshape(1,1,IMG_DIM,IMG_DIM*3)
    if CHANNEL_LAST:
        inputX = np.transpose(inputX, (0, 2,3,1))
        #toimage(inputX[0]).show()
        #print("inputX",inputX.shape)
    pred = Qmodel.predict(inputX)
    remembered_total_reward = pred[0]
    return remembered_total_reward



#Play the game 500 times 
Developer: FitMachineLearning, Project: FitML, Lines: 22, Source: Main_Gym_2_Channels.py

Example 7: draw

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def draw(X):
    """Xは4次元テンソルの畫像集合、最初の16枚の畫像を描畫する"""
    assert X.shape[0] >= 16

    plt.figure()
    pos = 1
    for i in range(16):
        plt.subplot(4, 4, pos)
        img = toimage(X[i])
        plt.imshow(img)
        plt.axis('off')
        pos += 1
    plt.show()


# Load the CIFAR10 images (X is a 4-D tensor)
# Use only the 50,000 training images; discard the rest
Developer: aidiary, Project: keras-examples, Lines: 19, Source: zca_whitening.py

Example 8: plot_cifar10

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def plot_cifar10(X, y, result_dir):
    plt.figure()

    # Draw the images
    nclasses = 10
    pos = 1
    for targetClass in range(nclasses):
        targetIdx = []
        # Get the index list of images belonging to the target class
        for i in range(len(y)):
            if y[i][0] == targetClass:
                targetIdx.append(i)

        # Draw the first 10 randomly chosen images from each class
        np.random.shuffle(targetIdx)
        for idx in targetIdx[:10]:
            img = toimage(X[idx])
            plt.subplot(10, 10, pos)
            plt.imshow(img)
            plt.axis('off')
            pos += 1

    plt.savefig(os.path.join(result_dir, 'plot.png')) 
Developer: aidiary, Project: keras-examples, Lines: 25, Source: cifar10.py

Example 9: render_fonts_image

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def render_fonts_image(x, path, img_per_row, unit_scale=True):
    if unit_scale:
        # scale 0-1 matrix back to gray scale bitmaps
        bitmaps = (x * 255.).astype(dtype=np.int16) % 256
    else:
        bitmaps = x
    num_imgs, w, h = x.shape
    assert w == h
    side = int(w)
    width = img_per_row * side
    height = int(np.ceil(float(num_imgs) / img_per_row)) * side
    canvas = np.zeros(shape=(height, width), dtype=np.int16)
    # make the canvas all white
    canvas.fill(255)
    for idx, bm in enumerate(bitmaps):
        x = side * int(idx / img_per_row)
        y = side * int(idx % img_per_row)
        canvas[x: x + side, y: y + side] = bm
    misc.toimage(canvas).save(path)
    return path 
Developer: kaonashi-tyc, Project: Rewrite, Lines: 22, Source: utils.py

Example 10: mask_img

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def mask_img(img, attn, upscale=32):
		"""
		Put attention weights to each region in image.
		--------------------
		Arguments:
			img (ndarray: H x W x C): image data.
			attn (ndarray: 14 x 14): attention weights of each region.
			upscale (int): the ratio between attention size and image size.
		"""
		attn = transform.pyramid_expand(attn, upscale=upscale, sigma=20)
		attn = misc.toimage(attn).convert("L")
		mask = misc.toimage(np.zeros(img.shape, dtype=np.uint8)).convert("RGBA")
		img = misc.toimage(img).convert("RGBA")
		img = Image.composite(img, mask, attn)

		return img 
Developer: cvlab-tohoku, Project: Dense-CoAttention-Network, Lines: 18, Source: utils.py

Example 11: dots

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def dots(self):
        dotsF = numpy.zeros((156,208))
        dotsI = dotsF.astype('uint8')
        k = 10

        for i in range(0,155,1):
            # set a dot every 15 columns, starting at column k
            for j in range(k,206,15):
                # print i,j
                dotsI[i,j] = 255
            # shift the pattern 4 columns left on the next row, wrapping with period 15
            k = k - 4
            if k < 0: k = k + 15

        return dotsI
# display it to see if it matches the Seek black dot hex pattern

#	zz = Image.fromstring("I", (208,156), dotsI, "raw", "I;8")
#	toimage(zz).show()
#        print dotsI
##################################################################### 
Developer: lod, Project: seek-thermal-documentation, Lines: 21, Source: Seek_2.0.matlab_export.py

Example 12: compress_img

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def compress_img(img, method):
    """Compress an image (numpy array) using the provided image compression
    method."""
    img_compressed_buffer = StringIO.StringIO()
    im = misc.toimage(img)
    im.save(img_compressed_buffer, format=method)
    img_compressed = img_compressed_buffer.getvalue()
    img_compressed_buffer.close()
    return img_compressed 
Developer: aleju, Project: cat-bbs, Lines: 11, Source: create_dataset.py

Example 13: convertRGB2YUV

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def convertRGB2YUV(img): 
    """Converts RGB image to YUV"""
    iplImg = misc.toimage(img)
    imgYUV = iplImg.convert('YCbCr') 
    # np.asarray(imgYUV) gives a HxWx4 array which is wrong 
    # so we're decoding the image byte string ourselves
    # http://mail.python.org/pipermail/image-sig/2010-November/006565.html
    imgArr = np.ndarray((iplImg.size[1], iplImg.size[0], 3), 'u1', 
                imgYUV.tostring())
    return imgArr 
Developer: coxlab, Project: edn-cvpr2014, Lines: 12, Source: imageOps.py

Example 14: save_img

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def save_img(img_np, file_name, out_dir='./'):
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    image = img_np.copy().transpose(1, 2, 0)
    # for gray image
    if img_np.shape[2] == 1:
        image = image[:, :, 0]
    # Revise values to save the matrix as png image
    image[image > 1.] = 1.
    image[image < 0.] = 0.
    spmi.toimage(image, cmin=0, cmax=1).save(out_dir + file_name) 
Developer: shirakawas, Project: ASNG-NAS, Lines: 13, Source: eval_test.py

Example 15: set_line_image_data

# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import toimage [as alias]
def set_line_image_data(image, line_id, image_fh, transcription):
    """ Given an image, saves a flipped line image. It also stores
        line image and transcription mapping.
    """

    line_image_file_name = line_id + '.png'
    image_path = os.path.join(args.out_dir, line_image_file_name)
    imgray = image.convert('L')
    imgray_rev_arr = np.fliplr(imgray)
    imgray_rev = toimage(imgray_rev_arr)
    imgray_rev.save(image_path)
    image_fh.write(image_path + ' ' + transcription + '\n') 
Developer: waldo-seg, Project: waldo, Lines: 14, Source: get_line_image_from_mar.py


Note: The scipy.misc.toimage examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.