

Python pyplot.imsave Function Code Examples

This article collects typical usage examples of the matplotlib.pyplot.imsave function in Python. If you are wondering what exactly imsave does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the imsave function, ordered by popularity by default.
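
Before the project excerpts, here is a minimal, self-contained sketch of the call pattern they all share: plt.imsave writes an array straight to an image file, applying an optional colormap and value range to 2-D data. The array and file name below are illustrative only.

import numpy as np
import matplotlib.pyplot as plt

# A 2-D array is passed through the colormap; vmin/vmax pin the color scale.
data = np.random.rand(64, 64)
plt.imsave("example_gray.png", data, cmap="gray", vmin=0.0, vmax=1.0)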

Example 1: create_concentration_image

 def create_concentration_image(self, image_data):
     cmap = cm.get_cmap()
     cmap._init()
     is_comparison_run = isinstance(self.scenario_run, models.ComparisonScenarioRun)
     if not is_comparison_run or self.scenario_run.comparison_mode == "Absolute":
         alphas = np.abs([min(n, 1.0) for n in np.linspace(0, 2, cmap.N)])
         vmax = np.max(image_data)
         vmin = np.min(image_data)
     else:
         results_max = np.max(image_data)
         results_min = np.min(image_data)
         if np.abs(results_max) > np.abs(results_min):
             vmax = results_max
             vmin = -results_max
         else:
             vmax = -results_min
             vmin = results_min
         results_range = vmax - vmin
         value_array = np.linspace(vmin, vmax, cmap.N)
         alphas = np.array([min(np.abs(v) / results_range * 2, 1.0) for v in value_array])
     cmap._lut[:-3, -1] = alphas
     if is_comparison_run:
         output_directory = self.scenario_run.output_directory_1
     else:
         output_directory = self.scenario_run.output_directory
     plt.imsave(fname=os.path.join(output_directory, "concentrations.png"),
                arr=image_data, format='png', vmax=vmax, vmin=vmin)
Author: nathan-rice, Project: ctools-backend, Lines: 27, Source: raster.py
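
Example 1 adjusts per-entry transparency through the colormap's private _init()/_lut attributes. A sketch of the same idea using only public matplotlib API, by sampling the colormap into an RGBA array, editing the alpha column, and wrapping it in a ListedColormap (the placeholder data and sizes are illustrative, not taken from the project):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

data = np.random.rand(64, 64)                           # placeholder concentration field
colors = plt.cm.viridis(np.linspace(0, 1, 256))         # (256, 4) RGBA entries
colors[:, -1] = np.clip(np.linspace(0, 2, 256), 0, 1)   # ramp alpha up, capped at 1.0
plt.imsave("concentrations.png", data, cmap=ListedColormap(colors),
           vmin=data.min(), vmax=data.max())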

Example 2: input_image_setup

def input_image_setup(img_name, img2_name):
    '''Takes an image as input and builds a "rule map". When creating the
    image, keep in mind: red = grid, green = branched, blue = radial,
    and a black pixel marks a center.'''
    #TODO: Document
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt
    import procedural_city_generation
    import os

    img = mpimg.imread(img_name)
    img2 = mpimg.imread(img2_name)

    path = os.path.dirname(procedural_city_generation.__file__)
    print(path)
    plt.imsave(path + "/temp/diffused.png", img2, cmap='gray')
    with open(path + "/temp/isdiffused.txt", 'w') as f:
        f.write("False")

    img *= 255
    img2 *= 255
    return img, img2
Author: CodeMason, Project: procedural_city_generation, Lines: 25, Source: input_image_setup.py
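
A general matplotlib detail behind the *255 rescaling in Example 2: mpimg.imread returns PNG data as floats in [0, 1], and plt.imsave accepts float input directly, so a save/read round trip stays in that range. A minimal sketch with an illustrative file name:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

gray = np.random.rand(32, 32)             # float data in [0, 1]
plt.imsave("roundtrip.png", gray, cmap='gray')
back = mpimg.imread("roundtrip.png")      # comes back as a float RGBA array in [0, 1]
print(back.shape, back.dtype)             # e.g. (32, 32, 4) float32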

Example 3: plot_brights

def plot_brights(ax, path, star, regionList, goal=False):
    '''
    Components of this routine:
        Projected brightness map

    Please note that this has been modified for use in diagnostic plots;
    there should really be a way to specify a windowNumber for real data.
    '''
    currentWindow = 0

    ###########################
    # Make the brightness map #
    ###########################
    img = make_bright_image(star, regionList, currentWindow, goal=goal)
    
    plt.imsave(path + "temp.jpg", img, cmap='hot', vmin=0.85, vmax=1.15)
    plt.imshow(img, cmap='hot')
    #Create the plot
    bmap = Basemap(projection='moll', lon_0 = 0, ax=ax)
    bmap.warpimage(path + "temp.jpg", ax=ax)
    
    if goal:
        ax.set_title("Desired Map")
    else:
        ax.set_title("Average Map")
Author: rapidsnow, Project: Eclipse-Mapping, Lines: 25, Source: plots_scratch.py

Example 4: main

def main():
    """
    Args: save_dir output_dir
    """
    args = sys.argv
    save_dir = args[1]
    output_dir = args[2]

    layer_list = [
        'conv1/Conv/Conv2D',
        'conv2/Conv/Conv2D',
        'conv3/Conv/Conv2D',
        'conv4/Conv/Conv2D',
        'conv5/Conv/Conv2D',
        'conv5/Conv_1/Conv2D',
        'conv6/Conv/Conv2D'
    ]
    channels = [16, 32, 64, 64, 128, 256, 2]

    sess = tf.Session()

    with sess.as_default():
        maximize_output_multi = layers.prepare_graph(movie.build_net, save_dir)

        for i, layer in enumerate(layer_list):
            folder_name = layer.replace('/', '_')
            directory = os.path.join(output_dir, folder_name)
            create_dir(directory)
            for channel in range(channels[i]):
                result = maximize_output_multi(layer, channel, octave_n=4, iter_n=100, step=5.0, seed=123)
                plt.imsave(os.path.join(directory, str(channel) + '.png'), result)
Author: vlpolyansky, Project: video-cnn, Lines: 31, Source: movie_save_all_layers.py

Example 5: toFilenameAndTiff

 def toFilenameAndTiff(outputDirPath, kv):
     key, image = kv
     fname = outputDirPath + '/w_' + str(key) + '.tif'
     if len(image.shape) == 3:
         image = image.T
         image = np.swapaxes(image, 1, 2)
     imsave(fname, image)
Author: genialwang, Project: lambda-image, Lines: 7, Source: images.py

Example 6: draw_tile

def draw_tile(metadata, config, target_path):
    decoder = config.build_decoder()
    decoder_layers = nn.layers.get_all_layers(decoder.l_out)
    print("  decoder layer output shapes:")
    nparams = len(nn.layers.get_all_params(decoder.l_out))
    nn.layers.set_all_param_values(decoder.l_out, metadata['param_values'][-nparams:])

    for layer in decoder_layers:
        name = layer.__class__.__name__
        print("    %s %s" % (name.ljust(32), nn.layers.get_output_shape(layer)))

    # 20 x 20 grid of latent points taken from the inverse normal CDF
    mesh = np.linspace(0.001, 0.999, 20)
    z = np.zeros((400, 2), dtype='float32')
    for i in range(20):
        for j in range(20):
            z[20 * i + j, :] = np.array([norm.ppf(mesh[i]), norm.ppf(mesh[j])])

    sample = theano.function([decoder.l_z.input_var], nn.layers.get_output(decoder_layers[-1]))

    digits = sample(z)

    # assemble the decoded 28 x 28 digits into a single 560 x 560 tile
    tile = np.zeros((20 * 28, 20 * 28), dtype='float32')

    for i in range(20):
        for j in range(20):
            d = np.reshape(digits[20 * i + j, :], (28, 28))
            tile[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = d

    plt.imsave(target_path + 'tile.png', tile, cmap=matplotlib.cm.Greys)
Author: IraKorshunova, Project: vae, Lines: 29, Source: draw.py

Example 7: segment

def segment(sourceImage, DstImage):
    import sys
    sys.path.insert(0,'/home/joe/github/caffe-with_crop/python')
    import numpy as np
    from PIL import Image
    import matplotlib.pyplot as plt
    import caffe
    # caffe.set_mode_gpu()
    # caffe.set_device(0)
    # load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
    im = Image.open(sourceImage)
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:,:,::-1]
    in_ -= np.array((104.00698793,116.66876762,122.67891434))
    in_ = in_.transpose((2,0,1))

    # load net
    net = caffe.Net('/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/deploy.prototxt', '/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/fcn-32s-pascalcontext.caffemodel', caffe.TEST)
    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *in_.shape)
    net.blobs['data'].data[...] = in_
    # run net and take argmax for prediction
    net.forward()
    out = net.blobs['score'].data[0].argmax(axis=0)
#    plt.imshow(out)
    plt.imsave(DstImage, out)
Author: joe8767, Project: fcn-web, Lines: 26, Source: application.py

Example 8: showEigFace

 def showEigFace(self, idx=0):
     eigface = np.float32(self.eigenfaces[idx])
     print('Eigface', eigface)
     print('SHAPE:', eigface.shape)
     im = np.reshape(eigface, self.imsize)
     picName = 'eigFaceImage' + str(idx) + '.png'
     plt.imsave(picName, im, cmap='gray')
Author: jztein, Project: eigface, Lines: 7, Source: getEigenface.py

Example 9: main

def main():
    logging.basicConfig(level=logging.INFO)

    imagename = "1_27_s.bmp"
    unaryfilename = "1_27_s.c_unary.txt"

    logging.info("Read image.")
    img = utility.readimg(imagename)

    logging.info("Load unaries.")
    unaries = utility.loadunaryfile(os.path.join("data", unaryfilename))

    # Calculate energy
    unaries = -np.log(unaries)
    numlabels = unaries.shape[2]

    w = 100000
    l = 0.5
    pd1 = PD1(img, unaries, numlabels, w, l)
    pd1.segment()
    img = pd1.get_labeled_image()

    logging.info("Save image.")
    plt.imsave("img_out", img)

    plt.imshow(img)
    plt.show()
Author: JeGa, Project: PD1, Lines: 27, Source: pd1.py

Example 10: save

    def save(self, face_img, face_id, pose, landmark, name):
        # save the face image
        pose_bin_id = self.get_pose_bin_id(pose.yaw, pose.pitch)
        #print("Yaw=%d Pitch=%d Bin Name=%d" % (int(pose.yaw), int(pose.pitch), pose_bin_id))
        save_path = self.bin2path(pose_bin_id) + '/' + name + '.png'
        #print("saving image to path:", save_path)
        plt.imsave(save_path, face_img)
        # save the data file
        # this data file could be optimized, by sorting the face_id
        print("saving image data file to path:", self.data_file_path())

        # here will change what we want to store
        # basically, we need the following data for ranking
        # 1. yaw, pitch and roll: yaw and pitch should match first,
        # and then roll.
        # 2. distance between eyes: to estimate the resolution
        # 3. and also I think I need the name, but this could be provided by
        # using the image file name
        with open(self.data_file_path(), 'a') as data_file_handler:
            print("%d" % pose_bin_id, end=' ', file=data_file_handler)
            print("%s" % name, end=' ', file=data_file_handler)
            print("%d %d %d" % (pose.yaw, pose.pitch, pose.roll), end=' ', file=data_file_handler)
            for point in landmark.all_points:
                print(point, end=' ', file=data_file_handler)
            print("", file=data_file_handler)
        return save_path
Author: kumasento, Project: ImageProcFinal_Faceswap, Lines: 26, Source: faceposebin.py

Example 11: detect

def detect():
    try:
        image = request.files.get('file')
        recog_face = bool(int(request.files.get('face').file.read()))
        files = {'file': BytesIO(image.file.read())}
        objs = requests.post('http://{}/object'.format(OBJ_SERVER),
                             files=files)
        objs = json.loads(objs.text)

        print(recog_face)
        if recog_face:
            img = None
            for bb in objs:
                if bb['label'] == 'person':
                    if img is None:
                        image.file.seek(0)
                        img = io.imread(BytesIO(image.file.read()))
                    x1, y1, x2, y2 = bb['bbox']
                    person_img = img[y1:y2, x1:x2]

                    # detect face: encode the person crop into an in-memory buffer
                    s = BytesIO()
                    plt.imsave(s, person_img)
                    s.seek(0)
                    faces = requests.post('http://{}/face'.format(FACE_SERVER),
                                          files={'file': s})
                    bb['face'] = json.loads(faces.text)

        return json.dumps(objs)

    except Exception as e:
        print(str(type(e)), e)
Author: mitmul, Project: cvmodules, Lines: 32, Source: server.py
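
As Example 11 shows, imsave accepts a writable file-like object in place of a filename; when targeting an in-memory buffer it is clearest to name the format explicitly. A standalone sketch of that pattern (buffer, array, and sizes are illustrative):

import io
import numpy as np
import matplotlib.pyplot as plt

rgb = np.random.rand(48, 48, 3)          # placeholder RGB crop in [0, 1]
buf = io.BytesIO()
plt.imsave(buf, rgb, format='png')       # encode PNG bytes into the buffer
png_bytes = buf.getvalue()               # ready to POST, cache, or write elsewhere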

Example 12: save_results

def save_results(true_labels, predicted_labels, clf_name, classification_dir):
  cm_int = confusion_matrix(true_labels, predicted_labels)
  cm_float = cm_int / np.apply_along_axis(np.sum, 1, cm_int).astype('float')
  report = classification_report(true_labels, predicted_labels,
                                 np.arange(0, len(class_names)), class_names)
  # Save results
  plt.imsave(classification_dir + '/' + clf_name + "_cm.png", cm_float, cmap=cmt.gray)
  float_cm_file = classification_dir + '/' + clf_name + "_float_cm.txt"
  with open(float_cm_file, 'w') as fos:
    np.savetxt(fos, cm_float)
  int_cm_file = classification_dir + '/' + clf_name + "_int_cm.txt"
  with open(int_cm_file, 'w') as fos:
    np.savetxt(fos, cm_int)
  report_file = classification_dir + '/' + clf_name + "_report.txt"
  with open(report_file, 'w') as fos:
    fos.write(report)
  labels_file = classification_dir + '/' + clf_name + "_labels.txt"
  with open(labels_file, 'w') as fos:
    np.savetxt(fos, np.column_stack((true_labels, predicted_labels)))

  p, r, f1, s = precision_recall_fscore_support(true_labels, predicted_labels,
                                                labels=np.arange(0, len(class_names)))
  prf1s_file = classification_dir + '/' + clf_name + "_prf1s.txt"
  with open(prf1s_file, 'w') as fos:
    np.savetxt(fos, np.column_stack((p, r, f1, s)))
Author: mirestrepo, Project: voxels-at-lems, Lines: 28, Source: classify_no_object.py

Example 13: plot2

def plot2(fn,p,wa,vmin,vmax,ups):
    
    # Check matrix dimensions are the same
    (m,n)=p.shape
    if (m,n)!=wa.shape:
        print "Matrix dimension mismatch"

    # Set up output array and scaling constant
    o=np.zeros((m*ups,n*ups,3))
    vsca=1.0/(vmax-vmin)

    # Assemble the output array
    for i in range(m):
        iu=i*ups
        for j in range(n):
            ju=j*ups
            if wa[i,j]==1:
                o[iu:iu+ups,ju:ju+ups,0]=1
                o[iu:iu+ups,ju:ju+ups,1]=1
                o[iu:iu+ups,ju:ju+ups,2]=1
            else:
                (re,gr,bl)=palette2(fscale(p[i,j],vmin,vsca))
                o[iu:iu+ups,ju:ju+ups,0]=re
                o[iu:iu+ups,ju:ju+ups,1]=gr
                o[iu:iu+ups,ju:ju+ups,2]=bl

    # Save the image
    plt.imsave(fn,o)
Author: dustinvtran, Project: am205-fall-2014, Lines: 28, Source: custom_plot.py
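
Example 13 assembles the RGB channels by hand rather than relying on a colormap; imsave treats a float array of shape (M, N, 3) or (M, N, 4) with values in [0, 1] as RGB(A) data, and the cmap/vmin/vmax arguments are ignored in that case. A minimal sketch (the gradient is illustrative):

import numpy as np
import matplotlib.pyplot as plt

h, w = 64, 64
rgb = np.zeros((h, w, 3))                        # float RGB, expected in [0, 1]
rgb[:, :, 0] = np.linspace(0, 1, w)              # red ramps left to right
rgb[:, :, 2] = np.linspace(0, 1, h)[:, None]     # blue ramps top to bottom
plt.imsave("gradient.png", rgb)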

Example 14: main

def main():
    parser = argparse.ArgumentParser(
        description="Photometric Stereo",
    )
    parser.add_argument(
        "--lightning",
        nargs="?",
        help="Filename of JSON file containing lightning information",
    )
    parser.add_argument(
        "--mask",
        nargs="?",
        help="Filename of an image containing a mask of the object",
    )
    parser.add_argument(
        "image",
        nargs="*",
        help="Images filenames",
    )
    parser.add_argument(
        "--generate-map",
        action='store_true',
        help="Generate a map.png file which represends the colors of the "
             "normal mapping.",
    )
    args = parser.parse_args()

    if args.generate_map:
        normals = generateNormalMap()
        plt.imsave('map.png', normals)
        return

    if not len(args.image) >= 3:
        print("Please specify 3+ image files.")
        return

    if args.lightning:
        normals = photometricStereo(args.lightning, args.image)
        if False:
            try:
                with open('data.pkl', 'rb') as fhdl:
                    normals = pickle.load(fhdl)
            except:
                
                with open('data.pkl', 'wb') as fhdl:
                    pickle.dump(normals, fhdl)
    else:
        normals = photometricStereoWithoutLightning(args.image)

    if args.mask:
        mask = getImage(args.mask)
        mask = mask.T
        print(normals.shape, mask.shape)
        normals[mask<(mask.max() - mask.min())/2.] = np.nan

    color = colorizeNormals(normals)
    plt.imsave('out.png', color)
    mesh.write3dNormals(normals, 'out-3dn.stl')
    surface = mesh.surfaceFromNormals(normals)
    mesh.writeMesh(surface, normals, 'out-mesh.stl')
Author: patricksnape, Project: pms, Lines: 60, Source: pms.py

Example 15: prep_image

def prep_image(url,idx,dataset,datadir,width=224,filetype='jpg',verbose=False):
    '''
    Check whether the image file has already been downloaded at the current size. If it has not,
    download and resize image. Saves file to datadir/images/[dataset]_[idx]_w[width].[filetype]
    e.g. datadir/images/train_10001_w256.bmp

    args:
        url: url of image source
        idx: image row index
        dataset: string 'train' or 'test' or other identifier
        datadir: data directory
        width: desired width of image. Will be resized to width squared
    returns:
        rawim: scaled and cropped image
    '''
    outpath = datadir + 'images/' + dataset + '_' +  str(idx) + '_w' + str(width) + '.' + filetype

    if not os.path.isfile(outpath):
        if verbose:
            print "downloading image #%s..." %str(idx)
        try:
            rawim = download_and_resize(url,width)
            plt.imsave(outpath,rawim)
            return rawim
        except:
            print "unable to download image #%i from url %s..." %(idx,url)
            return None
    else:
        if verbose:
            print "Image %i already downloaded. Loading from file..." % idx
        rawim = plt.imread(outpath)
        return rawim
Author: LucyWang2014, Project: NLP_Product_Classification, Lines: 32, Source: download_images_to_directory.py


Note: The matplotlib.pyplot.imsave examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce this article without permission.