

Python scipy.io Code Examples

This article collects typical usage examples of the scipy.io module in Python. If you are wondering how to use scipy.io, what it does, or what real code that relies on it looks like, the curated examples below should help. You can also explore further usage examples from the scipy package that scipy.io belongs to.


The sections below present 15 code examples of scipy.io, ordered by popularity by default.
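
Before diving into the examples, here is a minimal, self-contained sketch of the two calls most of them build on, scipy.io.savemat and scipy.io.loadmat. The file name 'demo.mat' and the variable name 'a' are illustrative assumptions, not taken from any example below.

import numpy as np
import scipy.io

a = np.arange(6, dtype=np.float64).reshape(2, 3)
scipy.io.savemat('demo.mat', {'a': a})   # write a dict of arrays to a MATLAB .mat file
loaded = scipy.io.loadmat('demo.mat')    # returns a dict keyed by variable name
assert np.allclose(loaded['a'], a)       # 2-D arrays round-trip exactly

Two details worth keeping in mind when reading the examples: loadmat also adds the metadata keys '__header__', '__version__' and '__globals__' to the returned dictionary, and 1-D arrays are written as 2-D row vectors by default.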

Example 1: cal_pca_matrix

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def cal_pca_matrix(path='PCA_matrix.mat', ksize=15, l_max=12.0, dim_pca=15, num_samples=500):
    kernels = np.zeros([ksize*ksize, num_samples], dtype=np.float32)
    for i in range(num_samples):

        theta = np.pi*np.random.rand(1)
        l1    = 0.1+l_max*np.random.rand(1)
        l2    = 0.1+(l1-0.1)*np.random.rand(1)

        k = anisotropic_Gaussian(ksize=ksize, theta=theta[0], l1=l1[0], l2=l2[0])

        # util.imshow(k)

        kernels[:, i] = np.reshape(k, (-1), order="F")  # k.flatten(order='F')

    # io.savemat('k.mat', {'k': kernels})

    pca_matrix = get_pca_matrix(kernels, dim_pca=dim_pca)

    io.savemat(path, {'p': pca_matrix})

    return pca_matrix 
Developer: cszn, Project: KAIR, Lines: 23, Source: utils_sisr.py
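
A hedged usage sketch for Example 1: reading back the PCA projection matrix that cal_pca_matrix() writes with io.savemat. The key 'p' and the default file name match the code above; the array shape depends on get_pca_matrix, so it is only printed here, not asserted.

import scipy.io

pca_matrix = scipy.io.loadmat('PCA_matrix.mat')['p']
print(pca_matrix.shape, pca_matrix.dtype)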

Example 2: load

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def load(io: Union[str, BinaryIO]):
        """
        Args:
            io: (str or binary file-like object): input file to load data from
        Returns:
            An instance of `DensePoseTransformData` with transforms loaded from the file
        """
        import scipy.io

        uv_symmetry_map = scipy.io.loadmat(io)
        uv_symmetry_map_torch = {}
        for key in ["U_transforms", "V_transforms"]:
            uv_symmetry_map_torch[key] = []
            map_src = uv_symmetry_map[key]
            map_dst = uv_symmetry_map_torch[key]
            for i in range(map_src.shape[1]):
                map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))
            uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0)
        transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device("cpu"))
        return transform_data 
Developer: facebookresearch, Project: detectron2, Lines: 22, Source: structures.py
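
A side note on why Example 2 indexes map_src[0, i]: MATLAB cell arrays come back from scipy.io.loadmat as NumPy object arrays that keep the cell's 2-D shape, typically (1, N), with each element holding one inner array. A minimal sketch under that assumption (the file name and key are hypothetical):

import scipy.io

mat = scipy.io.loadmat('uv_symmetry.mat')   # hypothetical .mat file containing a cell array
cell = mat['U_transforms']                  # object array of shape (1, N)
first = cell[0, 0]                          # the first cell element as a plain ndarray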

Example 3: load_mat_file

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def load_mat_file(name, path, matname, load_zeros = False, prop_valid_set = .1, prop_test_set=0):

	x = scipy.io.loadmat(path + name)[matname]


	if sp.issparse(x): 
		if not load_zeros:
			idxs = x.nonzero()

			indexes = np.array(list(zip(idxs[0], np.zeros_like(idxs[0]), idxs[1])))  # list() is needed under Python 3
			np.random.shuffle(indexes)

			nb = indexes.shape[0]
			i_valid = int(nb - nb*prop_valid_set - nb * prop_test_set)
			i_test = i_valid + int( nb*prop_valid_set)

			train = Triplets_set(indexes[:i_valid,:], np.ones(i_valid))
			valid = Triplets_set(indexes[i_valid:i_test,:], np.ones(i_test - i_valid))
			test = Triplets_set(indexes[i_test:,:], np.ones(nb - i_test))


	# NOTE: train/valid/test are only defined above when x is sparse and load_zeros is False
	return Experiment(name, train, valid, test, positives_only=True, compute_ranking_scores=True)
Developer: dbpedia, Project: embeddings, Lines: 24, Source: exp_generators.py
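
Background for the sp.issparse() check in Example 3: scipy.io.savemat stores scipy.sparse matrices as MATLAB sparse matrices, and scipy.io.loadmat returns them as scipy.sparse matrices (CSC format), so .nonzero() enumerates the stored entries. A minimal round-trip sketch with assumed names:

import scipy.io
import scipy.sparse as sp

m = sp.random(5, 5, density=0.2, format='csc')   # random sparse test matrix
scipy.io.savemat('sparse_demo.mat', {'m': m})
x = scipy.io.loadmat('sparse_demo.mat')['m']
print(sp.issparse(x), x.nonzero())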

Example 4: read_edf

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def read_edf(edf_fname, from_t, to_t):
    import mne.io
    edf_raw = mne.io.read_raw_edf(edf_fname, preload=True)
    edf_raw.notch_filter(np.arange(60, 241, 60))
    dt = (edf_raw.times[1] - edf_raw.times[0])
    hz = int(1 / dt)  # sampling rate in Hz
    T = edf_raw.times[-1] # sec
    live_channels = find_live_channels(edf_raw, hz)

    ylim = [-0.0015, 0.0015]
    from_t = 17  # NOTE: hard-coded start time; overrides the from_t argument
    window = to_t - from_t
    # plot_window(edf_raw, live_channels, t_start, window, hz, ylim)
    # plot_all_windows(edf_raw, live_channels, T, hz, window, edf_fname, ylim)

    data, times = edf_raw[:, int(from_t*hz):int(from_t*hz) + hz * window]
    # plot_power(data[0], dt)
    # edf_raw.plot(None, 1, 20, 20) 
Developer: pelednoam, Project: mmvt, Lines: 20, Source: electrodes.py

Example 5: find_file_encoding

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def find_file_encoding(fname):
    import io
    encodings = ['utf-8', 'windows-1250', 'windows-1252']
    for e in encodings:
        try:
            fh = io.open(fname, 'r', encoding=e)
            fh.readlines()
            fh.seek(0)
        except UnicodeDecodeError:
            # print('got unicode error with %s , trying different encoding' % e)
            continue
        else:
            # print('opening the file with encoding:  %s ' % e)
            return e
    else:
        return None 
Developer: pelednoam, Project: mmvt, Lines: 18, Source: mmvt_utils.py

Example 6: calculate_psnr_fast_srgb

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def calculate_psnr_fast_srgb(prediction, target):
    avg_psnr = 0
    srgb_params = init_colortransformation_gamma()
    psnr_list = []
    for i in range(prediction.shape[0]):
        ref = target[i]
        out = prediction[i]
        out = out.transpose((2, 0, 1))
        out = np.clip(out, 0, 255)
        result_rgb = apply_colortransformation_gamma(np.expand_dims(out,0), srgb_params)
        result_rgb = np.clip(result_rgb[0], 0, 255)
        result_rgb = result_rgb.transpose((1, 2, 0))
        #io.imsave(file.replace('_output','_output_srgb'),result_rgb.astype('uint8'))
        result_rgb = result_rgb.astype(np.float32)
        ref = ref/255
        result_rgb = result_rgb/255
        psnr = 10 * np.log10(1**2/np.mean((ref - result_rgb)**2))
        result_rgb = result_rgb * 255
        result_rgb = result_rgb.transpose((2, 0, 1))
        psnr_list.append(psnr)
    return psnr_list, torch.FloatTensor(result_rgb[None,:]) 
Developer: cig-skoltech, Project: deep_demosaick, Lines: 23, Source: utils.py

Example 7: _openResources

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def _openResources(self):
        """ Uses numpy.loadtxt to open the underlying file
        """
        self._dictionary = scipy.io.loadmat(self._fileName) 
Developer: titusjan, Project: argos, Lines: 6, Source: scipyio.py
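
A hedged variant of Example 7: scipy.io.loadmat accepts keyword options such as squeeze_me=True (drop singleton MATLAB dimensions) and struct_as_record=False (return MATLAB structs as attribute-style objects), which can make the resulting dictionary easier to inspect in a tree viewer. The file name below is an assumption.

import scipy.io

d = scipy.io.loadmat('data.mat', squeeze_me=True, struct_as_record=False)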

Example 8: main

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def main():
	# for AwA dataset: Perfectly works.
	opts = parse_args()
	awa = scipy.io.loadmat('awa_demo_data.mat')
	train_data = awa['X_tr']
	test_data = awa['X_te']
	train_class_attributes_labels_continuous_allset = awa['S_tr']
	opts.test_labels = awa['test_labels']
	opts.test_classes_id = awa['testclasses_id']
	test_class_attributes_labels_continuous = awa['S_te_gt']
	
	##### Normalize the data
	train_data = normalizeFeature(train_data.transpose()).transpose() 

	##### Training
	# SAE
	W = SAE(train_data.transpose(), train_class_attributes_labels_continuous_allset.transpose(), opts.ld) 

	##### Test
	opts.HITK = 1
	
	# [F --> S], projecting data from feature space to semantic space: 84.68% for AwA dataset
	semantic_predicted = np.dot(test_data, normalizeFeature(W).transpose())
	[zsl_accuracy, y_hit_k] = zsl_acc(semantic_predicted, test_class_attributes_labels_continuous, opts)
	print('[1] zsl accuracy for AwA dataset [F >>> S]: {:.2f}%'.format(zsl_accuracy))

	# [S --> F], projecting from semantic to visual space: 84.00% for AwA dataset
	test_predicted = np.dot(normalizeFeature(test_class_attributes_labels_continuous.transpose()).transpose(), normalizeFeature(W))
	[zsl_accuracy, y_hit_k] = zsl_acc(test_data, test_predicted, opts)
	print('[2] zsl accuracy for AwA dataset [S >>> F]: {:.2f}%'.format(zsl_accuracy)) 
Developer: hoseong-kim, Project: sae-pytorch, Lines: 32, Source: sae.py

Example 9: initialize_transformer

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def initialize_transformer(image_mean):
  shape = (10*16, 3, 227, 227)
  transformer = caffe.io.Transformer({'data': shape})
  channel_mean = np.zeros((3,227,227))
  for channel_index, mean_val in enumerate(image_mean):
    channel_mean[channel_index, ...] = mean_val
  transformer.set_mean('data', channel_mean)
  transformer.set_raw_scale('data', 255)
  transformer.set_channel_swap('data', (2, 1, 0))
  transformer.set_transpose('data', (2, 0, 1))
  #transformer.set_is_flow('data', is_flow)
  return transformer 
Developer: MikeMpapa, Project: CNNs-Speech-Music-Discrimination, Lines: 14, Source: ClassifyWav.py

Example 10: singleFrame_classify_video

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def singleFrame_classify_video(signal, net, transformer, with_smoothing, classNamesCNN):
    batch_size = 1 
    input_images = []

    input_im = caffe.io.load_image(signal.replace(".wav",".png"))        
    input_images.append(input_im)
    os.remove(signal.replace(".wav",".png"))    
    #Initialize predictions matrix                
    output_predictions = np.zeros((len(input_images),2))
    output_classes = []
    #print [method for method in dir(net) if callable(getattr(net, method))]    

    for i in range(0,len(input_images)):        
        # print "Classifying Spectrogram: ",i+1         
        clip_input = input_images[i:min(i+batch_size, len(input_images))] #get every image -- batch_size==1
        clip_input = caffe.io.oversample(clip_input,[227,227]) #make it 227x227        
        caffe_in = np.zeros(np.array(clip_input.shape)[[0,3,1,2]], dtype=np.float32) #initialize input matrix
        for ix, inputs in enumerate(clip_input):
            caffe_in[ix] = transformer.preprocess('data', inputs)  # transform input data appropriately and add to input matrix
        net.blobs['data'].reshape(caffe_in.shape[0], caffe_in.shape[1], caffe_in.shape[2], caffe_in.shape[3]) #make input caffe readable        
        out = net.forward_all(data=caffe_in) #feed input to the network
        output_predictions[i:i+batch_size] = np.mean(out['probs'].reshape(10, caffe_in.shape[0] // 10, 2), 0)  # predict labels (integer division for Python 3)
        
        #Store predicted Labels without smoothing        
        iMAX = output_predictions[i:i+batch_size].argmax(axis=1)[0]
        prediction = classNamesCNN[iMAX]
        output_classes.append(prediction)
        #print "Predicted Label for file -->  ", signal.upper() ,":",    prediction
    return output_classes, output_predictions 
Developer: MikeMpapa, Project: CNNs-Speech-Music-Discrimination, Lines: 31, Source: ClassifyWav.py

Example 11: save_model

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def save_model(self, weight_path, pmf_path=None):
        self.saver.save(self.sess, weight_path)
        logging.info("Weights saved at " + weight_path)
        if pmf_path is not None:
            scipy.io.savemat(pmf_path,{"m_U": self.m_U, "m_V": self.m_V, "m_theta": self.m_theta})
            logging.info("Weights saved at " + pmf_path) 
Developer: eelxpeng, Project: CollaborativeVAE, Lines: 8, Source: cvae.py

Example 12: load_model

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def load_model(self, weight_path, pmf_path=None):
        logging.info("Loading weights from " + weight_path)
        self.saver.restore(self.sess, weight_path)
        if pmf_path is not None:
            logging.info("Loading pmf data from " + pmf_path)
            data = scipy.io.loadmat(pmf_path)
            self.m_U[:] = data["m_U"]
            self.m_V[:] = data["m_V"]
            self.m_theta[:] = data["m_theta"] 
Developer: eelxpeng, Project: CollaborativeVAE, Lines: 11, Source: cvae.py

Example 13: load_mat_to_bag

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def load_mat_to_bag(mat_fname):
    import scipy.io as sio
    return Bag(dict(**sio.loadmat(mat_fname))) 
Developer: pelednoam, Project: mmvt, Lines: 5, Source: mmvt_utils.py
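
A note on the dict(**sio.loadmat(...)) pattern in Example 13: loadmat also returns the metadata keys '__header__', '__version__' and '__globals__'. A hedged sketch that filters them out before wrapping the result (the file name is an assumption; Bag is the mmvt helper used above):

import scipy.io as sio

raw = sio.loadmat('electrodes.mat')
data = {k: v for k, v in raw.items() if not k.startswith('__')}
# bag = Bag(data)  # wrap as in Example 13, if Bag is available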

Example 14: sta_calculation_parallel

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def sta_calculation_parallel(arg_in):
    
    WN_stim = arg_in[0]
    binned_spikes = arg_in[1]
    which_spikes = arg_in[2]
    STA_temporal_length = arg_in[3]
    stim_size = arg_in[4]
    fname = arg_in[5]
        
    ####################
    ### Calculate STA ##
    ####################

    ## Swap out fastest version here 
    _, n_color_channels, n_pixels = WN_stim.shape
    STA = np.zeros((STA_temporal_length, n_color_channels, n_pixels))
    for i in range(which_spikes.shape[0]):
        bin_number = which_spikes[i]
        STA += binned_spikes[bin_number]*WN_stim[bin_number-(STA_temporal_length-1):bin_number+1]

    if which_spikes.shape[0] == 0:
        STA += 0.5

    # full sta
    if np.sum(binned_spikes[STA_temporal_length:])>0:
        STA = STA/np.sum(binned_spikes[STA_temporal_length:])
    STA = STA.reshape(STA_temporal_length, n_color_channels,
                      stim_size[0], stim_size[1])
    STA = STA.transpose(2,3,1,0)

    scipy.io.savemat(fname, mdict={'temp_stas': STA}) 
Developer: paninski-lab, Project: yass, Lines: 33, Source: run.py

Example 15: test_mat_read

# Required module: import scipy [as alias]
# Or: from scipy import io [as alias]
def test_mat_read(self):
        # Test mat file reading and writing for the SPM analyze types
        img_klass = self.image_class
        arr = np.arange(24, dtype=np.int32).reshape((2,3,4))
        aff = np.diag([2,3,4,1]) # no LR flip in affine
        img = img_klass(arr, aff)
        fm = img.file_map
        for key, value in fm.items():
            value.fileobj = BytesIO()
        # Test round trip
        img.to_file_map()
        r_img = img_klass.from_file_map(fm)
        assert_array_equal(r_img.get_data(), arr)
        assert_array_equal(r_img.get_affine(), aff)
        # mat files are for matlab and have 111 voxel origins.  We need to
        # adjust for that, when loading and saving.  Check for signs of that in
        # the saved mat file
        mat_fileobj = img.file_map['mat'].fileobj
        from scipy.io import loadmat, savemat
        mat_fileobj.seek(0)
        mats = loadmat(mat_fileobj)
        assert_true('M' in mats and 'mat' in mats)
        from_111 = np.eye(4)
        from_111[:3,3] = -1
        to_111 = np.eye(4)
        to_111[:3,3] = 1
        assert_array_equal(mats['mat'], np.dot(aff, from_111))
        # The M matrix does not include flips, so if we only
        # have the M matrix in the mat file, and we have default flipping, the
        # mat resulting should have a flip.  The 'mat' matrix does include flips
        # and so should be unaffected by the flipping.  If both are present we
        # prefer the 'mat' matrix.
        assert_true(img.get_header().default_x_flip) # check the default
        flipper = np.diag([-1,1,1,1])
        assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111)))
        mat_fileobj.seek(0)
        savemat(mat_fileobj, dict(M=np.diag([3,4,5,1]), mat=np.diag([6,7,8,1])))
        # Check we are preferring the 'mat' matrix
        r_img = img_klass.from_file_map(fm)
        assert_array_equal(r_img.get_data(), arr)
        assert_array_equal(r_img.get_affine(),
                           np.dot(np.diag([6,7,8,1]), to_111))
        # But will use M if present
        mat_fileobj.seek(0)
        mat_fileobj.truncate(0)
        savemat(mat_fileobj, dict(M=np.diag([3,4,5,1])))
        r_img = img_klass.from_file_map(fm)
        assert_array_equal(r_img.get_data(), arr)
        assert_array_equal(r_img.get_affine(),
                           np.dot(np.diag([3,4,5,1]), np.dot(flipper, to_111))) 
Developer: ME-ICA, Project: me-ica, Lines: 52, Source: test_spm99analyze.py
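
Example 15 relies on loadmat and savemat accepting open file-like objects as well as file names. A minimal standalone sketch of that behaviour:

import io
import numpy as np
from scipy.io import loadmat, savemat

buf = io.BytesIO()
savemat(buf, {'M': np.diag([3, 4, 5, 1])})
buf.seek(0)
print(loadmat(buf)['M'])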


Note: The scipy.io examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.