

Python matlab.loadmat Function Code Examples

This article collects typical usage examples of the Python function scipy.io.matlab.loadmat. If you are wondering how loadmat is used in practice, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.


Below are 15 code examples of the loadmat function, sorted by popularity by default; examples that readers found useful rank higher.
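
Before diving into the examples, here is a minimal usage sketch of what loadmat returns (the file name 'data.mat' and the variable name 'A' are placeholders, not taken from any example below): loadmat reads a MATLAB .mat file into a plain dict that maps variable names to NumPy arrays.

import numpy as np
from scipy.io.matlab import loadmat

mat = loadmat('data.mat')      # dict: variable name -> numpy array
print(sorted(mat.keys()))      # also contains '__header__', '__version__', '__globals__'
A = mat['A']                   # each MATLAB variable arrives as a numpy array
print(A.shape, A.dtype)        # note: MATLAB matrices are at least 2-D, e.g. (1, n)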

Example 1: test_FindWavKurt

    def test_FindWavKurt(self):
        from scipy.io.matlab import loadmat

        N = 16
        fcut = 0.4
        level_index = 11
        freq_index = 24
        lev = self.level_w[level_index]

        base_path = os.getenv("WAVELOC_PATH")
        matlab_file = os.path.join(base_path, "test_data", "c.mat")
        c_dict = loadmat(matlab_file)
        c_exp = c_dict["c"]

        matlab_file = os.path.join(base_path, "test_data", "S.mat")
        S_dict = loadmat(matlab_file)
        S_exp = S_dict["S"]

        # get bw and frequency (Hz)
        bw_hz, fc_hz, fi, l1 = getBandwidthAndFrequency(
            self.nlevel, self.Fs, self.level_w, self.freq_w, level_index, freq_index
        )

        # get basic filter parameters
        h, g, h1, h2, h3 = get_h_parameters(N, fcut)
        c, s, threshold, Bw, fc = Find_wav_kurt(self.x, h, g, h1, h2, h3, self.nlevel, lev, fi, Fs=self.Fs)
        S = getFTSquaredEnvelope(c)

        # do tests
        self.assertAlmostEqual(Bw * self.Fs, bw_hz)
        self.assertAlmostEqual(fc * self.Fs, fc_hz)
        np.testing.assert_allclose(c.flatten(), c_exp.flatten(), atol=1e-3)
        np.testing.assert_allclose(S.flatten(), S_exp.flatten(), atol=1e-6)
Developer: nlanget, Project: waveloc, Lines: 33, Source: test_kurtogram.py

Example 2: test_FindWavKurt

  def test_FindWavKurt(self):
    from scipy.io.matlab import loadmat

    N = 16
    fcut = 0.4
    level_index = 11
    freq_index = 24
    lev = self.level_w[level_index]

    c_dict = loadmat("test_data/c.mat")
    c_exp = c_dict['c']

    S_dict = loadmat("test_data/S.mat")
    S_exp = S_dict['S']

    # get bw and frequency (Hz)
    bw_hz, fc_hz, fi = getBandwidthAndFrequency(self.nlevel, self.Fs,
                                                self.level_w, self.freq_w,
                                                level_index, freq_index)

    # get basic filter parameters
    h, g, h1, h2, h3 = get_h_parameters(N, fcut)
    c, s, threshold, Bw, fc = Find_wav_kurt(self.x, h, g, h1, h2, h3,
                                            self.nlevel, lev, fi, self.Fs)

    S = getFTSquaredEnvelope(c)

    # do tests
    self.assertAlmostEqual(Bw * self.Fs, bw_hz)
    self.assertAlmostEqual(fc * self.Fs, fc_hz)
    np.testing.assert_allclose(c.flatten(), c_exp.flatten(), atol=1e-3)
    np.testing.assert_allclose(S.flatten(), S_exp.flatten(), atol=1e-6)
Developer: amaggi, Project: seismokurt, Lines: 30, Source: test_kurtogram.py

Example 3: __get_excit_wfm

    def __get_excit_wfm(filepath):
        """
        Returns the excitation Band Excitation (BE) waveform data present
        in the parms.mat / more_parms.mat file.

        Parameters
        ------------
        filepath : String / unicode
            Absolute filepath of the .mat parameter file

        Returns
        -----------
        fft_full : 1D numpy complex array
            FFT of the forward BE waveform
        fft_full_rev : 1D numpy complex array or None
            FFT of the reverse BE waveform (None for more_parms.mat files)
        bin_inds : 1D numpy uint array or None
            Zero-based indices of the BE bins (None for more_parms.mat files)
        """
        if not path.exists(filepath):
            warn('BEPSndfTranslator - NO more_parms.mat file found')
            # fall back to a dummy waveform so downstream code can proceed
            return np.zeros(1000, dtype=np.float32), None, None

        if 'more_parms' in filepath:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave'])
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            bin_inds = None
            fft_full_rev = None
        else:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave', 'FFT_BE_rev_wave', 'BE_bin_ind'])
            bin_inds = np.uint(np.squeeze(matread['BE_bin_ind'])) - 1
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            fft_full_rev = np.complex64(np.squeeze(matread['FFT_BE_rev_wave']))

        return fft_full, fft_full_rev, bin_inds
Developer: pycroscopy, Project: pycroscopy, Lines: 31, Source: beps_ndf.py
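
Example 3 relies on the variable_names keyword of loadmat, which reads only the listed variables and skips the rest of the file. A minimal sketch of the effect ('parms.mat' is a placeholder path):

from scipy.io.matlab import loadmat

# Load only 'FFT_BE_wave'; other variables in the file are not read,
# which saves time and memory on large parameter files.
matread = loadmat('parms.mat', variable_names=['FFT_BE_wave'])
fft_wave = matread['FFT_BE_wave']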

Example 4: test_srmr_norm

def test_srmr_norm():
    fs = 16000
    s = loadmat("test/test.mat")["s"][:,0]

    correct_ratios = loadmat("test/correct_ratios.mat")['correct_ratios'][0]
    srmr = SRMR(fs, fast=False, norm=True, max_cf=30)
    out = srmr.predict(s, s, s)
    ratio_norm, avg_energy_norm = out['p']['srmr'], out['avg_energy']
    assert np.allclose(ratio_norm, correct_ratios[3], rtol=1e-6, atol=1e-12)
Developer: achabotl, Project: SRMRpy, Lines: 9, Source: test_srmr.py

Example 5: test_srmr_slow

def test_srmr_slow():
    fs = 16000
    s = loadmat("test/test.mat")["s"][:,0]

    correct_ratios = loadmat("test/correct_ratios.mat")['correct_ratios'][0]
    srmr = SRMR(fs, fast=False)
    out = srmr.predict(s, s, s)
    ratio_slow, avg_energy_slow = out['p']['srmr'], out['avg_energy']
    assert np.allclose(ratio_slow, correct_ratios[0], rtol=1e-6, atol=1e-12)
Developer: achabotl, Project: SRMRpy, Lines: 9, Source: test_srmr.py

Example 6: timeseries_design

def timeseries_design(subject_id,whatParadigm,onsets_dir):
    import scipy.signal
    import scipy.special as sp
    import numpy as np
    import math
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    from scipy.io.matlab import loadmat
    import glob
    import os
    #from Facematch import onsets_dir
    print "Entered timeseries_design once with arguments SUBID = "+subject_id+", paradigm = "+whatParadigm+", and onsets dir = "+onsets_dir+"."
    output = []
    regressor_names = None
    regressors = None
    onsets_temp = os.path.join(onsets_dir, subject_id+'*onsets.mat')
    onsets_files = sorted(glob.glob(onsets_temp))
    testmat = loadmat(onsets_files[0], struct_as_record=False)
    testnames = testmat['names'][0]
    names_count_vec = np.zeros(len(testnames))

    for r in range(len(onsets_files)):
        mat = loadmat(onsets_files[r], struct_as_record=False)
        ons = mat['onsets'][0]
        nam = mat['names'][0]
        dur = mat['durations'][0]

        names = []
        durations = []
        run_onsets = []
        for condition in range(len(nam)):
            for onset in range(len(ons[condition][0])):
                names_count_vec[condition] += 1
                names.append(str(nam[condition][0]) + '_%d' % (names_count_vec[condition]))
                run_onsets.append([ons[condition][0][onset]])
                durations.append(dur[condition][0])
  

        print(run_onsets)
        print(names)
        print(durations)
        output.insert(r,
            Bunch(conditions=deepcopy(names),
                onsets=deepcopy(run_onsets),
                durations=deepcopy(durations),
                amplitudes=None,
                tmod=None,
                pmod=None,
                regressor_names=None,
                regressors=regressors))  # here is where we can do linear, quad, etc. detrending
        
    return output
Developer: jsalva, Project: gates_analysis, Lines: 54, Source: beta_series_analysis.py
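
Example 6 passes struct_as_record=False, which changes how MATLAB structs are represented on loading. A short sketch of the two modes (the file name is a placeholder):

from scipy.io.matlab import loadmat

# struct_as_record=True (default): structs load as numpy record arrays,
# accessed like rec['field'][0, 0].
# struct_as_record=False: structs load as mat_struct objects, so fields
# are reachable as attributes, e.g. obj.field.
# squeeze_me=True additionally drops singleton dimensions (optional).
mat = loadmat('onsets.mat', struct_as_record=False, squeeze_me=True)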

Example 7: main

def main(argv):
	dim = 64
	imidx = 7

	# load unnormalized log-likelihood
	results = loadmat('results/vanhateren/poe/AIS_GibbsTrain_white_studentt_L=064_M=256_B=0100000_learner=PMPFdH1_20120523T112539.mat')
	loglik = -mean(results['E'][:, :10000]) - results['logZ']

	# load importance weights for partition function
	ais_weights = loadmat('results/vanhateren/poe/matlab_up=022150_T=10000000_ais.mat')['logweights']

	# number of samples to probe
	num_samples = 2**arange(0, ceil(log2(ais_weights.shape[0])) + 1, dtype='int32')
	num_samples[-1] = max([num_samples[-1], ais_weights.shape[0]])
	num_repetitions = ceil(2.**16 / num_samples)
	estimates = []

	print(loadmat('results/vanhateren/poe/matlab_up=022150_T=10000000_ais.mat')['t_range'][:, imidx], 'intermediate distributions')

	logZ = logmeanexp(ais_weights[:, -1])

	for k in arange(len(num_samples)):
		estimates_ = []

		for _ in arange(num_repetitions[k]):
			# pick samples at random
			idx = permutation(ais_weights.shape[0])[:num_samples[k]]

			# estimate log-partf. using num_samples[k] samples
			loglik_ = loglik + (logZ - logmeanexp(ais_weights[idx, imidx]))

			# store estimate of log-likelihood 
			estimates_.append(loglik_)

		estimates.append(mean(estimates_))

	gca().width = 5
	gca().height = 5
#	gca().ymin = 0.85
#	gca().ymax = 1.55
#	ytick([0.9, 1.1, 1.3, 1.5])
	semilogx(num_samples, asarray(estimates) / log(2.) / dim, '.-')
	xlabel('number of AIS samples')
	ylabel('estimated log-likelihood')
	savefig('results/vanhateren/convergence_poe.tex')
	draw()

	return 0
Developer: lucastheis, Project: isa, Lines: 49, Source: convergence_poe.py

Example 8: load_dataset

def load_dataset(dataset):
    if dataset == 'umls':
        mat = loadmat('../data/%s/uml.mat' % (dataset))
        T = np.array(mat['Rs'], np.float32)
    elif dataset == 'nation':
        mat = loadmat('../data/%s/dnations.mat' % (dataset))
        T = np.array(mat['R'], np.float32)
    elif dataset == 'kinship':
        mat = loadmat('../data/%s/alyawarradata.mat' % (dataset))
        T = np.array(mat['Rs'], np.float32)
    elif dataset == 'wordnet':
        T = pickle.load(open('../data/%s/reduced_wordnet.pkl' % (dataset), 'rb'))

    T[np.isnan(T)] = 0
    return T
Developer: arongdari, Project: almc, Lines: 15, Source: amdc_runner.py

Example 9: get_top_scores

 def get_top_scores(self, i=100, force_num=True):
     fn_scores = os.path.join(self.ds.path, "cpmc", "MySegmentsMat", self.name, "scores.mat")
     sc = ml.loadmat(fn_scores)["scores"]
     scores = list(np.sort(sc.ravel())[-1 : (-1 - i) : -1])
     if len(scores) < i and force_num:
         scores = (list(scores) * i)[:i]  # pad by repetition up to i entries
     return scores
Developer: amiltonwong, Project: pottics, Lines: 7, Source: dataset.py
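
The reversed slice in get_top_scores pulls the i largest scores in descending order; a small equivalent sketch (the array contents here are made up):

import numpy as np

sc = np.array([0.2, 0.9, 0.5, 0.7])
i = 3
top = np.sort(sc.ravel())[-1:(-1 - i):-1]   # slice trick used above
same = np.sort(sc.ravel())[::-1][:i]        # equivalent, arguably clearer
assert np.array_equal(top, same)            # both give [0.9, 0.7, 0.5]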

Example 10: ReadDatasetFile

def ReadDatasetFile(dataset_file_path):
  """Reads dataset file in Revisited Oxford/Paris ".mat" format.

  Args:
    dataset_file_path: Path to dataset file, in .mat format.

  Returns:
    query_list: List of query image names.
    index_list: List of index image names.
    ground_truth: List containing ground-truth information for dataset. Each
      entry is a dict corresponding to the ground-truth information for a query.
      The dict may have keys 'easy', 'hard', 'junk' or 'ok', mapping to a list
      of integers; additionally, it has a key 'bbx' mapping to a list of floats
      with bounding box coordinates.
  """
  with tf.gfile.GFile(dataset_file_path, 'rb') as f:
    cfg = matlab.loadmat(f)

  # Parse outputs according to the specificities of the dataset file.
  query_list = [str(im_array[0]) for im_array in np.squeeze(cfg['qimlist'])]
  index_list = [str(im_array[0]) for im_array in np.squeeze(cfg['imlist'])]
  ground_truth_raw = np.squeeze(cfg['gnd'])
  ground_truth = []
  for query_ground_truth_raw in ground_truth_raw:
    query_ground_truth = {}
    for ground_truth_key in _GROUND_TRUTH_KEYS:
      if ground_truth_key in query_ground_truth_raw.dtype.names:
        adjusted_labels = query_ground_truth_raw[ground_truth_key] - 1
        query_ground_truth[ground_truth_key] = adjusted_labels.flatten()

    query_ground_truth['bbx'] = np.squeeze(query_ground_truth_raw['bbx'])
    ground_truth.append(query_ground_truth)

  return query_list, index_list, ground_truth
Developer: rder96, Project: models, Lines: 34, Source: dataset.py
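
Example 10 hands loadmat an open file object rather than a path. loadmat accepts file-like objects, but the stream must be opened in binary mode since .mat is a binary format. A sketch with the standard built-in open (the file name is a placeholder):

from scipy.io.matlab import loadmat

# loadmat accepts a binary file-like object as well as a path string.
with open('gnd_roxford5k.mat', 'rb') as f:
    cfg = loadmat(f)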

Example 11: show_predictions

def show_predictions(alpha="alpha", symbol="GE", xtn=".PNG"):
    if isinstance(alpha, str):
        print("Loading file named " + alpha + ".mat")
        # load a matlab-style set of matrices from the file named by the string alpha
        a = mat.loadmat(alpha + ".mat", mat_dtype=False)
        if alpha in a:
            # get the variable with the name of the string in alpha
            alpha = a[alpha].reshape(-1)
        else:
            # get the first non-hidden key and reshape its value into a 1-D array
            first_key = next(k for k in a if not k.startswith("__"))
            alpha = a[first_key].reshape(-1)
    print("Loading financial data for stock symbol", symbol)
    r = np.recfromcsv("/home/hobs/Desktop/References/quant/lyle/data/" + symbol + "_yahoo.csv", skiprows=1)
    r.sort()
    r.high = r.high * r.adj_close / r.close  # adjust the high and low prices for stock splits
    r.low = r.low * r.adj_close / r.close  # adjust the high and low prices for stock splits
    daily_returns = r.adj_close[1:] / r.adj_close[0:-1] - 1
    predictions = lfilt(alpha, daily_returns)
    print(
        "Plotting a scatter plot of",
        len(daily_returns),
        "returns vs",
        len(predictions),
        "predictions using a filter of length",
        len(alpha),
    )
    (ax, fig) = plot(predictions, daily_returns[len(alpha):], s="bo", xtn=".PNG")
    ax.set_xlabel("Predicted Returns")
    ax.set_ylabel("Actual Returns")
    big_mask = np.abs(predictions) > np.std(predictions) * 1.2
    bigs = predictions[big_mask]
    true_bigs = daily_returns[big_mask]
    (ax, fig) = plot(bigs, true_bigs, s="r.", xtn=".PNG")
    fig.show()
    return (predictions, daily_returns, bigs, true_bigs, big_mask)
Developer: hobson, Project: tagim, Lines: 34, Source: finance.py

Example 12: subtract_background_from_stacks

def subtract_background_from_stacks(scanfile, indir, outdir, scannumber=-1):
    """Subtract background from SAXS data in MAT-file stacks.
    """
    scans = read_yaml(scanfile)
    if scannumber > 0:
        scannos = [ scannumber ]
    else:
        scannos = sorted(scans.keys())
    for scanno in scannos:
        print("Scan #%03d" % scanno)
        try:
            bufscan = scans[scanno][0]
        except TypeError:
            print("Scan #%03d is a buffer" % scanno)
            continue
        try:
            conc = scans[scanno][1]
        except TypeError:
            print("No concentration for scan #02d." % scanno)
            conc = 1.0
        print("Using concentration %g g/l." % conc)
        stackname = "s%03d" % scanno
        stack = loadmat(indir+'/'+stackname+'.mat')[stackname]
        subs = np.zeros_like(stack)
        (npos, nrep, _, _) = stack.shape
        for pos in range(npos):
            print(pos)
            buf = get_bg(indir, bufscan, pos)
            for rep in range(nrep):
                subs[pos,rep,...] = errsubtract(stack[pos,rep,...], buf)
                subs[pos,rep,1:3,:] = subs[pos,rep,1:3,:] / conc
        outname = "subs%03d" % scanno
        savemat(outdir+'/'+outname + ".mat", {outname: subs}, do_compression=1,
                oned_as='row')
Developer: tpikonen, Project: solution, Lines: 35, Source: subtraction.py
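
Example 12 writes its result back with savemat. The oned_as='row' argument controls how 1-D arrays are stored; a small round-trip sketch ('tmp.mat' is a placeholder):

import numpy as np
from scipy.io.matlab import loadmat, savemat

vec = np.arange(5.0)
savemat('tmp.mat', {'vec': vec}, do_compression=True, oned_as='row')
back = loadmat('tmp.mat')['vec']          # comes back with shape (1, 5), not (5,)
assert np.array_equal(np.squeeze(back), vec)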

Example 13: preprocess_dataset

    def preprocess_dataset(self, dataset, n_jobs=-1, verbosity=2):
        """

        :param dataset:
        :param n_jobs:
        :return:
        """
        if self.skip:
            return

        if verbosity > 1: print("   Loading masks from .mat file")
        data = loadmat(self.path)
        masks = data[self.var_name][0]

        if not self.invert:
            masks_probe = masks.take(range(0, masks.size, 2))
            masks_gallery = masks.take(range(1, masks.size, 2))
        else:
            # inverted layout: probe masks at odd indices, gallery at even
            masks_probe = masks.take(range(1, masks.size, 2))
            masks_gallery = masks.take(range(0, masks.size, 2))

        dataset.probe.masks_train = list(masks_probe[dataset.train_indexes])
        dataset.probe.masks_test = list(masks_probe[dataset.test_indexes])
        dataset.gallery.masks_train = list(masks_gallery[dataset.train_indexes])
        dataset.gallery.masks_test = list(masks_gallery[dataset.test_indexes])
Developer: AShedko, Project: PyReID, Lines: 25, Source: preprocessing.py

Example 14: test

def test():
    """
    Test with Kinship dataset
    Use all positive triples and negative triples as a training set
    See how the reconstruction error is reduced during training
    """
    from scipy.io.matlab import loadmat
    mat = loadmat('../data/kinship/alyawarradata.mat')
    T = np.array(mat['Rs'], np.float32)
    T[T == 0] = -1  # set negative value to -1
    E, K = T.shape[0], T.shape[2]
    max_iter = E * E * K * 10

    n_dim = 10

    # p_idx = np.ravel_multi_index((T == 1).nonzero(), T.shape)  # raveled positive index
    # n_idx = np.ravel_multi_index((T == -1).nonzero(), T.shape)  # raveled negative index
    # model.fit(T, p_idx, n_idx, max_iter, e_gap=10000)

    training = np.random.binomial(1., 0.01, T.shape)
    testing = np.random.binomial(1., 0.5, T.shape)
    testing[training == 1] = 0

    model = AMDC(n_dim)
    model.population = True
    model.do_active_learning(T, training, 15000, testing)
Developer: arongdari, Project: almc, Lines: 26, Source: amdc.py

Example 15: read_mat_profile_files

def read_mat_profile_files(
        path,
        loc,
        var,
        dataSetName='test',
        dataSetType='ms'):
    """Reads generic time series from matlab file and converts data
    to python format"""
    varToChar = {'salt': 's', 'elev': 'e', 'temp': 't', 'u': 'u', 'v': 'v'}
    pattern = os.path.join(
        path,
        dataSetName + '.' + dataSetType + '.' + varToChar[var] + '.' + loc + '.mat')
    fList = sorted(glob.glob(pattern))
    if not fList:
        raise Exception('File not found: ' + pattern)
    f = fList[0]
    print('Reading', f)
    d = loadmat(f)
    t = d['t'].flatten()  # (1,nTime)
    z = d['z']  # (nVert,nTime)
    data = d['data']  # (nVert,nTime)
    # convert time from Matlab datenum (in PST) to epoch (UTC)
    time = datenumPSTToEpoch(t)
    # round to nearest minute
    time = np.round(time / 60.) * 60.
    print('  Loaded data range: ', str(timeArray.epochToDatetime(time[0])),
          ' -> ', str(timeArray.epochToDatetime(time[-1])))
    return time, z, data
Developer: tkarna, Project: crane, Lines: 34, Source: convSurrogateOutputToNC.py
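
The datenumPSTToEpoch helper in Example 15 is project-specific (from the crane package). Assuming PST is treated as a fixed UTC-8 offset, the underlying conversion would look roughly like this sketch; the real helper may handle daylight saving differently:

import numpy as np

def datenum_pst_to_epoch(datenum):
    # MATLAB datenum 719529.0 is 1970-01-01 00:00; one unit is one day.
    # Add 8 hours to shift a PST clock reading to UTC (fixed-offset assumption).
    return (np.asarray(datenum) - 719529.0) * 86400.0 + 8 * 3600.0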


Note: The scipy.io.matlab.loadmat examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code. Do not reproduce without permission.