

Python scipy.loadtxt Function Code Examples

This article collects typical usage examples of the Python function scipy.loadtxt. If you are wondering what scipy.loadtxt does, how to call it, or how it is used in real projects, the curated examples below should help.


The 15 loadtxt examples shown below are drawn from open-source projects and are ordered by popularity.
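Before the project-specific examples, here is a minimal, self-contained sketch of the call pattern nearly all of them share: load a whitespace-delimited text file and unpack its columns by transposing. Note that scipy.loadtxt was only ever a re-export of numpy.loadtxt; newer SciPy releases deprecate these top-level NumPy aliases and schedule them for removal, so numpy.loadtxt is the portable spelling today. The file name data.txt is made up for illustration.

import numpy as np

# write a small three-column file, then read it back and unpack the
# columns -- the same time/real/imag pattern several examples below use
np.savetxt("data.txt", [[0.0, 1.0, 2.0],
                        [0.1, 1.5, 2.5]])
time, real, imag = np.loadtxt("data.txt").T
print(time, real, imag)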

Example 1: time_storage

import scipy as sp

# getNameReadWrite and functionaltimes_readwrite are helpers from the same
# module (functionaltimes_readwrite is shown in Example 5 below).
def time_storage(**cfg):
  nStore     = int(cfg['MEFourier']['{storage_harmonic}'])
  nWriteTime = int(cfg['OCTime']['{write_timecnt}'])
  nReadTime  = int(cfg['OCTime']['{read_timecnt}'])
  nStoreTime = int(cfg['METime']['{storage_timecnt}'])
  omega_c    = float(cfg['NVSETUP']['{omega_c}'])

  name_readwrite = getNameReadWrite(**cfg)
  prefix         = cfg['FILES']['{prefix}']
  filename       = name_readwrite + "harmonic{0:0>4}_cavityMode_".format(0)
  postfix        = cfg['FILES']['{postfix}']

  print(" read data : storage and read time")

  ### reading <down> cavity-amplitudes ###################################
  # each file holds three columns (time, real, imag); only time is kept
  timeStore, __, __ = sp.loadtxt(prefix + filename + "reg2_store_down" + postfix).T
  timeRead,  __, __ = sp.loadtxt(prefix + filename + "reg3_read_stored_down" + postfix).T

  time = functionaltimes_readwrite(**cfg)
  time['store'] = timeStore
  time['read']  = timeRead

  time['ti'] = timeRead[int(time['idx_ti']) - 1]
  time['tf'] = timeRead[int(time['idx_tf']) - 1]

  return time
Author: bhartl, Project: optimal-control, Lines: 27, Source: IOHelper.py

Example 2: setUp

    # A unittest fixture from test_mtset.py; sp is scipy, and base_folder
    # points at the test-data directory defined elsewhere in the module.
    def setUp(self):
        sp.random.seed(0)
        self.Y  = sp.loadtxt(os.path.join(base_folder, 'Y.txt'))
        self.XX = sp.loadtxt(os.path.join(base_folder, 'XX.txt'))
        self.Xr = sp.loadtxt(os.path.join(base_folder, 'Xr.txt'))
        self.N, self.P = self.Y.shape
        self.write = False
Author: PMBio, Project: limix, Lines: 7, Source: test_mtset.py

Example 3: compare_mixed_files

import scipy as sp

def compare_mixed_files(file1, file2, tol=1e-8, delimiter="\t"):
    '''
    Given two files, compare their contents, treating numeric cells as equal
    up to absolute tolerance tol.
    Returns: val, msg
    where val is True/False (True means the files match) and msg describes
    any failure.
    '''
    dat1 = sp.loadtxt(file1, dtype='str', delimiter=delimiter, comments=None)
    dat2 = sp.loadtxt(file2, dtype='str', delimiter=delimiter, comments=None)

    ncol1 = dat1[0].size
    ncol2 = dat2[0].size

    if ncol1 != ncol2:
        return False, "num columns do not match up"

    try:
        r_count = dat1.shape[0]
        c_count = dat1.shape[1]
    except IndexError:
        # file contains just a single column
        return sp.all(dat1 == dat2), "single column result doesn't match exactly ('{0}')".format(file1)

    for r in range(r_count):
        for c in range(c_count):
            val1 = dat1[r, c]
            val2 = dat2[r, c]
            if val1 != val2:
                try:
                    f1 = float(val1)
                    f2 = float(val2)
                except ValueError:
                    return False, "Values do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2)
                if abs(f1 - f2) > tol:
                    return False, "Values too different (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2)
    return True, "files are comparable within abs tolerance=%e" % tol
Author: 42binwang, Project: FaST-LMM, Lines: 35, Source: util.py
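A hypothetical usage sketch for the function above (the file names and values are made up): two tab-delimited files that differ by less than the tolerance compare as equal.

import numpy as np

np.savetxt("run_a.txt", [[1.0, 2.0], [3.0, 4.0]], delimiter="\t")
np.savetxt("run_b.txt", [[1.0, 2.0], [3.0, 4.0 + 1e-10]], delimiter="\t")

ok, msg = compare_mixed_files("run_a.txt", "run_b.txt", tol=1e-8)
print(ok, msg)  # True -- the 1e-10 difference is within tolerance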

Example 4: get_average_column

from os import listdir
from os.path import isfile, join

from scipy import loadtxt

def get_average_column(path, column=0):
    """
    Get the index-based average column for a series of results files.

    Args:
        path (str): the path containing the results files.

    Kwargs:
        column (int): the column index in a results file.

    Returns:
        A numpy.ndarray containing the average values for the specified
        column index over a series of results files.

    """
    files = [f for f in listdir(path) if isfile(join(path, f))
             and f.endswith(".txt")]

    col_seq = (column,)  # usecols expects a sequence of column indices

    sum_col = loadtxt(join(path, files[0]), usecols=col_seq, unpack=True)

    for fname in files[1:]:
        sum_col = sum_col + loadtxt(join(path, fname), usecols=col_seq,
                                    unpack=True)

    return sum_col / len(files)
Author: kingarpharazon, Project: capsoplot, Lines: 27, Source: capsoplot.py
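A hypothetical usage sketch (directory and file names made up): average the second column across two result files.

import os
import numpy as np

os.makedirs("results", exist_ok=True)
np.savetxt(os.path.join("results", "run1.txt"), [[0, 10.0], [1, 20.0]])
np.savetxt(os.path.join("results", "run2.txt"), [[0, 30.0], [1, 40.0]])

print(get_average_column("results", column=1))  # -> [20. 30.]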

Example 5: functionaltimes_readwrite

import configparser as cp  # the ConfigParser module under Python 2
import scipy as sp

# getNameReadWrite is another helper from the same module.
def functionaltimes_readwrite(**cfg):
  nWrite = int(cfg['OCFourier']['{write_harmonic}'])
  nTime  = int(cfg['OCTime']['{read_timecnt}'])

  prefix         = cfg['FILES']['{prefix}']
  postfix        = cfg['FILES']['{postfix}']
  name_readwrite = getNameReadWrite(**cfg)
  name_optimized = cfg['FILES']['{name_optimized}']

  print(" read data : functional time")
  timeRead = sp.zeros([nTime], float)

  # load time for reading section memory - part of reg2
  filename = prefix + "harmonic{0:0>4}_cavityMode_reg2_memory".format(nWrite) + postfix
  timeRead, __, __ = sp.loadtxt(filename).T

  filename = prefix + "harmonic{0:0>4}_cavityMode_reg1_write".format(nWrite) + postfix
  timeWrite, __, __ = sp.loadtxt(filename).T

  # read functional times t2, t3
  configParser2 = cp.ConfigParser()
  configParser2.read(prefix + name_readwrite + name_optimized + "FunctionalTimes" + postfix)

  # pull the raw [functime] section out of the parser as a dict
  time = configParser2.__dict__['_sections']['functime']  # in seconds*wc
  time['read']  = timeRead   # in seconds*wc
  time['write'] = timeWrite  # in seconds*wc

  cfg['METime']['{fidelity_ti}'] = time['idx_ti']
  cfg['METime']['{fidelity_tf}'] = time['idx_tf']

  return time
Author: bhartl, Project: optimal-control, Lines: 33, Source: IOHelper.py

Example 6: harmonics_readwrite

import scipy as sp

def harmonics_readwrite(**cfg):
  nWrite = int(cfg['OCFourier']['{write_harmonic}'])
  nRead  = int(cfg['OCFourier']['{read_harmonic}'])
  nTime  = int(cfg['OCTime']['{read_timecnt}'])
  wTime  = int(cfg['OCTime']['{write_timecnt}'])

  prefix  = cfg['FILES']['{prefix}']
  postfix = cfg['FILES']['{postfix}']

  print(" read data : cavity modes")
  cavityWrite = sp.zeros([nWrite, wTime], complex)
  cavityMemo  = sp.zeros([nWrite, nTime], complex)
  cavityRead  = sp.zeros([nRead,  nTime], complex)

  # load write pulse (reg1) and memory (reg2) harmonics
  for iMemo in range(nWrite):
    filename = prefix + "harmonic" + "{0:0>4}".format(iMemo + 1) + "_cavityMode_reg1_write" + postfix
    __, real, imag = sp.loadtxt(filename).T  # equivalently: sp.loadtxt(filename, unpack=True)
    cavityWrite[iMemo, :] = real[:] + 1j * imag[:]

    filename = prefix + "harmonic" + "{0:0>4}".format(iMemo + 1) + "_cavityMode_reg2_memory" + postfix
    __, real, imag = sp.loadtxt(filename).T
    cavityMemo[iMemo, :] = real[:] + 1j * imag[:]

  # load read-out harmonics - also part of reg2
  for iRead in range(nRead):
    filename = prefix + "harmonic" + "{0:0>4}".format(iRead + 1) + "_cavityMode_reg2_read" + postfix
    __, real, imag = sp.loadtxt(filename).T
    cavityRead[iRead, :] = real[:] + 1j * imag[:]

  return cavityWrite, cavityMemo, cavityRead
Author: bhartl, Project: optimal-control, Lines: 34, Source: IOHelper.py

Example 7: convert_g012

    # sp is scipy, imported at module level in conversion.py
    def convert_g012(self, hdf, g012_file, chrom, start, end):
        """convert g012 file to LIMIX hdf5
        hdf: handle for hdf5 file (target)
        g012_file: filename of g012 file
        chrom: select chromosome for conversion
        start: select start position for conversion
        end: select end position for conversion
        """
        if (start is not None) or (end is not None) or (chrom is not None):
            print("cannot handle start/stop/chrom boundaries for g012 file")
            return
        # store
        if 'genotype' in hdf.keys():
            del hdf['genotype']
        genotype = hdf.create_group('genotype')
        col_header = genotype.create_group('col_header')
        row_header = genotype.create_group('row_header')
        # load position and meta information from the .indv/.pos side files
        indv_file = g012_file + '.indv'
        pos_file  = g012_file + '.pos'
        sample_ID = sp.loadtxt(indv_file, dtype='str')
        pos = sp.loadtxt(pos_file, dtype='str')
        chrom = pos[:, 0]
        pos = sp.array(pos[:, 1], dtype='int')

        row_header.create_dataset(name='sample_ID', data=sample_ID)
        col_header.create_dataset(name='chrom', data=chrom)
        col_header.create_dataset(name='pos', data=pos)
        # first column of a g012 file is the sample index; the rest are genotypes
        M = sp.loadtxt(g012_file, dtype='uint8')
        snps = M[:, 1:]
        genotype.create_dataset(name='matrix', data=snps,
                                chunks=(snps.shape[0], min(10000, snps.shape[1])),
                                compression='gzip')
Author: MMesbahU, Project: limix, Lines: 32, Source: conversion.py

Example 8: setUp

    # The same fixture as Example 2, from an older revision: SP is scipy and
    # the test data is read from a relative ./data directory.
    def setUp(self):
        SP.random.seed(0)
        self.Y  = SP.loadtxt('./data/Y.txt')
        self.XX = SP.loadtxt('./data/XX.txt')
        self.Xr = SP.loadtxt('./data/Xr.txt')
        self.N, self.P = self.Y.shape
        self.write = False
Author: PMBio, Project: mtSet, Lines: 7, Source: run_test.py

Example 9: main

def main(argv):
    import numpy as np
    import scipy
    from sklearn import metrics
    from sklearn.multiclass import OneVsOneClassifier
    from sklearn.naive_bayes import GaussianNB
    # The original used sklearn.cross_validation and metrics.zero_one_score,
    # both long removed; model_selection.cross_val_score and
    # metrics.accuracy_score are the modern equivalents.
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.tree import DecisionTreeClassifier
    from sklearn import preprocessing
    import similarity  # project-local module

    class ScaledSVC(SVC):
        def _scale(self, data):
            return preprocessing.scale(data)
        def fit(self, X, Y):
            return super(ScaledSVC, self).fit(self._scale(X), Y)
        def predict(self, X):
            return super(ScaledSVC, self).predict(self._scale(X))

    data, labels = scipy.loadtxt(argv[1]), scipy.loadtxt(argv[2])
    if len(argv) > 3:
        features = np.array([int(s) for s in argv[3].split(',')])
        data = data[:, features]

    def ovo(model, adj_strat):
        # BinaryTiloClassifier and the cut strategies are project-local
        return OneVsOneClassifier(BinaryTiloClassifier(model, adj_strat))

    classifiers = [
        ('TILO/PRC/Gaussian',
         ovo(PinchRatioCutStrategy(),
             similarity.Gaussian())),
        ("TILO/Nearest/Gaussian",
         ovo(NearestCutStrategy(),
             similarity.Gaussian())),
        ("TILO/PRC/KNN",
         ovo(PinchRatioCutStrategy(),
             similarity.KNN())),
        ("TILO/Nearest/KNN",
         ovo(NearestCutStrategy(),
             similarity.KNN())),
        ("SVC", ScaledSVC()),
        ("Gaussian Naive Bayes", GaussianNB()),
        ("K Neighbors", KNeighborsClassifier()),
        ("Decision Tree", DecisionTreeClassifier())]
    print('{:<30} {:<10}         RAND   Accuracy'.format('method', 'accuracy'))
    for name, c in classifiers:
        scores = cross_val_score(c, data, labels, cv=5)
        model = c.fit(data, labels)
        guesses = model.predict(data)
        acc = metrics.accuracy_score(labels, guesses)
        rand = metrics.adjusted_rand_score(labels, guesses)
        print('{:<30} {:.4f} +/- {:.4f} {: .4f} {:.4f}'.format(name, scores.mean(),
                                                               scores.std() / 2,
                                                               rand, acc))
Author: rsbowman, Project: sklearn-prc, Lines: 57, Source: classify.py

Example 10: calc_loss_deagg_suburb

import csv

import scipy
from scipy import loadtxt

def calc_loss_deagg_suburb(bval_path_file, total_building_loss_path_file,
                           site_db_path_file, file_out):
    """Given EQRM output data, produce a csv file showing loss per suburb.

    The produced csv file shows total building loss, total building
    value and loss as a percentage, all per suburb.

    bval_path_file - location and name of building value file produced by EQRM
    total_building_loss_path_file - location and name of the total building
      loss file
    site_db_path_file - location and name of the site database file

    Note: This can be generalised pretty easily, to get results
          deaggregated on other columns of the site_db
    """
    aggregate_on = ["SUBURB"]

    # Load all of the files. csv_to_arrays and attribute_conversions are
    # EQRM helpers defined elsewhere in the package.
    site = csv_to_arrays(site_db_path_file, **attribute_conversions)
    bvals = loadtxt(bval_path_file, dtype=scipy.float64, delimiter=",", skiprows=0)

    total_building_loss = loadtxt(total_building_loss_path_file,
                                  dtype=scipy.float64, delimiter=" ", skiprows=1)
    site_count = len(site["BID"])
    assert site_count == len(bvals)
    assert site_count == total_building_loss.shape[1]
    # For aggregates:
    # key is the unique AGGREGATE_ON combination, e.g. ('Hughes', 2605, ...)
    # values are lists of indices where the combinations are repeated in site
    aggregates = {}
    for i in range(site_count):
        assert site["BID"][i] == int(total_building_loss[0, i])
        marker = []
        for name in aggregate_on:
            marker.append(site[name][i])
        marker = tuple(marker)
        aggregates.setdefault(marker, []).append(i)

    handle = csv.writer(open(file_out, "w"), lineterminator="\n")

    handle.writerow(["percent losses (building and content) by suburb"])
    handle.writerow(["suburb", "loss", "value", "percent loss"])
    handle.writerow(["", " ($ millions)", " ($ millions)", ""])
    for key in sorted(aggregates):
        sum_loss = 0
        sum_bval = 0
        for row in aggregates[key]:
            sum_loss += total_building_loss[1][row]
            sum_bval += bvals[row]
        handle.writerow([key[0], sum_loss / 1000000.0, sum_bval / 1000000.0,
                         sum_loss / sum_bval * 100.0])
Author: vipkolon, Project: eqrm, Lines: 56, Source: postprocessing.py
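The setdefault grouping above is the core of the deaggregation; a minimal stand-alone illustration of the idiom (toy suburb names):

aggregates = {}
for i, suburb in enumerate(["Hughes", "Kambah", "Hughes"]):
    aggregates.setdefault((suburb,), []).append(i)
print(aggregates)  # {('Hughes',): [0, 2], ('Kambah',): [1]}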

Example 11: plotSummary

import numpy as np
import scipy
from pylab import *   # figure, subplot, plot, ... (legacy pylab style)

def plotSummary(mctraj_filename, ratespec_filename, nskip=0):
    """Read in the MC trajectory data and make a plot of it.
    Skip the first nskip points (as these may be far from the mean)."""

    data = scipy.loadtxt(mctraj_filename)  # columns: step, w, sigma, tau, neglogP
    ratespec_data = scipy.loadtxt(ratespec_filename)

    figure()

    # plot the lambda trajectory
    subplot(2, 2, 1)
    plot(data[nskip:, 0], data[nskip:, 1])
    xlabel('accepted steps')
    ylabel(r'$\lambda$')

    # contour plot of sigma and tau (histBin is a project-local helper)
    subplot(2, 2, 2)
    myhist, myextent = histBin(data[nskip:, 2], data[nskip:, 3], 20)
    # convert to log scale
    myhist = np.log(np.array(myhist) + 1.)
    contourf(myhist, extent=myextent)

    # plot mean +/- std (or confidence-interval) spectrum; the old
    # hold(True) calls are unnecessary and are gone from matplotlib
    ax = subplot(2, 2, 3)
    Timescales = ratespec_data[:, 0]
    maxLikA = ratespec_data[:, 1]
    meanA = ratespec_data[:, 2]
    stdA = ratespec_data[:, 3]
    ci_5pc = ratespec_data[:, 4]
    ci_95pc = ratespec_data[:, 5]
    PlotStd = False
    plot(Timescales, meanA, 'k-', linewidth=2)
    if PlotStd:
        plot(Timescales, meanA + stdA, 'k-', linewidth=1)
        plot(Timescales, meanA - stdA, 'k-', linewidth=1)
    else:
        plot(Timescales, ci_5pc, 'k-', linewidth=1)
        plot(Timescales, ci_95pc, 'k-', linewidth=1)
    ax.set_xscale('log')
    xlabel('timescale (s)')

    # histogram of the sampled lambda values
    subplot(2, 2, 4)
    wcounts, wbins = np.histogram(data[nskip:, 1], bins=30)
    plot(wbins[0:-1], wcounts, drawstyle='steps', linewidth=2)
    xlabel(r'$\lambda$')

    show()
Author: vvoelz, Project: ratespec, Lines: 54, Source: PlottingTools.py
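histBin itself is not shown in the source; a plausible stand-in with the same (histogram, extent) contract can be built on numpy.histogram2d. This is an assumption about its behaviour, not the project's actual implementation:

import numpy as np

def histBin(x, y, nbins):
    """Hypothetical stand-in: 2-D histogram of (x, y) plus an
    (xmin, xmax, ymin, ymax) extent tuple as contourf expects."""
    h, xedges, yedges = np.histogram2d(x, y, bins=nbins)
    extent = (xedges[0], xedges[-1], yedges[0], yedges[-1])
    return h.T, extent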

Example 12: load_dataset

import glob
import os

import scipy

def load_dataset(path):
  sortedfilesbyglob = lambda x: sorted(glob.glob(os.path.join(path, '%s*' % x)))
  inptfiles = sortedfilesbyglob('input')
  targetfiles = sortedfilesbyglob('target')

  data = []
  # pair the i-th input file with the i-th target file
  for infn, targetfn in zip(inptfiles, targetfiles):
    inpt = scipy.loadtxt(infn)
    target = scipy.loadtxt(targetfn)
    target.shape = scipy.size(target), 1  # force a column vector
    data.append((inpt, target))
  return data
Author: bayerj, Project: theano-rnn, Lines: 12, Source: evolearn.py
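A hypothetical usage sketch (paths and shapes made up): one input*/target* file pair yields one (inpt, target) tuple, with the target forced into a column vector.

import os
import numpy as np

os.makedirs("dataset", exist_ok=True)
np.savetxt(os.path.join("dataset", "input_000.txt"), np.random.rand(5, 3))
np.savetxt(os.path.join("dataset", "target_000.txt"), np.random.rand(5))

pairs = load_dataset("dataset")
inpt, target = pairs[0]
print(inpt.shape, target.shape)  # (5, 3) (5, 1)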

Example 13: read_CavityMemory

import scipy as sp

def read_CavityMemory(**cfgFiles):
  filename = cfgFiles['{prefix}'] + cfgFiles['{name_readwrite}'] + \
             cfgFiles['{name_optimized}'] + cfgFiles['{name_cavity}']

  print("### read initial value for cavity up")
  cavity    = sp.loadtxt(filename + "up" + cfgFiles['{postfix}'])
  cavity_up = cavity[0] + 1j * cavity[1]

  print("### read initial value for cavity down")
  cavity      = sp.loadtxt(filename + "down" + cfgFiles['{postfix}'])
  cavity_down = cavity[0] + 1j * cavity[1]

  return cavity_down, cavity_up
Author: bhartl, Project: optimal-control, Lines: 13, Source: IOHelper.py

Example 14: compare_files

import scipy as sp

def compare_files(file1, file2, tol=1e-8, delimiter="\t"):
    '''
    Given two files, compare their contents, treating numeric cells as equal
    up to absolute tolerance tol.
    Returns: val, msg
    where val is True/False (True means the files match) and msg describes
    any failure.
    '''
    dat1 = sp.loadtxt(file1, dtype='str', delimiter=delimiter, comments=None)
    dat2 = sp.loadtxt(file2, dtype='str', delimiter=delimiter, comments=None)

    ncol1 = dat1[0].size
    ncol2 = dat2[0].size

    if ncol1 != ncol2:
        return False, "num columns do not match up"

    try:
        head1 = dat1[0, :]
        head2 = dat2[0, :]
    except IndexError:
        # file contains just a single column
        return sp.all(dat1 == dat2), "single column result doesn't match exactly ('{0}')".format(file1)

    if not sp.all(head1 == head2):
        return False, "headers do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, head1, head2)

    for c in range(ncol1):
        checked = False
        col1 = dat1[1:, c]
        col2 = dat2[1:, c]
        try:
            # if it is numeric
            col1 = sp.array(col1, dtype='float64')
            col2 = sp.array(col2, dtype='float64')
        except Exception:
            # if it is a string column, require exact equality
            if not sp.all(col1 == col2):
                return False, "string column %s does not match" % head1[c]
            checked = True

        # if it is numeric
        if not checked:
            absdiff = sp.absolute(col1 - col2)
            if sp.any(absdiff > tol):
                try:
                    return False, "numeric column %s diff of %e does not match within tolerance %e" % (head1[c], max(absdiff), tol)
                except Exception:
                    return False, "Error trying to print error message while comparing '{0}' and '{1}'".format(file1, file2)

    return True, "files are comparable within abs tolerance=%e" % tol
Author: xiaofeng007, Project: FaST-LMM, Lines: 51, Source: util.py
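A hypothetical usage sketch (file contents made up): unlike compare_mixed_files, the first row is treated as a header and must match exactly, while numeric body cells are compared to the tolerance.

with open("t1.txt", "w") as f:
    f.write("id\tscore\nA\t1.00000000\n")
with open("t2.txt", "w") as f:
    f.write("id\tscore\nA\t1.00000001\n")

ok, msg = compare_files("t1.txt", "t2.txt", tol=1e-6)
print(ok, msg)  # True -- the score difference is within tolerance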

Example 15: plotmonetvspg

import pylab as py
import scipy as s

def plotmonetvspg():
    x1 = s.linspace(0, 22, 22, endpoint=False)
    y1 = s.loadtxt('average-monet.log')
    y2 = s.loadtxt('average-pg.log')
    y3 = s.loadtxt('result-mysql.log')
    # three bar series side by side, one per database
    p1 = py.bar(x1, y1, width=0.35)
    p2 = py.bar(x1 + 0.4, y2, width=0.4, color='green')
    p3 = py.bar(x1 + 0.8, y3, width=0.4, color='magenta')
    py.xlabel('queries')
    py.xlim(0, 22)
    py.ylabel('response time in seconds')
    py.legend((p1, p2, p3), ('monetdb', 'postgresql', 'mysql'), loc='upper left')
    py.title('TPC-H benchmark with PostgreSQL and MonetDB')
    py.savefig('monetvspg_mysql.jpg')
Author: wan-meng, Project: TPC-H, Lines: 15, Source: plot.py


Note: The scipy.loadtxt examples in this article were collected from open-source projects hosted on GitHub and similar platforms. The snippets were selected from projects contributed by their respective authors, and copyright remains with the original authors; distribution and use are subject to each project's license. Please do not republish without permission.