

Python mlab.csv2rec Function Code Examples

This article collects typical usage examples of Python's matplotlib.mlab.csv2rec function. If you are wondering how csv2rec is called in practice, or what real-world csv2rec code looks like, the curated examples here may help.


The following shows 15 code examples of the csv2rec function, ordered by popularity by default.
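Before the examples, here is a minimal usage sketch (the file name 'data.csv' and its columns are hypothetical placeholders, not taken from any project below). csv2rec reads a delimited text file with a header row and returns a NumPy record array, so each column can be accessed by field name. Note that mlab.csv2rec has been deprecated and removed in recent Matplotlib releases; on current versions, numpy.genfromtxt(..., names=True) or pandas.read_csv are the usual replacements.

from matplotlib import mlab

# Minimal sketch: read a comma-delimited file whose first row holds column names.
# 'data.csv' is a hypothetical placeholder file.
r = mlab.csv2rec('data.csv', delimiter=',')

print(r.dtype.names)    # field names parsed (and sanitized) from the header row
print(r['time'][:5])    # columns are accessed by field name
print(r[0])             # rows are accessed by index

The examples below follow the same pattern on real project data: call csv2rec on a CSV path, then index the returned record array by column name or via r.dtype.names.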

Example 1: makediffs

def makediffs(models = _allmodels, verbose = False, kpp = True):
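    # For each mechanism, compare the reference KPP output (.dat) against the
    # pykpp output (.pykpp.dat) and write absolute (.diff.csv) and percent
    # (.pct.csv) difference tables.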
    for model in models:
        model = os.path.splitext(os.path.basename(model))[0]
        if kpp:
            kppdat = csv2rec(os.path.join(model, model + '.dat'), delimiter = ' ')
        else:
            if model not in _modelconfigs:
                raise IOError('If KPP is not properly installed, you cannot run tests on mechanisms other than cbm4, saprc99, and small_strato.')
            kppdat = csv2rec(os.path.join(os.path.dirname(__file__), model + '.dat'), delimiter = ' ')
        pykppdat = csv2rec(os.path.join(model, model + '.pykpp.dat'), delimiter = ',')
        diff = pykppdat.copy()
        pct = pykppdat.copy()
        keys = set(kppdat.dtype.names).intersection(pykppdat.dtype.names)
        notkeys = set(pykppdat.dtype.names).difference(kppdat.dtype.names)
        notkeys.remove('t')
        for k in notkeys:
            diff[k] = np.nan
            pct[k] = np.nan
    
        for k in keys:
            diff[k] = pykppdat[k] - kppdat[k][:]
            pct[k] = diff[k] / kppdat[k][:] * 100
        diff['t'] = pykppdat['t'] - (kppdat['time'] * 3600. + pykppdat['t'][0])
        pct['t'] = diff['t'] / (kppdat['time'] * 3600. + pykppdat['t'][0]) * 100
        
        rec2csv(diff, os.path.join(model, model + '.diff.csv'), delimiter = ',')
        rec2csv(pct, os.path.join(model, model + '.pct.csv'), delimiter = ',')
Author: barronh | Project: pykpp | Lines: 27 | Source: test.py

Example 2: extract_lai_fpar

def extract_lai_fpar(above_par_dat, below_par_dat):
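    # Pair each below-canopy PAR reading with the nearest-in-time above-canopy
    # reading and compute the fraction of absorbed PAR (fAPAR) per plot.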
    above_par_ra = mlab.csv2rec(above_par_dat)
    below_par_ra = mlab.csv2rec(below_par_dat)
    points_ra = mlab.csv2rec('lonlat_threet.csv')
    plot = below_par_ra['plot']
    date = below_par_ra['date']
    below_par = below_par_ra['par']
    lats = np.array(points_ra['latitude'].tolist()*2)
    lons = np.array(points_ra['longitude'].tolist()*2)
    above_par = []
    fapar = []
    for time in enumerate(date):
        par_idx = find_nearest_idx(above_par_ra['date'], time[1])
        above_par.append(np.mean((above_par_ra['par'][par_idx-1], above_par_ra['par'][par_idx],
                                 above_par_ra['par'][par_idx+1])))
        if above_par_ra['par'][par_idx] < below_par[time[0]]:
            fapar.append(0)
        else:
            fapar.append((above_par_ra['par'][par_idx] - below_par[time[0]]) /
                     above_par_ra['par'][par_idx])
    above_par = np.array(above_par)
    fapar = np.array(fapar)
    newra = np.column_stack((date, plot, lats, lons, above_par, below_par, fapar))
    new_ra = np.core.records.fromarrays(newra.transpose(),
                                        dtype=[('date', 'object'),
                                               ('plot', 'i'), ('lat', 'f'),
                                               ('lon', 'f'), ('above_par', 'f'),
                                               ('below_par', 'f'), ('fapar', 'f')])
    return new_ra
Author: Ewan82 | Project: ah_data | Lines: 29 | Source: lai_cept.py

Example 3: _make

 def _make(self, output_file, basin_poly, ba_csv, fa_ncons_csv, area_csv, arid_thresh=0.03, use_thresh=0.012, **kwargs):
     print "loading data"
     ba = np.genfromtxt(ba_csv,np.double,skip_header=1,delimiter=',')
     area_arr = mlab.csv2rec(area_csv)
     nc_arr = mlab.csv2rec(fa_ncons_csv)
     
     ids = ba[:,0]
     
     mean_ba = np.mean(ba[:,1:],1)
     ncons = gen_merge.arrange_vector_by_ids(nc_arr["ncons"],nc_arr["basinid"],ids).astype(np.double)
     area = gen_merge.arrange_vector_by_ids(area_arr["f_area"],area_arr["basinid"],ids).astype(np.double)
     
     wri = ncons/mean_ba
     
     miscmask = (ncons/area<use_thresh)*(mean_ba/area<arid_thresh)
     wri_s = self.score(wri)
     wri_s[miscmask] = MINSCORE
     wri_cat = self.categorize(wri_s, miscmask)
     
     
     joinarray = np.rec.fromarrays((ba[:,0],mean_ba,ncons,wri,wri_s,wri_cat),names=(BASIN_ID_FIELD,"BA","FA_NCONS",self.plot_field_name,"%s_s" % self.plot_field_name,"%s_cat" % self.plot_field_name))
     
     print "joining data"
     ap.CopyFeatures_management(basin_poly,output_file)
     ap.da.ExtendTable(output_file,BASIN_ID_FIELD,joinarray,BASIN_ID_FIELD)
Author: fgassert | Project: aqueduct_atlas | Lines: 25 | Source: gen_WRI.py

Example 4: test_sanity

def test_sanity():
    from nipy.modalities.fmri.fmristat.tests import FIACdesigns

    """
    Single subject fitting of FIAC model
    """

    # Based on file
    # subj3_evt_fonc1.txt
    # subj3_bloc_fonc3.txt

    for subj, run, dtype in [(3, 1, "event"), (3, 3, "block")]:
        nvol = 191
        TR = 2.5
        Tstart = 1.25

        volume_times = np.arange(nvol) * TR + Tstart
        volume_times_rec = formula.make_recarray(volume_times, "t")

        path_dict = {"subj": subj, "run": run}
        if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block", "initial_%(run)02d.csv") % path_dict):
            path_dict["design"] = "block"
        else:
            path_dict["design"] = "event"

        experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % path_dict)
        initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % path_dict)

        X_exper, cons_exper = design.event_design(experiment, volume_times_rec, hrfs=delay.spectral)
        X_initial, _ = design.event_design(initial, volume_times_rec, hrfs=[hrf.glover])
        X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}))

        Xf = np.loadtxt(StringIO(FIACdesigns.designs[dtype]))
        for i in range(X.shape[1]):
            yield nitest.assert_true, (matchcol(X[:, i], Xf.T)[1] > 0.999)
Author: yarikoptic | Project: NiPy-OLD | Lines: 35 | Source: fiac_util.py

Example 5: test_transform_data

def test_transform_data():
    """ 
    Testing the transformation of the data from raw data to functions 
    used for fitting a function.
    
    """
    # We start with actual data. We test here just that reading the data in 
    # different ways ultimately generates the same arrays. 
    from matplotlib import mlab 
    ortho = mlab.csv2rec(op.join(data_path, 'ortho.csv'))
    para = mlab.csv2rec(op.join(data_path, 'para.csv'))
    x1, y1, n1 = sb.transform_data(ortho)
    x2, y2, n2 = sb.transform_data(op.join(data_path, 'ortho.csv'))
    npt.assert_equal(x1, x2)
    npt.assert_equal(y1, y2)
    # We can also be a bit more critical, by testing with data that we 
    # generate, and should produce a particular answer:
    my_data = pd.DataFrame(
        np.array([[0.1, 2], [0.1, 1], [0.2, 2], [0.2, 2], [0.3, 1], 
                  [0.3, 1]]),
        columns=['contrast1', 'answer'])
    my_x, my_y, my_n = sb.transform_data(my_data)
    npt.assert_equal(my_x, np.array([0.1, 0.2, 0.3]))
    npt.assert_equal(my_y, np.array([0.5, 0, 1.0]))
    npt.assert_equal(my_n, np.array([2, 2, 2]))
Author: amsjavan | Project: nazarkav | Lines: 25 | Source: test_nazarkav.py

Example 6: get_experiment_initial

def get_experiment_initial(path_dict):
    """Get the record arrays for the experimental/initial designs.

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir', 'run', 'subj'

    Returns
    -------
    experiment, initial : Two record arrays.

    """
    # The following two lines read in the .csv files
    # and return recarrays, with fields
    # experiment: ['time', 'sentence', 'speaker']
    # initial: ['time', 'initial']

    rootdir = path_dict['rootdir']
    if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict):
        e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict
        raise IOError(e)

    experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict)
    initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict)

    return experiment, initial
Author: GaelVaroquaux | Project: nipy | Lines: 27 | Source: fiac_util.py

Example 7: test_fractionate

 def test_fractionate(self):
     data1 = csv2rec('arch1.csv')
     data2 = csv2rec('arch2.csv')
     dl = [data1, data2]
     fr = fractionate(dl, (10, 10), (5, 5), ['row', 'column'])
     self.assertTrue(fr[0]['row'][3] == 5)
     self.assertTrue(fr[1]['column'][2] == 0)
Author: ethanwhite | Project: macroeco | Lines: 7 | Source: test_form_func.py

Example 8: _make

 def _make(self, output_file, basin_poly, ba_csv, withdrawal_csv, consumption_csv, area_csv, arid_thresh=0.03, use_thresh=0.012, **kwargs):
     print "loading data"
     ba = np.genfromtxt(ba_csv,np.double,skip_header=1,delimiter=',')
     area_arr = mlab.csv2rec(area_csv)
     ut_arr = mlab.csv2rec(withdrawal_csv)
     ct_arr = mlab.csv2rec(consumption_csv)
     
     ids = ba[:,0]
     
     mean_ba = np.mean(ba[:,1:],1)
     ut = gen_merge.arrange_vector_by_ids(ut_arr["ut"],ut_arr["basinid"],ids).astype(np.double)
     uc = gen_merge.arrange_vector_by_ids(ct_arr["ct"],ct_arr["basinid"],ids).astype(np.double)
     area = gen_merge.arrange_vector_by_ids(area_arr["f_area"],area_arr["basinid"],ids).astype(np.double)
     bws = ut/mean_ba
     
     miscmask = (ut/area<use_thresh)*(mean_ba/area<arid_thresh)
     #miscmask2 = (ut/area[:,1]<use_thresh)*(mean_ba/area[:,1]<arid_thresh)*(bws<.8)
     bws_s = self.score(bws)
     bws_s[miscmask] = MAXSCORE
     bws_cat = self.categorize(bws_s, miscmask)
     
     joinarray = np.rec.fromarrays((ba[:,0],mean_ba,ut,uc,bws,bws_s,bws_cat,area),names=(BASIN_ID_FIELD,"BA","WITHDRAWAL","CONSUMPTION",self.plot_field_name,"%s_s" % self.plot_field_name,"%s_cat" % self.plot_field_name,"AREAM3"))
     
     print "joining data"
     ap.CopyFeatures_management(basin_poly,output_file)
     ap.da.ExtendTable(output_file,BASIN_ID_FIELD,joinarray,BASIN_ID_FIELD)
Author: fgassert | Project: aqueduct_atlas | Lines: 26 | Source: gen_BWS.py

Example 9: test_merge_formatted

 def test_merge_formatted(self):
     data1 = csv2rec('arch1.csv')
     data2 = csv2rec('arch2.csv')
     dl = [data1, data2]
     merged = merge_formatted(dl)
     self.assertTrue(sum(merged['rew']) == 2)
     self.assertTrue(sum(merged['column']) == 12)
Author: ethanwhite | Project: macroeco | Lines: 7 | Source: test_form_func.py

Example 10: rewrite_spec

def rewrite_spec(subj, run, root = "/home/jtaylo/FIAC-HBM2009"):
    """
    Take a FIAC specification file and get two specifications
    (experiment, begin).

    This creates two new .csv files, one for the experimental
    conditions, the other for the "initial" confounding trials that
    are to be modelled out. 

    For the block design, the "initial" trials are the first
    trials of each block. For the event designs, the 
    "initial" trials are made up of just the first trial.

    """

    if exists(pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_evt_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run}):
        designtype = 'evt'
    else:
        designtype = 'bloc'

    # Fix the format of the specification so it is
    # more in the form of a 2-way ANOVA

    eventdict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'}
    s = StringIO()
    w = csv.writer(s)
    w.writerow(['time', 'sentence', 'speaker'])

    specfile = pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    d = np.loadtxt(specfile)
    for row in d:
        w.writerow([row[0]] + eventdict[row[1]].split('_'))
    s.seek(0)
    d = csv2rec(s)

    # Now, take care of the 'begin' event
    # This is due to the FIAC design

    if designtype == 'evt':
        b = np.array([(d[0]['time'], 1)], np.dtype([('time', np.float),
                                                    ('initial', np.int)]))
        d = d[1:]
    else:
        k = np.equal(np.arange(d.shape[0]) % 6, 0)
        b = np.array([(tt, 1) for tt in d[k]['time']], np.dtype([('time', np.float),
                                                                 ('initial', np.int)]))
        d = d[~k]

    designtype = {'bloc':'block', 'evt':'event'}[designtype]

    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    rec2csv(d, fname)
    experiment = csv2rec(fname)

    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    rec2csv(b, fname)
    initial = csv2rec(fname)

    return d, b
Author: GaelVaroquaux | Project: nipy | Lines: 59 | Source: fiac_util.py

Example 11: append_rec

def append_rec(recs):
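    # Load the first file as the base record array, then join the columns of each
    # remaining file onto it using the shared 'sys_tick' field.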
    base = mlab.csv2rec(recs[0]["file"])

    for nw in recs[1:]:
        append = mlab.csv2rec(nw["file"])
        for k,v in append.dtype.fields.iteritems():
            base = mlab.recs_join("sys_tick",k,[base,append],missing=0)
    return base
Author: archaelus | Project: emetric | Lines: 8 | Source: merge.py

Example 12: test_format_dense

 def test_format_dense(self):
     data1 = csv2rec('arch1.csv')
     data2 = csv2rec('arch2.csv')
     dl = [data1, data2]
     form = format_dense(dl, 3, (4,4))
     self.assertTrue(np.all(form[0]['count'][:4] == np.array([1,1,3,3])))
     self.assertTrue(np.all(form[1]['count'] ==
                                            np.array([1,1,3,3,1,1,5,1])))
Author: ethanwhite | Project: macroeco | Lines: 8 | Source: test_form_func.py

Example 13: test_add_data_fields

 def test_add_data_fields(self):
     data1 = csv2rec('arch1.csv')
     data2 = csv2rec('arch2.csv')
     dl = [data1, data2]
     alt_data = add_data_fields(dl, {'year': (1998, 2002)})
     self.assertTrue(np.all(alt_data[0]['year'] == '1998'))
     self.assertTrue(np.all(alt_data[1]['year'] == '2002'))
     alt_data = add_data_fields(dl, {'year' : (1998, 2002), 'why': ('h',
                                 'a')})
     self.assertTrue(np.all(alt_data[0]['why'] == 'h'))
Author: ethanwhite | Project: macroeco | Lines: 10 | Source: test_form_func.py

Example 14: plotGraphs

def plotGraphs():
  global gDateStr, gTimeStr
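  # Plot the day's temperature log (two sanitized sensor columns) together with
  # pump ON/OFF events, save the figure as a PNG, and update the 'current' symlink.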
  
  print "Plotting..." 
  print "temperatures"
  filename = "./data/" + gDateStr + "_temperatures.csv";
  r = mlab.csv2rec(filename, delimiter=',')

  fig = Figure(figsize=(6,6))
  canvas = FigureCanvas(fig)

  ax = fig.add_subplot(111)
  ax.set_title('Temperatures '+gDateStr,fontsize=14)

  ax.set_xlabel('Time',fontsize=6)
  ax.set_ylabel('Temperature (C)',fontsize=6)

  ax.grid(True,linestyle='-',color='0.75')

  # run two sanitize passes over the data
  r[r.dtype.names[1]] = arrayops.sanitize( r[r.dtype.names[1]] )
  r[r.dtype.names[2]] = arrayops.sanitize( r[r.dtype.names[2]] )


  # Generate the plot.
  ax.plot(r[r.dtype.names[0]],r[r.dtype.names[1]],color='tomato');
  ax.plot(r[r.dtype.names[0]],r[r.dtype.names[2]],color='green');

  # plot pump on times
  print "pump on"
  filename = "./data/" + gDateStr + "_pumpON.csv";
  if os.path.exists(filename):
    r = mlab.csv2rec(filename, delimiter=',')
    ax.scatter(r[r.dtype.names[0]],r[r.dtype.names[1]],color='orange');

  # plot pump off times
  print "pump off"
  filename = "./data/" + gDateStr + "_pumpOFF.csv";
  if os.path.exists(filename):
    r = mlab.csv2rec(filename, delimiter=',')
    ax.scatter(r[r.dtype.names[0]],r[r.dtype.names[1]],color='blue');


  for tick in ax.xaxis.get_major_ticks():
    tick.label.set_fontsize(6)

  for tick in ax.yaxis.get_major_ticks():
    tick.label.set_fontsize(6)

  ax.set_ylim(-5, 35)

  # Save the generated Plot to a PNG file.
  filename = "/var/www/Prometheus/data/"+gDateStr+"_temperatures.png"
  canvas.print_figure(filename,dpi=100)
  os.system('ln -sf '+filename+' /var/www/Prometheus/data/current_temperatures.png')
Author: opiesche | Project: Prometheus | Lines: 55 | Source: prometheus_controller.py

Example 15: main

def main():
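    # Join the BWS and WRI tables onto the GU table on their shared id columns
    # and write the merged result to a new CSV.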
    inputlist = ["bin/global_BWS_20121015.csv","bin/global_WRI_20121015.csv"]
    lhs = mlab.csv2rec("bin/global_GU_20121015.csv")
    rhslist = []
    for x in inputlist:
        rhslist.append(mlab.csv2rec(x))
    
    rhslist[0]["basinid"] = rhslist[0]["basinid"].astype(np.long)
    keys = ("basinid","countryid","id")
    lhs = join_recs_on_keys(lhs,rhslist,keys)
    mlab.rec2csv(lhs,"bin/test.csv")
    print "complete"
Author: fgassert | Project: aqueduct_atlas | Lines: 12 | Source: gen_merge.py


Note: The matplotlib.mlab.csv2rec examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their original authors; copyright remains with those authors, and redistribution or use should follow each project's license. Do not reproduce without permission.