

Python DataIO.writeFile Method Code Examples

This article collects typical usage examples of the Python method cc.tools.io.DataIO.writeFile. If you are wondering how to use DataIO.writeFile, or looking for concrete examples of it in use, the curated code examples below may help. You can also explore further usage examples of the containing module, cc.tools.io.DataIO.


The 14 code examples of the DataIO.writeFile method shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
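
Before the examples, here is a minimal usage sketch distilled from the call patterns that recur below (a delimiter keyword additionally appears in Example 4). The keyword names and the behaviour of mode are inferred from those calls only; check the cc.tools.io.DataIO module for the exact signature. The file path used here is purely illustrative.

import os
from cc.tools.io import DataIO

# A list of strings, one element per output line (as in the command files below).
lines = ['KEY1=value1', 'KEY2=value2', '####']

# Illustrative path only; the real examples write into the GASTRoNOoM/MCMax
# output trees (cc.path.gout, cc.path.mout, ...).
fn = os.path.join('/tmp', 'example_gastronoom.inp')

# Write the lines to a new file (keyword form, as in Examples 4, 5 and 10).
DataIO.writeFile(filename=fn, input_lines=lines)

# Positional form (as in Examples 1, 8 and 13); presumably overwrites in the default mode.
DataIO.writeFile(fn, lines)

# Append extra lines to an existing file (mode='a', as in Examples 2 and 6).
DataIO.writeFile(filename=fn, input_lines=['# appended line'], mode='a')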

Example 1: doMline

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
    def doMline(self,star):
        
        """
        Run mline.
        
        First, database is checked for retrieval of old models. 

        @param star: The parameter set for this session
        @type star: Star()
        
        """
        
        model_bools = self.checkMlineDatabase()
        del self.command_list['R_OUTER']
        del self.command_list['OUTER_R_MODE']
        for molec,model_bool in zip(self.molec_list,model_bools):
            if not model_bool:
                self.updateModel(molec.getModelId())
                commandfile = ['%s=%s'%(k,v) 
                               for k,v in sorted(self.command_list.items())
                               if k != 'R_POINTS_MASS_LOSS'] +\
                              ['####'] + \
                              ['%s=%s'%(k,v) 
                               for k,v in sorted(molec.makeDict().items())] +\
                              ['####']
                if self.command_list.has_key('R_POINTS_MASS_LOSS'):
                    commandfile.extend(['%s=%s'%('R_POINTS_MASS_LOSS',v) 
                                        for v in self.command_list\
                                                    ['R_POINTS_MASS_LOSS']] +\
                                       ['####'])
                filename = os.path.join(cc.path.gout,'models',\
                                        'gastronoom_%s.inp'%molec.getModelId())
                DataIO.writeFile(filename,commandfile)                
                self.execGastronoom(subcode='mline',filename=filename)
                self.mline_done=True
                if len([f for f in glob(os.path.join(cc.path.gout,'models',\
                                        molec.getModelId(),'ml*%s_%s.dat'\
                                        %(molec.getModelId(),molec.molecule)))])\
                        == 3:
                    self.ml_db[self.model_id][molec.getModelId()]\
                              [molec.molecule] = molec.makeDict()
                    self.ml_db.addChangedKey(self.model_id)
                    self.ml_db.sync()
                else:
                    print 'Mline model calculation failed for'\
                          '%s. No entry is added to the database.'\
                          %(molec.molecule)
                    molec.setModelId('')
        if set([molec.getModelId() for molec in self.molec_list]) == set(['']):  
            #- no mline models calculated: stop GASTRoNOoM here
            self.model_id = ''
            print 'Mline model calculation failed for all requested ' + \
                  'molecules. Stopping GASTRoNOoM here!'
        else:        
            #- at least one molecule was successfully calculated, so start  
            #- Sphinx, hence if vic is requested, the cooling model_id can now  
            #- be added to the models list
            if self.vic <> None and self.sphinx: 
                #- add the command list to the vic models list
                self.vic.addModel(self.model_id,self.command_list)
Developer ID: FungKu01, Project: ComboCode, Lines: 62, Source: Gastronoom.py

Example 2: makeOpa

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def makeOpa(self,mode='ZERO',**args):
     
     """
     Making custom .particle files.
     
     Every method called here will put the results in self.output_data.
     
     @keyword mode: type of extrapolation (ZERO,FUNCTION,HONY,ZHANG)
                     
                    (default: 'ZERO')
     @type mode: string
     @keyword args: Optional keywords required for the other methods of the 
                    class
     @type args: dict
     
     """
     
     self.output_data = []
     mode = mode.upper()
     if hasattr(self,'do' + mode):
         getattr(self,'do' + mode)(**args)
         self.output_data = [' '.join(str(line)) 
                             for line in self.output_data]
         output_filename = '_'.join(['customOpacity',mode] + \
                                    sorted(args.values()) + \
                                    [self.filename])
         if self.opacity_file:
             output_filename.replace('.particle','.opacity')
         DataIO.writeFile(filename=os.path.join(cc.path.mopac,\
                                                output_filename),\
                          input_lines=self.output_data)
         new_short = self.species + mode
         #- filename is already present: the new file should have the same
         #- parameters and the short name can be kept, 
         #- nothing is changed in the Dust.dat file
         try:    
             DataIO.getInputData(keyword='PART_FILE',filename='Dust.dat')\
                                .index(output_filename)
         #- filename is not present: do the normal procedure, ie check if 
         #- short name is already present
         except ValueError:        
             i=0
             while ' '.join(DataIO.getInputData(keyword='SPECIES_SHORT',\
                                                filename='Dust.dat'))\
                                               .find(new_short) != -1:
                 i+=1    
                 new_short = new_short + str(i)
             adding_line = [new_short] + \
                           [str(DataIO.getInputData(keyword=key,\
                                                    filename='Dust.dat',\
                                                    rindex=self.index))
                            for key in ['SPEC_DENS','T_DES','T_DESA','T_DESB']]
             adding_line.insert(2,output_filename)
             adding_line = '\t\t'.join(adding_line)
             DataIO.writeFile(os.path.join(cc.path.usr,'Dust.dat'),\
                              [adding_line+'\n'],mode='a')
     else:
         print 'Mode "' + mode + '" not available. Aborting.'
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 60, Source: DustOpacity.py

Example 3: finalizeVic

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def finalizeVic(self):
     
     '''
     Finalize a modeling procedure on VIC: successful and failed results 
     are printed to a file, including the transitions.
     
     This log file can be used as input for ComboCode again by putting 
     LINE_LISTS=2.
     
     '''
     
     for trans in self.trans_in_progress:
         filename = os.path.join(cc.path.gastronoom,\
                                 self.path,'models',trans.getModelId(),\
                                 trans.makeSphinxFilename(2))
         if not os.path.isfile(filename):              
             trans.setModelId('') 
     if self.models.keys():
         time_stamp = '%.4i-%.2i-%.2ih%.2i:%.2i:%.2i' \
                      %(gmtime()[0],gmtime()[1],gmtime()[2],\
                        gmtime()[3],gmtime()[4],gmtime()[5])
         results = ['# Successfully calculated models:'] \
                 + [self.models[current_model] 
                    for current_model in self.models.keys() 
                    if current_model not in self.failed.keys()] \
                 + ['# Unsuccessfully calculated models (see 3 logfiles '+ \
                    'for these models):'] \
                 + [self.models[current_model] 
                    for current_model in self.models.keys() 
                    if current_model in self.failed.keys()]
         DataIO.writeFile(os.path.join(cc.path.gastronoom,self.path,\
                                       'vic_results','log_' + time_stamp),\
                          results)
         for current_model,model_id in self.models.items():
             model_results = ['# Successfully calculated transitions:'] + \
                 ['Sphinx %s: %s' %(trans.getModelId(),str(trans)) 
                  for trans in self.finished[current_model]] + \
                 ['# Unsuccessfully calculated transitions (see 2 other ' + \
                  'logfiles for these transitions):'] + \
                 ['Sphinx %s: %s' %(trans.getModelId(),str(trans)) 
                  for trans in self.failed[current_model]]
             DataIO.writeFile(os.path.join(cc.path.gastronoom,self.path,\
                                           'vic_results','log_results%s_%i'\
                                           %(time_stamp,current_model)),\
                              model_results)
             for this_id in self.sphinx_model_ids[current_model]:
                 sphinx_files = os.path.join(cc.path.gastronoom,self.path,\
                                             'models',this_id,'sph*')
                 subprocess.call(['chmod a+r %s'%sphinx_files],shell=True)
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 51, Source: Vic.py

Example 4: writeChi2

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
    def writeChi2(self,fn,sort=1,parameters=[]):
        
        '''
        Write the Chi^2 values to a file. Lists the model id in the first column
        with the chi^2 value in the second. 
        
        The chi^2 values can be requested to be sorted.
        
        Parameters from the Star() objects can be added as additional columns.
        Given parameters must be valid.
        
        @param fn: The output filename
        @type fn: str
        
        @keyword sort: Sort the star_grid according to the chi^2 values from 
                       lowest to highest. Requires calcChi2 to be ran first.
        
                       (default: 1)
        @type sort: bool
        @keyword parameters: The additional model parameters to be added as 
                             columns in the file. 
                             
                             (default: [])
        @type parameters: list(str)
        
        '''
        
        #-- If no chi^2 was calculated, do nothing
        if not self.chi2.size:
            return
            
        #-- Write the header
        comments = ['# '] + ['ID','RedChi^2'] + parameters + ['\n']
        DataIO.writeFile(filename=fn,input_lines=comments,delimiter='\t')
        
        #-- Define the columns
        cols = [[s['LAST_MCMAX_MODEL'] for s in self.getStarGrid(sort=sort)]]
        if sort: 
            isort = np.argsort(self.chi2) 
            cols.append(self.chi2[isort])
        else: 
            cols.append(self.chi2)
        
        #-- Add additional model parameters if requested
        for par in parameters:
            cols.append([s[par] for s in self.getStarGrid(sort=sort)])

        #-- Append the columns to the file after the header
        DataIO.writeCols(filename=fn,cols=cols,mode='a')
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 51, Source: SedStats.py
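
Example 4 above also shows a write-then-append pattern worth noting: DataIO.writeFile first produces a single tab-separated header line (the delimiter keyword appears to control how the input_lines elements are joined, newline being the implied default), after which DataIO.writeCols appends the data columns underneath. A stripped-down sketch of that pattern, using hypothetical data and assuming the two functions behave as they are called above:

import numpy as np
from cc.tools.io import DataIO

fn = 'chi2_overview.dat'                       # hypothetical output filename
ids = ['model_2014-01-01h00:00:00', 'model_2014-01-02h00:00:00']   # hypothetical model ids
chi2 = np.array([1.3, 2.7])                    # hypothetical reduced chi^2 values

# One tab-separated header line, written in the default (write) mode.
DataIO.writeFile(filename=fn, input_lines=['# ', 'ID', 'RedChi^2', '\n'],
                 delimiter='\t')

# The data columns are appended below the header.
DataIO.writeCols(filename=fn, cols=[ids, chi2], mode='a')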

Example 5: makeIdLog

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def makeIdLog(self, new_id,molec_id=None):
     
     '''
     Make a text file with the original cooling id in it.
     
     This is used when creating a Transition() from a sphinx outputfilename.
     
     @param new_id: the new id for either mline or sphinx
     @type new_id: string
     @keyword molec_id: if given, an mline_id.log will be made including the
                        mline id to which molecules are linked
                        
                        (default: None)
     @type molec_id: string
     
     '''
     
     DataIO.writeFile(filename=os.path.join(cc.path.gout,'models',new_id,\
                      'cooling_id.log'),input_lines=[self.model_id])
     if molec_id <> None:
         DataIO.writeFile(filename=os.path.join(cc.path.gout,'models',\
                          new_id,'mline_id.log'),input_lines=[molec_id])
Developer ID: FungKu01, Project: ComboCode, Lines: 24, Source: Gastronoom.py

Example 6: appendResults

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def appendResults(self):    
     
     '''
     Append results at the end of the inputfile.
     
     '''
     
     print '** Appending results to inputfile and copying to output folders.'
     print '***********************************'
     #-- Check if the transition was intended to be calculated, and if it was 
     #-- successful (ie don't add if it had already been done)
     timestring = '%.4i-%.2i-%.2ih%.2i-%.2i-%.2i'\
                   %(time.gmtime()[0],time.gmtime()[1],time.gmtime()[2],\
                     time.gmtime()[3],time.gmtime()[4],time.gmtime()[5])
     appendage = []
     if self.model_manager.trans_bool_list:
         model_ids_list = [list(set([(trans.molecule.molecule,\
                                      trans.getModelId())
                                     for boolean,trans in zip(trans_bool,\
                                                          star['GAS_LINES']) 
                                     if trans.getModelId() \
                                         and (not trans_bool \
                                         or self.append_results)]))
                           for star,trans_bool in zip(self.star_grid,\
                                        self.model_manager.trans_bool_list)]
         #-- all unique molecules over all stars
         molec_list = list(set([molec 
                                for model_ids in model_ids_list 
                                for molec,model_id in model_ids 
                                if model_ids]))
         #-- all unique modelids for every star separately
         model_id_unique = [list(set([model_id 
                                      for molec,model_id in model_ids])) 
                            for model_ids in model_ids_list]    
         if [modelids for modelids in model_ids_list if modelids] != []:
             appendage += \
                   ['#########################################',\
                    '## Successfully calculated transition model_ids on %s:'\
                    %timestring]
             appendage.extend(['## molecule %s'%molec 
                               for molec in molec_list])
             for i,(star,model_ids) in enumerate(zip(self.star_grid,\
                                                     model_ids_list)):
                 if model_ids:
                     appendage += ['## For Model %i : cooling id %s'\
                                   %(i+1,star['LAST_GASTRONOOM_MODEL'])] + \
                                  ['#molecule %s #%s' %(molecule,model_id) 
                                   for molecule,model_id in model_ids] + \
                                  ['########']
             for star,model_ids in zip(self.star_grid,model_id_unique):
                 for model_id in model_ids:
                     try:
                         i = 0
                         while True:
                             dummy = DataIO.readFile(\
                                 os.path.join(cc.path.gout,'models',model_id,\
                                     os.path.split(self.inputfilename)[1]+\
                                     '_%s_%i'%(model_id,i)))
                             i += 1
                     except IOError:
                         subprocess.call(['cp %s %s'%(self.inputfilename,\
                                     os.path.join(cc.path.gout,\
                                     'models',model_id,\
                                     os.path.split(self.inputfilename)[1]+\
                                     '_%s_%i'%(model_id,i)))],shell=True)
     if self.model_manager.mcmax_done_list:
         model_ids = [star['LAST_MCMAX_MODEL'] 
                      for star,boolean in zip(self.star_grid,\
                                         self.model_manager.mcmax_done_list) 
                      if boolean or self.append_results]
         if model_ids:
             appendage += ['#########################################',\
                     '## MCMax model_ids associated with this grid on %s:'\
                     %timestring]
             appendage += ['#%s'%model_id for model_id in model_ids]
             for model_id in model_ids:
                 try:
                     i = 0
                     while True:
                         dummy = DataIO.readFile(os.path.join(\
                                     cc.path.mout,'models',model_id,\
                                     os.path.split(self.inputfilename)[1]+\
                                     '_%s_%i'%(model_id,i)))
                         i += 1
                 except IOError:
                     subprocess.call(['cp %s %s'%(self.inputfilename,os.path.join(\
                                 cc.path.mout,'models',model_id,\
                                 os.path.split(self.inputfilename)[1]+\
                                 '_%s_%i'%(model_id,i)))],shell=True)
     if appendage: DataIO.writeFile(filename=self.inputfilename,\
                                    input_lines=appendage+['\n'],mode='a')
Developer ID: FungKu01, Project: ComboCode, Lines: 93, Source: ComboCode.py

Example 7: writeIntIntTable

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]

#......... part of the code omitted here .........
    if not no_vib: 
        line_els.append('Vibrational')
    line_els.extend(['Rotational','$\lambda_0$',\
                     r'\multicolumn{%i}{c}{$F_\mathrm{int}$} \\'%len(pstars)])
    inlines.append('&'.join(line_els))
    line_els = ['band','']
    if not no_vib:
        line_els.append('state')
    line_els.extend(['transition',r'$\mu$m',\
                     r'\multicolumn{%i}{c}{(W m$^-2$))} \\\hline'%len(pstars)])
    inlines.append('&'.join(line_els))
    all_bands = ['SLW','SSW','R1B','R1A','B2B','B2A','B3A'] 
    bands = set([ib for v in ddict.values() for ib in v.data_ordernames])
    bands = [ib for ib in all_bands if ib in bands]
    if not sort_freq: bands.reverse()
    line_counter = dict()
    for s in stars:
        line_counter[s] = 0
    for band in bands:
        #inlines.append(r'\multicolumn{4}{c}{PACS Band: %s} & \multicolumn{%i}{c}{} \\\hline'\
                       #%(band,len(pstars)))
        new_band = 1
        for it,t in enumerate(trans):
            #-- Check if there's any actual line strength result for any star
            #   for this particular band; in any filename in this band.
            #   Otherwise, exclude the line from the table. If there's no 
            #   spectrum for this band at all, the line is excluded as well.
            #   In this case, the band will not be added to the table at all.
            #   Just look at the keys of t.unreso. If there's none of the 
            #   given band, then exclude the transition. All stars are 
            #   included in the same Transition() objects.
            all_keys = [k 
                        for k in t.unreso.keys()
                        if band in os.path.split(k)[1].split('_')]
            if not all_keys:
                continue
            
            #-- There's at least one star with a measured line strength in this 
            #   band, so add a column. 
            col1 = all_pmolecs[all_molecs.index(t.molecule.molecule)]
            
            #-- If the band has not had a line added before, add the band now
            if new_band:
                col0 = band
                new_band = 0
            else:
                col0 = ''
                
            #-- Define the input for the table.
            parts = [col0,\
                     col1]
            if not no_vib:
                parts.append(t.makeLabel(return_vib=1))
            parts.extend([t.makeLabel(inc_vib=0),'%.2f'%(t.wavelength*10**4)])
            #-- For every star, add the measured line strength of the 
            #   transition, if available. For now, it is assumed only one line
            #   strength measurement is available per star per band. (This is 
            #   not strictly true, for instance for overlapping line scans, but
            #   the line integration tool makes it impossible to discern 
            #   between multiple measurements of the same line in the same 
            #   band)
            for s in stars:
                #-- Grab the filename available in the transition object for 
                #   the measured line strength. If multiple filenames with the
                #   correct band are available, only the first is taken.
                all_fn = [sfn for sfn in t.unreso.keys()
                          if band in os.path.split(sfn)[1].split('_')\
                              and s in os.path.split(sfn)[1].split('_')]
                
                #-- Possibly, for this star, no datafiles of given band are
                #   present. Then just add no flux measurement and continue to
                #   the next star.
                if not all_fn: 
                    parts.append(r'/')
                    continue
                else:
                    fn = all_fn[0]
                
                #-- The filename is present, and thus should have a line 
                #   strength indicated, or be flagged as a blend. If not, an 
                #   error will be raised, but this should not happen!
                fint,finterr,fintblend = t.getIntIntUnresolved(fn)
                if fint == 'inblend':
                    parts.append('Blended')
                else:
                    line_counter[s] += 1
                    parts.append('%s%s%.2e (%.1f%s)'\
                                 %(t in mark_trans and extra_marker or r'',\
                                   fint<0 and blend_mark or r'',abs(fint),\
                                   finterr*100,r'\%'))
            parts[-1] = parts[-1] + r'\\'
            inlines.append('&'.join(parts))   
        if not new_band and band != bands[-1]: 
            inlines[-1] = inlines[-1] + r'\hline'
    DataIO.writeFile(filename,input_lines=inlines)
    if print_summary:
        print('Summary')
        for s in stars:
            print('%s: %i lines measured'%(s,len(ddict[s].linefit.wave_fit))+\
                  ', of which %i lines have been identified.'%line_counter[s])
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 104, Source: TableWriter.py

Example 8: makeInputFiles

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def makeInputFiles(self):
     
     '''
     Make the input files with just one line request in each.
     
     These inputfiles are then converted to format appropriate for Vic3 and
     subsequently copied to Vic3.
     
     '''
     
     model_id = self.models[self.current_model] 
     vic_model_folder = os.path.join('/data','leuven',self.disk,\
                                     self.account,'COCode','%s_%i/'\
                                     %(model_id,self.current_model))
     subprocess.call('ssh %[email protected] mkdir %s'\
                     %(self.account,vic_model_folder),shell=True)
     will_calculate_stuff = 0
     custom_files = []
     opacity_files = []
     starfiles = []
     for model_id_sphinx in self.sphinx_model_ids[self.current_model]:
         these_trans = [trans 
                        for trans in self.transitions[self.current_model]
                        if trans.getModelId() == model_id_sphinx]
         for i,trans in enumerate(these_trans):
             will_calculate_stuff = 1
             actual_command_list \
                 = self.command_lists[self.current_model].copy()
             actual_command_list['DATA_DIRECTORY'] \
                 = '"/user/leuven/%s/%s/COCode/data/"'\
                   %(self.disk,self.account)
             actual_command_list['OUTPUT_DIRECTORY'] \
                 = '"/data/leuven/%s/%s/COCode/output/%s/"'\
                   %(self.disk,self.account,trans.getModelId())
             actual_command_list['PARAMETER_FILE'] \
                 = '"/data/leuven/%s/%s/COCode/output/%s/%s"'\
                   %(self.disk,self.account,trans.getModelId(),\
                     'parameter_file_%s.dat'%trans.getModelId())
             actual_command_list['OUTPUT_SUFFIX'] = trans.getModelId()
             opacity_files.append(actual_command_list['TEMDUST_FILENAME'])
             if int(actual_command_list['KEYWORD_DUST_TEMPERATURE_TABLE']):
                 actual_command_list['DUST_TEMPERATURE_FILENAME'] \
                     = '"/data/leuven/%s/%s/COCode/dust_files/%s"'\
                       %(self.disk,self.account,\
                         os.path.split(actual_command_list\
                                       ['DUST_TEMPERATURE_FILENAME']\
                                       .strip('"'))[1])
             path = '/data/leuven/%s/%s/COCode/CustomFiles/'\
                    %(self.disk,self.account)
             molec_dict = trans.molecule.makeDict(path)
             starfiles.append(molec_dict.pop('STARFILE',''))
             commandfile = \
                  ['%s=%s'%(k,v) 
                   for k,v in sorted(actual_command_list.items()) 
                   if k != 'R_POINTS_MASS_LOSS'] + ['####'] + \
                  ['%s=%s'%(k,v) 
                   for k,v in sorted(molec_dict.items())] + ['####'] + \
                  ['%s=%s'%(k,v) 
                   for k,v in sorted(trans.makeDict().items())] + \
                  ['######']
             for key,fkey in zip(['ENHANCE_ABUNDANCE_FACTOR',     
                                  'SET_KEYWORD_CHANGE_ABUNDANCE',
                                  'SET_KEYWORD_CHANGE_TEMPERATURE'],\
                                 ['ABUNDANCE_FILENAME',\
                                  'CHANGE_FRACTION_FILENAME',\
                                  'NEW_TEMPERATURE_FILENAME']):
                  if getattr(trans.molecule,key.lower()):
                       custom_files.append((getattr(trans.molecule,\
                                                    fkey.lower()),\
                                            molec_dict[fkey].strip('"')))
             if actual_command_list.has_key('R_POINTS_MASS_LOSS'):
                 commandfile.extend(['%s=%s'%('R_POINTS_MASS_LOSS',v) 
                                     for v in actual_command_list\
                                              ['R_POINTS_MASS_LOSS']] + \
                                    ['####'])
             infile = '_'.join(['gastronoom',trans.getModelId(),\
                                '%i.inp'%(i+1)])
             DataIO.writeFile(os.path.join(cc.path.gastronoom,self.path,\
                                           'models',model_id,'vic_input',\
                                           infile),\
                              commandfile)
             self.inputfiles[self.current_model]\
                 .append(os.path.join('/data','leuven',self.disk,\
                                      self.account,'COCode','%s_%i'\
                                      %(model_id,self.current_model),\
                                      infile.replace('.inp','.*')))
         #- There is no overlap between filenames: All filenames with the 
         #- same trans model id get an increasing number i
         #- Then they are copied to VIC3 and removed afterwards, so there is 
         #- never a problem to store them in the cooling model_id folder
     if not will_calculate_stuff:
         return
     else:
         starfiles = list(set([f for f in starfiles if f]))
         if len(starfiles) > 1: 
             print('WARNING! Multiple starfiles detected in grid in Vic.py!')
         if starfiles:
             path = os.path.join('/data','leuven',self.disk,self.account,\
                              'COCode','StarFiles','starfile_tablestar.dat')
             subprocess.call(['scp ' + starfiles[0] + ' ' + self.account + \
#......... part of the code omitted here .........
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 103, Source: Vic.py

Example 9: makeJobFile

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
 def makeJobFile(self):
     
     '''
     Make the job file that will run the loop on VIC3 and copy the cooling
     and mline output to VIC3.
     
     @return: to be printed strings once all the copying is done, which 
              shows how many transitions are being calculated for which 
              sphinx model id
     @rtype: list[string]
     
     '''
     
     model_id = self.models[self.current_model]
     vic_server = '%[email protected]'%self.account
     jobfiles = []
     printing = []
     for model_id_sphinx in self.sphinx_model_ids[self.current_model]:
         these_trans = [trans 
                        for trans in self.transitions[self.current_model] 
                        if trans.getModelId() == model_id_sphinx]
         models_in_job = (int(log10(len(these_trans))))**2 
         if not models_in_job: models_in_job = 1
         job_number = len(these_trans)%models_in_job==0.0 \
                         and len(these_trans)/models_in_job \
                         or int(len(these_trans)/models_in_job)+1
         #- job_number: this is the number of jobs you want to queue
         job_number = job_number/8+1  
         time_per_job = self.time_per_sphinx*models_in_job
         walltimestring = '%.2i:00:00'%(int(time_per_job/60)+1)
         
         #- Create job file
         jobfile = DataIO.readFile(os.path.join(cc.path.gastronoom,\
                                                'vic_job_example.sh'))
         new_jobfile = []
         for line in jobfile:
             #if line.find('#PBS -l nodes=1:ppn=8') != -1:
             #    new_line = line.replace('ppn=8','ppn=%i'%)
             if line.split('=')[0].find('#PBS -l walltime') != -1:
                 new_line = '='.join([line.split('=')[0],walltimestring])
             elif line.split('=')[0].find('export COCODEHOME') != -1:
                 new_line = line.replace('vsc30226',self.account)\
                                     .replace('/302/','/%s/'%self.disk)
             elif line.split('=')[0].find('export COCODEDATA') != -1:
                 new_line = '='.join([line.split('=')[0],\
                         os.path.join(line.split('=')[1],model_id+'_'+\
                         str(self.current_model)+'/')])\
                         .replace('vsc30226',self.account)\
                         .replace('/302/','/%s/'%self.disk)
             elif line.find('for i in $(seq 1 1)') != -1:
                 new_line = 'for i in $(seq 1 %i)'%models_in_job
             elif line.split('=')[0].find('export MODELNUMBER') != -1:
                 new_line = '='.join([line.split('=')[0],line.split('=')[1]\
                                 .replace('model_id',model_id_sphinx)])
             else:
                 new_line = line
             new_jobfile.append(new_line)
             
         #- Save job file, change permission and copy to VIC
         local_folder = os.path.join(cc.path.gastronoom,\
                                     self.path,'models',model_id_sphinx)
         jobfilename_vic = '/user/leuven/'+self.disk+'/' + self.account + \
                           '/COCode/vic_job_' + model_id_sphinx + '.sh'
         jobfilename_local = os.path.join(local_folder,'vic_input',\
                                          'vic_job_%s.sh'%model_id_sphinx)
         DataIO.writeFile(jobfilename_local,new_jobfile)
         subprocess.call(['chmod +x %s'%jobfilename_local],shell=True)
         subprocess.call(['scp %s %s:%s'%(jobfilename_local,vic_server,\
                                          jobfilename_vic)],shell=True)
         jobfiles.append((jobfilename_vic,job_number))  
         
         #- Make the output folder on VIC
         vic_folder = '/data/leuven/%s/%s/COCode/output/%s/'\
                      %(self.disk,self.account,model_id_sphinx)
         subprocess.call('ssh %s mkdir %s'%(vic_server,vic_folder),\
                         shell=True)
         
         #-copy required GASTRoNOoM files, molecule specific.
         these_molecules = set(['sampling'] + \
                               [trans.molecule.molecule 
                                for trans in these_trans])
         to_be_copied = ['coolfgr*','input%s.dat'%model_id_sphinx]
         to_be_copied.extend(['cool*_%s.dat'%molec 
                              for molec in these_molecules])
         to_be_copied.extend(['ml*_%s.dat'%molec 
                              for molec in these_molecules 
                              if molec != 'sampling'])
         for filecopy in to_be_copied:
             subprocess.call(['scp %s %s:%s.'\
                              %(os.path.join(local_folder,filecopy),\
                                vic_server,vic_folder)], shell=True)
         
         #- number of nodes*number of cpus=amount of times to queue it
         printing.append('Running %i jobs with %i models each for ID %s.' \
                         %(job_number*7, models_in_job,model_id_sphinx))
     
     #- Create the run-jobs file.
     runjobsfile = DataIO.readFile(os.path.join(cc.path.gastronoom,\
                                                'vic_run_jobs_example.sh'))
     new_runjobsfile = []
#......... part of the code omitted here .........
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 103, Source: Vic.py

Example 10: doChemistry

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
    def doChemistry(self,star):
        
        """
        Running Chemistry.
        
        @param star: The parameter set for this session
        @type star: Star()
        
        """

        print '***********************************'                                       
        #- Create the input dictionary for this Chemistry run
        print '** Making input file for Chemistry'
        #-- Add the previous model_id to the list of new entries, so it does 
        #   not get deleted if replace_db_entry == 1. 
        if self.model_id: 
            self.new_entries.append(self.model_id)
        self.model_id = ''
        self.command_list = dict()
        # Special cases go here instead of in the loop over orig.
        # Then input_lines/command_list is what ends up in the database.
        # input_lines = d
        
        if star['PERFORM_ROUTINE'] == 0:
            self.command_list['ROUTINE_RADIUS'] = 0
        else:
            self.command_list['ROUTINE_RADIUS'] = star['ROUTINE_RADIUS']
                #*star['R_STAR']*star.Rsun
        self.command_list['R_STAR'] = star['R_STAR']*star.Rsun
        #self.command_list['R_INNER_CHEM'] = star['R_INNER_CHEM']*\
            #star['R_STAR']*star.Rsun
        #self.command_list['R_OUTER_CHEM'] = star['R_OUTER_CHEM']*\
            #star['R_STAR']*star.Rsun
        
        self.command_list['REACTIONS_FILE'] = '"'+os.path.join(cc.path.csource,\
            'rates',star['REACTIONS_FILE'])+'"'
        self.command_list['SPECIES_FILE'] = '"'+os.path.join(cc.path.csource,\
            'specs',star['SPECIES_FILE'])+'"'
        self.command_list['FILECO'] = '"'+os.path.join(cc.path.csource,\
            'shielding',star['FILECO'])+'"'
        self.command_list['FILEN2'] = '"'+os.path.join(cc.path.csource,\
            star['FILEN2'])+'"'
        
        add_keys = [k  
                    for k in self.standard_inputfile.keys() 
                    if not self.command_list.has_key(k)]
        [self.setCommandKey(k,star,star_key=k.upper(),\
                            alternative=self.standard_inputfile[k])
         for k in add_keys]

        print '** DONE!'
        print '***********************************'
        
        #-- Check the Chemistry database if the model was calculated before
        modelbool = self.checkDatabase()
                
        #-- if no match found in database, calculate new model with new model id 
        #-- if the calculation did not fail, add entry to database for new model
        if not modelbool:
            input_dict = self.command_list.copy()
            input_lines = []
            orig = DataIO.readFile(self.inputfilename)
            for i,s in enumerate(orig):
                split = s.split()
                if s[0] == '!':
                    input_lines.append(s)
                else:
                    input_lines.append(" ".join(split[0:2])+' '+\
                        str(self.command_list[split[0]]))
            output_folder = os.path.join(cc.path.cout,'models',self.model_id)
            DataIO.testFolderExistence(output_folder)
            input_lines.append('OUTPUT_FOLDER = "' +\
                               output_folder+'/"') 
            
            input_filename = os.path.join(cc.path.cout,'models',\
                                          'inputChemistry_%s.txt'%self.model_id)
            
            DataIO.writeFile(filename=input_filename,input_lines=input_lines)
            
            #subprocess.call(' '.join([cc.path.ccode,input_filename]),shell=True)
            subprocess.call(' '.join([os.path.join(cc.path.csource,'csmodel'),input_filename]),shell=True)
            
            
            # Files created at the end of the run; test whether they exist to check success.
            testf1 = os.path.join(output_folder,'cscoldens.out')
            testf2 = os.path.join(output_folder,'csfrac.out')
            if os.path.exists(testf1) and os.path.exists(testf2) and \
                    os.path.isfile(testf1) and os.path.isfile(testf2):
                del self.db[self.model_id]['IN_PROGRESS']
                self.db.addChangedKey(self.model_id)
            else:
                print '** Model calculation failed. No entry is added to ' + \
                      'the database.'
                del self.db[self.model_id]
                self.model_id = ''
            if not self.single_session: self.db.sync()

        #- Note that the model manager now adds/changes MUTABLE input keys, 
        #- which MAY be overwritten by the input file inputComboCode.dat
        print '***********************************'
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 102, Source: Chemistry.py

Example 11: updateInputfile

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
def updateInputfile(fn,fnT,ai=0,conv=0,texguess=0.9,fn_new=None):

    '''
    Update the ALI inputfile with new information for the next iteration. 
    
    @param fn: The full original ALI inputfilename
    @type fn: str
    @param fnT: The full filename of the temperature profile
    @type fnT: str
    
    @keyword ai: The amount of iterations to do in the ALI calculation. Set to 
                 the default of 0 if you wish to let ALI converge to whatever 
                 convergence criterion set in the ALI input OR by the iter_conv
                 keyword. 
                 
                 (default: 0)
    @type ai: int
    @keyword conv: The convergence criterion to use in ALI during iteration
                   with the energy balance. If 
                   more strict than the criterion given in the ALI 
                   inputfile this keyword is ignored. Default in case the
                   same convergence criterion should be used as in the ALI
                   inputfile during the iteration.
                   
                   (default: 0)
    @type conv: float 
    @keyword texguess: The TexGuess value for the iterations. Typically this is 
                       0.9 for standard initial conditions, but alternative 
                       could be -1 to start off from the populations calculated 
                       in the previous iteration.
                       
                       (default: 0.9)
    @type texguess: float    
    @keyword fn_new: The new (full) ALI inputfile name. Default if original is
                     to be updated.
                     
                     (default: None)
    @type fn_new: str
    
    '''
    
    ai, conv, tex_guess = int(ai), float(conv), float(texguess)
    
    #-- Change T and pop filenames
    data = changeKey(fn=fn,k='Tkin',v='I {} 1.0'.format(fnT))

    #-- Set the TexGuess to -1 so ALI reads the population filename
    tex_line = getKey(data=data,k='PassBand').split()
    tex_line[1] = str(texguess)
    data = changeKey(data=data,k='PassBand',v=' '.join(tex_line))
    
    #-- Set the number of maximum iterations if full convergence is not needed,
    #   or alternatively change the convergence criterion
    if ai or conv: 
        conv_line = getKey(data=data,k='MaxIter').split()
        if ai: conv_line[0] = str(ai)
        if conv and float(conv_line[5]) < conv: conv_line[5] = str(conv)
        data = changeKey(data=data,k='MaxIter',v=' '.join(conv_line))
    
    #-- Save the inputfile
    DataIO.writeFile(input_lines=data,filename=fn if fn_new is None else fn_new)
Developer ID: IvS-KULeuven, Project: ComboCode, Lines: 63, Source: ALI.py

Example 12: doSphinx

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
    def doSphinx(self,star):
        
        """
        Run Sphinx.
        
        First, database is checked for retrieval of old models. 

        @param star: The parameter set for this session
        @type star: Star()
        
        """
        
        self.checkSphinxDatabase()
        print '%i transitions out of %i not yet calculated.'\
              %(len([boolean for boolean in self.trans_bools if not boolean]),\
                len(self.trans_bools))
        for i,(trans_bool,trans) in enumerate(zip(self.trans_bools,self.trans_list)):
            if not trans_bool and trans.getModelId():
                if not self.sphinx:
                    #- Only transitions with no db entry will get empty model id
                    del self.sph_db[self.model_id][trans.molecule.getModelId()]\
                                   [trans.getModelId()][str(trans)]
                    self.sph_db.addChangedKey(self.model_id)
                    trans.setModelId('')
                elif self.vic <> None:
                    #- add transition to the vic translist for this cooling id
                    self.vic.addTrans(trans)
                elif self.recover_sphinxfiles: 
                    self.checkSphinxOutput(trans)
                else:
                    self.updateModel(trans.getModelId())
                    commandfile = ['%s=%s'%(k,v) 
                                   for k,v in sorted(self.command_list.items()) 
                                   if k != 'R_POINTS_MASS_LOSS'] + ['####'] + \
                                  ['%s=%s'%(k,v) 
                                   for k,v in sorted(trans.molecule.makeDict()\
                                                                .items())] + \
                                  ['####'] + \
                                  ['%s=%s'%(k,v) 
                                   for k,v in sorted(trans.makeDict()\
                                                                .items())] + \
                                  ['######']
                    if self.command_list.has_key('R_POINTS_MASS_LOSS'):
                        commandfile.extend(['%s=%s'%('R_POINTS_MASS_LOSS',v) 
                                            for v in self.command_list\
                                                    ['R_POINTS_MASS_LOSS']] + \
                                           ['####'])
                    filename = os.path.join(cc.path.gout,'models',\
                                            'gastronoom_%s.inp'\
                                            %trans.getModelId())
                    DataIO.writeFile(filename,commandfile)                
                    print 'Starting calculation for transition %i out of %i.'\
                          %(i+1,len(self.trans_bools))
                    self.execGastronoom(subcode='sphinx',filename=filename)
                    self.checkSphinxOutput(trans)
                    self.sph_db.sync()
                    
        #- check if at least one of the transitions was calculated: then 
        #- self.model_id doesnt have to be changed
        self.finalizeSphinx() 
        mline_not_available = set([trans.molecule.getModelId() 
                                   for boolean,trans in zip(self.trans_bools,\
                                                            self.trans_list) 
                                   if not boolean])  \
                              == set([''])
        if self.vic <> None and self.sphinx and (False in self.trans_bools \
              and not mline_not_available):
            self.vic.queueModel()
        elif self.vic <> None and self.sphinx and (False not in self.trans_bools\
              or mline_not_available):
            self.vic.reset()
Developer ID: FungKu01, Project: ComboCode, Lines: 73, Source: Gastronoom.py

Example 13: doCooling

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]
    def doCooling(self,star):
        
        """
        Run Cooling.

        First, database is checked for retrieval of old model. 

        @param star: The parameter set for this session
        @type star: Star()
        
        """
        
        #-- Collect H2O and CO molecule definitions for inclusion in the 
        #   cooling inputfile. Also includes abundance_filename info for H2O if
        #   requested
        if star.getMolecule('1H1H16O') <> None:
            h2o_dict = star.getMolecule('1H1H16O').makeDict()
        else:            
            h2o_dict = Molecule('1H1H16O',45,45,648,50).makeDict()
        if star.getMolecule('12C16O') <> None:
            co_dict = star.getMolecule('12C16O').makeDict()
        else:
            co_dict = Molecule('12C16O',61,61,240,50).makeDict()
        
        #-- no abundance profiles should be possible for CO. 
        if co_dict.has_key('MOLECULE_TABLE'):
            raise IOError('CO cannot be attributed a custom abundance ' + \
                          'profile at this time.')
        
        #-- F_H2O is irrelevant if an abundance file is passed for oH2O
        if h2o_dict.has_key('MOLECULE_TABLE'):
            del self.command_list['F_H2O']
            
        #-- Collect all H2O molecular information important for cooling
        molec_dict = dict([(k,h2o_dict[k]) 
                            for k in self.cooling_molec_keys 
                            if h2o_dict.has_key(k)])

        #-- Check database: only include H2O extra keywords if 
        #   abundance_filename is present. CO can't have this anyway.
        model_bool = self.checkCoolingDatabase(molec_dict=molec_dict)    
        
        #- Run cooling if above is False
        if not model_bool:
            DataIO.testFolderExistence(os.path.join(cc.path.gout,'models',\
                                                    self.model_id))
            commandfile = ['%s=%s'%(k,v) 
                           for k,v in sorted(self.command_list.items())
                           if k != 'R_POINTS_MASS_LOSS'] + \
                          ['####'] + \
                          ['%s=%s'%(k,co_dict['MOLECULE'])] + \
                          ['%s=%s'%(k,h2o_dict[k]) 
                           for k in self.cooling_molec_keys + ['MOLECULE']
                           if h2o_dict.has_key(k)] + ['####']
            if self.command_list.has_key('R_POINTS_MASS_LOSS'):
                commandfile.extend(['%s=%s'%('R_POINTS_MASS_LOSS',v) 
                                    for v in self.command_list\
                                                    ['R_POINTS_MASS_LOSS']] + \
                                   ['####'])
            filename = os.path.join(cc.path.gout,'models',\
                                    'gastronoom_' + self.model_id + '.inp')
            DataIO.writeFile(filename,commandfile)
            if not self.skip_cooling:
                self.execGastronoom(subcode='cooling',filename=filename)
                self.cool_done = True
            if os.path.isfile(os.path.join(cc.path.gout,'models',\
                                           self.model_id,'coolfgr_all%s.dat'\
                                           %self.model_id)):
                #-- Add the other input keywords for cooling to the H2O info. 
                #   This is saved to the db
                molec_dict.update(self.command_list)
                self.cool_db[self.model_id] = molec_dict
                self.cool_db.sync()
            else:
                print 'Cooling model calculation failed. No entry is added '+ \
                      'to the database.'
                self.model_id = ''
Developer ID: FungKu01, Project: ComboCode, Lines: 79, Source: Gastronoom.py

Example 14: doMCMax

# Required import: from cc.tools.io import DataIO [as alias]
# Alternatively: from cc.tools.io.DataIO import writeFile [as alias]

#......... part of the code omitted here .........
        #- outputfiles may not match anymore when e.g. plotting dust opacities
        dust_dict = dict()
        for species in star.getDustList():
            species_dict = dict()
            if star['TDESITER']:
                species_dict['TdesA'] = star['T_DESA_' + species]
                species_dict['TdesB'] = star['T_DESB_' + species]
            if star.has_key('R_MIN_%s'%species) and star['R_MIN_%s'%species]:
                species_dict['minrad'] = star['R_MIN_%s'%species]\
                                           *star['R_STAR']*star.Rsun/star.au
            #- R_MAX is always created by Star(), even if not requested.
            #- Will be empty string if not available; no maxrad is given
            if star['R_MAX_%s'%species]:
                species_dict['maxrad'] = star['R_MAX_%s'%species]\
                                           *star['R_STAR']*star.Rsun/star.au
            if int(star['MRN_DUST']) and star.has_key(['RGRAIN_%s'%species]):
                species_dict['rgrain'] = star['RGRAIN_%s'%species]
            else:
                species_dict['abun'] = star['A_%s'%species]
            if not os.path.split(star.dust[species]['fn'])[0]:
                print('WARNING! %s has an old opacity file. Should replace for reproducibility.'%species)
            dust_dict[star.dust[species]['fn']] = species_dict
        self.command_list['dust_species'] = dust_dict
        print '** DONE!'
        print '***********************************'
        
        #-- Check the MCMax database if the model was calculated before
        modelbool = self.checkDatabase()
                
        #-- if no match found in database, calculate new model with new model id 
        #-- if the calculation did not fail, add entry to database for new model
        if not modelbool:
            self.model_id = self.makeNewId()
            input_dict = self.command_list.copy()
            del input_dict['photon_count']
            del input_dict['dust_species']
            #-- dust_list in star is already sorted. rgrains species first, 
            #   then the rest, according to the order of appearance in Dust.dat
            for index,species in enumerate(star.getDustList()):
                speciesfile = star.dust[species]['fn']
                speciesdict = self.command_list['dust_species'][speciesfile]
                for k,v in speciesdict.items():
                    input_dict['%s%.2i'%(k,index+1)] = v
                #-- If speciesfile is .topac, they are T-dependent opacities
                #   and should always be given as topac##. The file then points
                #   to the .particle files of the T-dependent opacities.
                if speciesfile.find('.topac') != -1:
                    ftype = 'topac'
                #-- When full scattering is requested, always use .particle 
                #   files if they are available. If not, use whatever is in 
                #   Dust.dat, but then the species will not be properly 
                #   included for full scattering (requires scattering matrix)
                #   It is OK to have .opac files in Dust.dat, as long as 
                #   .particle files exist in the same location
                elif star['SCATTYPE'] == 'FULL':
                    partfile = os.path.splitext(speciesfile)[0] + '.particle'
                    if os.path.isfile(os.path.join(cc.path.mopac,partfile)):
                        ftype = 'part'
                        speciesfile = partfile
                    else:
                        ftype = 'opac'
                #-- If not full scattering, opacity files are fine. So, use 
                #   whatever is in Dust.dat. Dust.dat should preferentially 
                #   include .opac (or .opacity) files, but can be .particle 
                #   files if opacity files are not available, in which case
                #   ftype should still be 'part'.
                else:
                    if speciesfile.find('.particle') != -1: ftype = 'part'
                    else: ftype = 'opac'
                #-- Add the opacities home folder (not saved in db)
                input_dict['%s%.2i'%(ftype,index+1)] = "'%s'"\
                            %(os.path.join(cc.path.mopac,speciesfile))       
            input_filename = os.path.join(cc.path.mout,'models',\
                                          'inputMCMax_%s.dat'%self.model_id)
            output_folder = os.path.join(cc.path.mout,'models',self.model_id)
            input_lines = ["%s=%s"%(k,str(v)) 
                           for k,v in sorted(input_dict.items())]
            DataIO.writeFile(filename=input_filename,input_lines=input_lines)
            subprocess.call(' '.join(['MCMax',input_filename,\
                                      str(self.command_list['photon_count']),\
                                      '-o',output_folder]),shell=True)
            self.mcmax_done = True
            testf1 = os.path.join(output_folder,'denstemp.dat')
            testf2 = os.path.join(output_folder,'kappas.dat')
            if os.path.exists(testf1) and os.path.exists(testf2) and \
                    os.path.isfile(testf1) and os.path.isfile(testf2):
                self.db[self.model_id] = self.command_list
                self.db.sync()
            else:
                print '** Model calculation failed. No entry is added to ' + \
                      'the database and LAST_MCMAX_MODEL in STAR dictionary '+\
                      'is not updated.'
                self.model_id = ''
                
        #- add/change 'LAST_MCMAX_MODEL' entry
        if self.model_id:
            star['LAST_MCMAX_MODEL'] = self.model_id
        #- Note that the model manager now adds/changes MUTABLE input keys, 
        #- which MAY be overwritten by the input file inputComboCode.dat
        print '***********************************'
Developer ID: FungKu01, Project: ComboCode, Lines: 104, Source: MCMax.py


Note: The cc.tools.io.DataIO.writeFile method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License for distribution and use; do not reproduce without permission.