

Python OCO_Matrix.format_col Method Code Examples

This article collects typical usage examples of the Python method OCO_Matrix.OCO_Matrix.format_col. If you are unsure what OCO_Matrix.format_col does, how to call it, or how it is used in practice, the hand-picked examples below should help. You can also explore further usage examples of the containing class, OCO_Matrix.OCO_Matrix.


Two code examples of the OCO_Matrix.format_col method are shown below, sorted by popularity by default.
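Before the full examples, here is a minimal sketch of the pattern both of them share. The column names and data values are hypothetical, and the meaning of format_col (one boolean per column, where True marks a column the writer should numerically format) is inferred from how the examples below use it, not from OCO_Matrix documentation.

# Minimal usage sketch distilled from the two examples below.
# Column names and data values here are hypothetical.
import numpy

from OCO_Matrix import OCO_Matrix

column_names = ['run_name', 'total_runtime', 'iterations']
file_data = numpy.array([['run_001', 12.5, 3]], dtype=object)

mat = OCO_Matrix()
mat.file_id = 'Example Results'  # optional header id; both examples set one
mat.labels = column_names
mat.data = file_data

# One boolean per column: True appears to request numeric formatting on
# write, False leaves the column as a plain string (inferred from Example 1)
mat.format_col = [True] * len(column_names)
mat.format_col[column_names.index('run_name')] = False

mat.write('example_results.dat', auto_size_cols=True)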

Example 1: extract_run_data

# Required import: from OCO_Matrix import OCO_Matrix [as alias]
# Or: from OCO_Matrix.OCO_Matrix import format_col [as alias]

#......... part of the code is omitted here .........
                read_options = {}
                
                # Find read options
                for curr_item_desc in extract_items:
                    if type(curr_item_desc) == dict:
                        read_options = curr_item_desc

                extract_obj = self.get_cached_file(curr_dir, extract_file, **read_options)

                if extract_obj is not None:
                    logger.debug('... %s' % extract_file)
                    for curr_item_desc in extract_items:
                        if type(curr_item_desc) == int:
                            insert_index = curr_item_desc
                        elif type(curr_item_desc) == dict:
                            # Ignore, already read in other loop
                            pass
                        elif type(curr_item_desc) == tuple:
                            try:
                                item_results = curr_item_desc[0](self, extract_obj, curr_dir, *curr_item_desc[1:])
                            except:
                                logger.error('Error calling routine: %s with values: %s' % (curr_item_desc[0], curr_item_desc[1:]))
                                logger.error(''.join(traceback.format_exception(*sys.exc_info(), limit=2)))
                                raise

                            # If nothing was returned, move on to the next item
                            if item_results is None:
                                continue
                            
                            for item_name, item_value in item_results:
                                if item_name not in column_names:
                                    # Grow so insert indexes are meaningful 
                                    if insert_index > len(column_names)-1:
                                        column_names.extend([None] * (insert_index - len(column_names)+1))

                                    # If the slot at the column name index is empty,
                                    # place the item name there; otherwise insert it
                                    # just after
                                    if column_names[insert_index] is None:
                                        column_names[insert_index] = item_name
                                    else:
                                        column_names.insert(insert_index+1, item_name)
                                    
                                data_dict[curr_name][item_name] = item_value

                                # Increment so that items from current item results go in order
                                insert_index += 1

                        else:
                            raise Exception('Unknown item type: %s for extract file: %s' % (type(curr_item_desc), extract_file))
                else:
                    logger.debug('... %s (skipped, not present)' % extract_file)

            # Check validity of computed values
            for check_items, check_str in PER_RUN_CHECKS:
                
                check_count = 0
                for curr_item in check_items:
                    if curr_item in data_dict[curr_name]:
                        check_count += 1

                # Skip this check if none of its required items are present in the dictionary
                if check_count == 0:
                    continue
                elif check_count != len(check_items):
                    err_msg = 'Not all required check items: "%s" present in dictionary for run_dir: %s' % (check_items, curr_dir)
                    logger.error(err_msg)
                    raise Exception(err_msg)
                else:
                    check_eval = check_str.format(**data_dict[curr_name])
                    if not eval( check_eval ):
                        err_msg = 'Check failed for run dir: %s. Check string: "%s" evaluated as: "%s"' % (curr_dir, check_str, check_eval)
                        logger.error(err_msg)
                        raise Exception(err_msg)

        # Clean up empty (None) values from the column names
        while True:
            try:
                column_names.remove(None)
            except ValueError:
                break

        # Remove any data that was not in the list of items we were told to
        # process, in case old data is present in a file where the run dir
        # list has been updated. Iterate over a copy of the keys so entries
        # can be deleted while looping.
        for data_name in list(data_dict.keys()):
            if data_name not in seen_names:
                del data_dict[data_name]
                
        # Convert extracted data to matrix and write
        file_obj = OCO_Matrix()
        if file_id is not None:
            file_obj.file_id = file_id
        file_obj.data = self.convert_data_to_matrix(column_names, data_dict)
        
        file_obj.labels = column_names
        file_obj.format_col = [True] * len(column_names)
        file_obj.format_col[ column_names.index(RUN_COLUMN_NAME) ] = False

        logger.info('Writing: %s' % output_filename)
        file_obj.write(output_filename, auto_size_cols=True)
Author: E-LLP | Project: RtRetrievalFramework | Lines of code: 104 | Source file: testset_summary.py
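The PER_RUN_CHECKS constant referenced above is not part of this excerpt. Judging from the validation loop, each entry pairs the item names a check requires with a Python expression template that is filled in via str.format and then passed to eval. A hypothetical entry might look like this:

# Hypothetical PER_RUN_CHECKS entry, inferred from the validation loop above:
# the tuple lists the items the check needs, and the string is an expression
# template expanded with str.format(**data_dict[curr_name]) and then eval()'d
PER_RUN_CHECKS = [
    (('iter_count',), '{iter_count} > 0'),
]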

Example 2: compute_runtimes

# Required import: from OCO_Matrix import OCO_Matrix [as alias]
# Or: from OCO_Matrix.OCO_Matrix import format_col [as alias]

#......... part of the code is omitted here .........
                if( line.find("max_pf_mom") >= 0 ):
                    line_parts = line.split()
                    num_pf_mom = int(line_parts[3])
                    continue

            if rt_line_count > 0:
                node_avg = total_runtime / rt_line_count
            else:
                node_avg = 0.0

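            # One-pass sample variance: s^2 = (sum(x^2) - n*mean^2) / (n - 1)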
            if rt_line_count > 1:
                n_count = float(rt_line_count)
                node_var = (1 / (n_count-1)) * sum_sq_runtime - (n_count / (n_count-1)) * node_avg*node_avg
            else:
                node_var = 0.0

            # Collect additional info
            single_scat = 'false'
            polarization = 'false'
            sv_size = 0
            if additional_cols:
                # Collect oco_l2.run info
                run_file = test_data_loc + "/" + RUNFILE_BASE
                if os.path.exists(run_file):
                    with open(run_file, 'r') as run_f:
                        run_lines = run_f.readlines()

                    for line in run_lines:
                        if( line.find("single_scatter_correction") >= 0 ):
                            line_parts = line.split()
                            single_scat = line_parts[2].lower()

                        if( line.find("polarization") >= 0 ):
                            line_parts = line.split()
                            polarization = line_parts[2].lower()

                # Collect info from summary.dat file
                summary_file = test_data_loc + "/" + SUMMARY_BASE
                if os.path.exists(summary_file):
                    with open(summary_file, 'r') as summ_f:
                        summ_lines = summ_f.readlines()

                    for line in summ_lines:
                        if( line.find("Total") >= 0 ):
                            line_parts = line.split()
                            sv_size = int(line_parts[2])


            # Copy the per-run values gathered above into the output matrices
            default_row = [tc_name, total_runtime, iter_count]
            for col_idx in range(len(default_row)):
                default_data[row_idx][col_idx] = default_row[col_idx]

            parallel_row = [rt_line_count, node_avg, node_var, min_runtime, max_runtime]
            for col_idx in range(len(parallel_row)):
                parallel_data[row_idx][col_idx] = parallel_row[col_idx]

            addl_row = [single_scat, polarization, num_pf_mom, sv_size]
            for col_idx in range(len(addl_row)):
                additional_data[row_idx][col_idx] = addl_row[col_idx]

            row_idx += 1


    # Put together the final output label list
    file_labels = copy.copy(DEFAULT_LABELS)
    if parallel_cols:
        file_labels += PARALLEL_LABELS
    if additional_cols:
        file_labels += ADDITIONAL_LABELS

    # Create a new data matrix for concatenated data
    # Use object dtype so the matrix can hold both strings and numbers
    file_data = numpy.zeros((len(run_dirs), len(file_labels)), dtype=object)

    # Concatenate various types of data
    for row_idx in range(len(run_dirs)):
        dflt_beg = 0
        dflt_end = len(DEFAULT_LABELS)
        file_data[row_idx][dflt_beg:dflt_end] = default_data[row_idx][:]

        par_end = dflt_end
        if parallel_cols:
            par_beg = dflt_end
            par_end = par_beg + len(PARALLEL_LABELS)
            file_data[row_idx][par_beg:par_end] = parallel_data[row_idx][:]

        if additional_cols:
            addl_beg = par_end
            addl_end = addl_beg + len(ADDITIONAL_LABELS)
            file_data[row_idx][addl_beg:addl_end] = additional_data[row_idx][:]

    out_mat_obj = OCO_Matrix()
    out_mat_obj.file_id = "Testset Timing Results"
    out_mat_obj.labels = file_labels
    out_mat_obj.format_col = [False]
    out_mat_obj.data = file_data

    out_mat_obj.write(output_data_file, auto_size_cols=True)
Author: E-LLP | Project: RtRetrievalFramework | Lines of code: 104 | Source file: compute_runtimes.py
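Note that this example sets format_col to the single-element list [False] even though the output matrix has several columns, whereas Example 1 supplies one boolean per column. Presumably OCO_Matrix tolerates a list shorter than the number of columns, but the per-column form shown in Example 1 is the safer pattern to copy.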


Note: The OCO_Matrix.OCO_Matrix.format_col method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to the corresponding project's License before distributing or reusing the code, and do not reproduce this article without permission.