This article collects typical usage examples of the Python method OCO_Matrix.OCO_Matrix.data[:,dst_col_idx]. If you have been wondering what OCO_Matrix.data[:,dst_col_idx] does or how to use it, the selected code example below may help. You can also look further into the class this method belongs to, OCO_Matrix.OCO_Matrix.
One code example of the OCO_Matrix.data[:,dst_col_idx] method is shown below; examples are sorted by popularity by default.
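Before the full example, the basic pattern being demonstrated is assigning a 1-D array into a single column of the matrix object's underlying numpy array. A minimal orientation sketch, assuming OCO_Matrix exposes its values through a numpy data attribute and a dims tuple as in the example below; the file name and column index here are illustrative only:

import numpy
from OCO_Matrix import OCO_Matrix

mat = OCO_Matrix("some_matrix_file.dat")   # hypothetical input file
dst_col_idx = 0                            # illustrative column index
# Zero the chosen column, then overwrite it with new per-row values.
mat.data[:, dst_col_idx] = 0
mat.data[:, dst_col_idx] = numpy.arange(mat.dims[0], dtype=float)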
Example 1: aggregate_matrix_files
# Required module import: from OCO_Matrix import OCO_Matrix [as alias]
# Or: from OCO_Matrix.OCO_Matrix import data[:,dst_col_idx] [as alias]
# The example also uses the standard itertools module and numpy.
def aggregate_matrix_files(input_files, output_filename, src_column_names=None, dst_column_names=None):
    # Create combined atmosphere file from values in all GAS sections
    # Will overwrite columns of the same name in sequential order

    # First gather contents of various files
    uniq_file_ids = []
    uniq_labels = []
    uniq_units = []
    uniq_filenames = []
    file_objs = []

    max_num_rows = -1
    for curr_input_filename in input_files:
        if curr_input_filename not in uniq_filenames:
            print "%s -> %s" % (curr_input_filename, output_filename)
            uniq_filenames.append(curr_input_filename)

            curr_matobj = OCO_Matrix(curr_input_filename)
            file_objs.append(curr_matobj)

            max_num_rows = max(max_num_rows, curr_matobj.dims[0])

            for (curr_lbl, curr_unit) in itertools.izip_longest(curr_matobj.labels, curr_matobj.units, fillvalue=""):
                if not curr_lbl in uniq_labels:
                    uniq_labels.append(curr_lbl)
                    uniq_units.append(curr_unit)

            if not curr_matobj.file_id in uniq_file_ids:
                uniq_file_ids.append(curr_matobj.file_id)

    out_matobj = OCO_Matrix()

    if len(uniq_file_ids) == 1:
        out_matobj.file_id = uniq_file_ids[0]
    else:
        out_matobj.file_id = uniq_file_ids

    if src_column_names == None:
        src_column_names = uniq_labels
    if dst_column_names == None:
        dst_column_names = uniq_labels

    src_column_names = [ curr_col.upper() for curr_col in src_column_names ]
    dst_column_names = [ curr_col.upper() for curr_col in dst_column_names ]

    out_matobj.labels = dst_column_names
    out_matobj.units = uniq_units
    out_matobj.data = numpy.zeros((max_num_rows, len(dst_column_names)), dtype=float)

    # Now aggregate data
    for curr_matobj in file_objs:
        out_matobj.header.update(curr_matobj.header)

        n_src_rows = curr_matobj.dims[0]
        for (src_col_idx, col_name) in enumerate(curr_matobj.labels):
            if col_name.upper() in src_column_names:
                dst_col_idx = src_column_names.index(col_name.upper())
                out_matobj.data[:, dst_col_idx] = 0  # make sure not to overlay mismatched row sizes
                out_matobj.data[:n_src_rows, dst_col_idx] = curr_matobj.data[:, src_col_idx]

    out_matobj.write(output_filename)

    if len(uniq_filenames) == 1:
        src_filename = uniq_filenames[0]
    else:
        src_filename = ", ".join(uniq_filenames)

    write_source_into_header(output_filename, src_filename)
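A minimal usage sketch for the function above, assuming the inputs are existing OCO_Matrix-format text files; the file and column names are illustrative only, not taken from the original source:

input_files = ["atmosphere_gas_1.dat", "atmosphere_gas_2.dat"]   # hypothetical file names
aggregate_matrix_files(input_files, "combined_atmosphere.dat",
                       src_column_names=["PRESSURE", "TEMPERATURE"],
                       dst_column_names=["PRESSURE", "TEMPERATURE"])

Columns matching src_column_names (compared case-insensitively) are copied into the output in the order given; files listed later overwrite columns of the same name from earlier files.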