This article collects and summarizes typical usage examples of the Python method ocw.evaluation.Evaluation.run. If you have been wondering how exactly Evaluation.run is used, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the class the method belongs to, ocw.evaluation.Evaluation.
A total of 14 code examples of Evaluation.run are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
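Before the numbered examples, here is a minimal, self-contained sketch of the pattern they all share: build a reference Dataset and one or more target Datasets, pick the metrics, construct an Evaluation, and call run(). This is a sketch only; the 5 x 5 grid, the time axis, and the 'prec' variable name are synthetic values invented for illustration, not taken from any example below.

import datetime as dt
import numpy as np
from ocw.dataset import Dataset
from ocw.evaluation import Evaluation
from ocw.metrics import Bias

# Two small synthetic datasets: 12 monthly time steps on a 5 x 5 grid.
lats = np.array([10, 12, 14, 16, 18])
lons = np.array([100, 102, 104, 106, 108])
times = np.array([dt.datetime(2000, m, 1) for m in range(1, 13)])
values = np.arange(300).reshape(12, 5, 5)

reference = Dataset(lats, lons, times, values, 'prec')
target = Dataset(lats, lons, times, values * 1.1, 'prec')

# Bias is a binary metric: it is run once per (reference, target) pair.
evaluation = Evaluation(reference, [target], [Bias()])
evaluation.run()

print(evaluation.results)  # nested per-metric, per-target results

The exact nesting of evaluation.results and evaluation.unary_results depends on the OCW version and on whether subregions are used, which is precisely what several of the test cases below pin down.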
Example 1: test_subregion_unary_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_subregion_unary_result_shape(self):
    bound = Bounds(
        10, 18,
        100, 108,
        dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))

    new_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [TemporalStdDev()],
        [bound]
    )
    new_eval.run()

    # Expected result shape is
    # [
    #     [
    #         [   # Subregions cause this extra layer
    #             temporalstddev.run(reference),
    #             temporalstddev.run(target1),
    #             temporalstddev.run(target2)
    #         ]
    #     ]
    # ]
    self.assertTrue(len(new_eval.unary_results) == 1)
    self.assertTrue(isinstance(new_eval.unary_results, list))
    self.assertTrue(len(new_eval.unary_results[0]) == 1)
    self.assertTrue(len(new_eval.unary_results[0][0]) == 3)
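Reading the assertions above: in the (older) API version this test exercises, unary_results with subregions nests as metric → subregion → dataset, where dataset index 0 is the reference and the remaining indices are the targets in order. A short indexing sketch, continuing from the new_eval object above:

# unary_results[m][s][d]: metric m, subregion s, dataset d
stddev_ref     = new_eval.unary_results[0][0][0]  # reference dataset
stddev_target1 = new_eval.unary_results[0][0][1]  # first target
stddev_target2 = new_eval.unary_results[0][0][2]  # second target

Note that Example 3 below is the same test from a newer OCW version, where the outermost index is the subregion instead.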
Example 2: Taylor_diagram_spatial_pattern_of_multiyear_climatology
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # determine the metric (spatial standard deviation and pattern correlation)
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_clim_dataset,     # climatological mean of the reference dataset
        model_clim_datasets,  # list of climatological means from the model datasets
        [taylor_diagram])

    # run the evaluation (Taylor statistics calculation)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(
        taylor_data,
        model_names,
        obs_name,
        file_name,
        pos='upper right',
        frameon=False)
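A hypothetical invocation of this function. The file paths, variable name, and dataset names are placeholders, and the sketch assumes the datasets have already been put on a common grid (for example with ocw.dataset_processor); ocw.data_source.local.load_file is used here for loading, which may differ from how a given workflow actually obtains its datasets.

import ocw.data_source.local as local

obs = local.load_file('/data/obs_precip.nc', 'pr')  # placeholder path
models = [local.load_file('/data/model_a.nc', 'pr'),
          local.load_file('/data/model_b.nc', 'pr')]

Taylor_diagram_spatial_pattern_of_multiyear_climatology(
    obs, 'Observation', models, ['Model A', 'Model B'],
    'taylor_diagram')  # output file name, passed through to the plotter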
Example 3: test_subregion_unary_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_subregion_unary_result_shape(self):
    bound = Bounds(
        lat_min=10, lat_max=18,
        lon_min=100, lon_max=108,
        start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

    new_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [TemporalStdDev(), TemporalStdDev()],
        [bound, bound, bound, bound, bound]
    )
    new_eval.run()

    # Expected result shape is
    # [
    #     [   # Subregions cause this extra layer
    #         [3, temporalstddev.run(reference).shape],
    #     ]
    # ]

    # 5 == number of subregions
    self.assertTrue(len(new_eval.unary_results) == 5)
    # number of metrics
    self.assertTrue(len(new_eval.unary_results[0]) == 2)
    self.assertTrue(isinstance(new_eval.unary_results, list))
    # number of datasets (reference + targets)
    self.assertTrue(new_eval.unary_results[0][0].shape[0] == 3)
Example 4: test_bias_output_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_bias_output_shape(self):
    bias_eval = Evaluation(self.test_dataset,
                           [self.another_test_dataset],
                           [Bias()])
    bias_eval.run()

    input_shape = tuple(self.test_dataset.values.shape)
    bias_results_shape = tuple(bias_eval.results[0][0].shape)
    self.assertEqual(input_shape, bias_results_shape)
Example 5: Map_plot_bias_of_multiyear_climatology
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def Map_plot_bias_of_multiyear_climatology(obs_dataset, obs_name, model_datasets,
                                           model_names, file_name, row, column):
    '''Draw maps of the observed multi-year climatology and the biases of the models.'''

    # calculate climatology of observation data
    obs_clim = utils.calc_temporal_mean(obs_dataset)

    # determine the metric
    map_of_bias = metrics.TemporalMeanBias()

    # create the Evaluation object (the metric is passed twice here;
    # only results[0] is used below)
    bias_evaluation = Evaluation(obs_dataset,     # reference dataset
                                 model_datasets,  # list of target datasets
                                 [map_of_bias, map_of_bias])

    # run the evaluation (bias calculation)
    bias_evaluation.run()

    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()

    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()

    string_list = list(string.ascii_lowercase)
    ax = fig.add_subplot(row, column, 1)
    m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l',
                fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)

    x, y = m(lons, lats)

    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    cs = m.contourf(x, y, obs_clim,
                    levels=plotter._nice_intervals(obs_dataset.values, 10),
                    extend='both', cmap='PuOr')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(cs, cax=cax)

    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):
        ax = fig.add_subplot(row, column, 2 + imodel)
        m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                    llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l',
                    fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        cs = m.contourf(x, y, rcm_bias[imodel, :], levels=clevs,
                        extend='both', cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ') \n ' + model_names[imodel],
                    xy=(lon_min, lat_min))

    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(cs, cax=cax)

    plt.subplots_adjust(hspace=0.01, wspace=0.05)
    plt.show()
    fig.savefig(file_name, dpi=600, bbox_inches='tight')
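A note on the grid parameters: row * column must provide at least len(model_datasets) + 1 panels, since the first subplot holds the observed climatology and each model bias gets its own panel. A hypothetical call for two models on a 1 x 3 grid, reusing the placeholder obs and models from the sketch after Example 2:

Map_plot_bias_of_multiyear_climatology(
    obs, 'Observation', models, ['Model A', 'Model B'],
    'bias_maps.png', row=1, column=3)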
Example 6: test_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_result_shape(self):
    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset,
         self.another_test_dataset],
        [Bias(), Bias()]
    )
    bias_eval.run()

    # Expected result shape is
    # [bias, bias], where bias.shape[0] == number of target datasets
    self.assertTrue(len(bias_eval.results) == 2)
    self.assertTrue(bias_eval.results[0].shape[0] == 3)
Example 7: test_unary_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_unary_result_shape(self):
    new_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset,
         self.another_test_dataset, self.another_test_dataset],
        [TemporalStdDev()]
    )
    new_eval.run()

    # Expected result shape is
    # [stddev], where stddev.shape[0] == number of datasets (reference + targets)
    self.assertTrue(len(new_eval.unary_results) == 1)
    self.assertTrue(new_eval.unary_results[0].shape[0] == 5)
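Unlike the subregion tests, here each entry of unary_results is a single array stacked along the dataset axis (reference first, then the targets), so per-dataset results come from plain array indexing. A short sketch, continuing from the new_eval object above:

stddevs = new_eval.unary_results[0]  # shape[0] == 5: reference + 4 targets
ref_stddev = stddevs[0]              # reference dataset
target_stddevs = stddevs[1:]         # one entry per target, in order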
Example 8: test_unary_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_unary_result_shape(self):
    new_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [TemporalStdDev()]
    )
    new_eval.run()

    # Expected result shape is
    # [
    #     temporalstddev.run(reference),
    #     temporalstddev.run(target1),
    #     temporalstddev.run(target2)
    # ]
    self.assertTrue(len(new_eval.unary_results) == 1)
    self.assertTrue(len(new_eval.unary_results[0]) == 3)
Example 9: test_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_result_shape(self):
    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [Bias()]
    )
    bias_eval.run()

    # Expected result shape is
    # [
    #     [
    #         bias.run(reference, target1)
    #     ],
    #     [
    #         bias.run(reference, target2)
    #     ]
    # ]
    self.assertTrue(len(bias_eval.results) == 2)
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(len(bias_eval.results[1]) == 1)
Example 10: Taylor_diagram_spatial_pattern_of_multiyear_climatology
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(obs_dataset, obs_name,
                                                            model_datasets, model_names,
                                                            file_name):
    # calculate climatological mean fields (note: unlike Example 2, this
    # variant overwrites the datasets' values in place)
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # determine the metric (spatial standard deviation and pattern correlation)
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(obs_dataset,     # reference dataset
                                   model_datasets,  # list of target datasets
                                   [taylor_diagram])

    # run the evaluation (Taylor statistics calculation)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
Example 11: test_subregion_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_subregion_result_shape(self):
    bound = Bounds(
        lat_min=10, lat_max=18,
        lon_min=100, lon_max=108,
        start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [Bias()],
        [bound]
    )
    bias_eval.run()

    # Expected result shape is
    # [
    #     [   # Subregions cause this extra layer
    #         [number of targets, bias.run(reference, target1).shape]
    #     ]
    # ]
    self.assertTrue(len(bias_eval.results) == 1)
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
    self.assertTrue(isinstance(bias_eval.results, list))
Example 12: test_subregion_result_shape
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
def test_subregion_result_shape(self):
    bound = Bounds(
        10, 18,
        100, 108,
        dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))

    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [Bias()],
        [bound]
    )
    bias_eval.run()

    # Expected result shape is
    # [
    #     [
    #         [   # Subregions cause this extra layer
    #             bias.run(reference, target1)
    #         ]
    #     ],
    #     [
    #         [
    #             bias.run(reference, target2)
    #         ]
    #     ]
    # ]
    self.assertTrue(len(bias_eval.results) == 2)
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(isinstance(bias_eval.results[0], list))
    self.assertTrue(len(bias_eval.results[1]) == 1)
    self.assertTrue(isinstance(bias_eval.results[1], list))
    self.assertTrue(len(bias_eval.results[0][0]) == 1)
    self.assertTrue(len(bias_eval.results[1][0]) == 1)
# 需要导入模块: from ocw.evaluation import Evaluation [as 别名]
# 或者: from ocw.evaluation.Evaluation import run [as 别名]
def run_evaluation():
''' Run an OCW Evaluation.
*run_evaluation* expects the Evaluation parameters to be POSTed in
the following format.
.. sourcecode:: javascript
{
reference_dataset: {
// Id that tells us how we need to load this dataset.
'data_source_id': 1 == local, 2 == rcmed,
// Dict of data_source specific identifying information.
//
// if data_source_id == 1 == local:
// {
// 'id': The path to the local file on the server for loading.
// 'var_name': The variable data to pull from the file.
// 'lat_name': The latitude variable name.
// 'lon_name': The longitude variable name.
// 'time_name': The time variable name
// 'name': Optional dataset name
// }
//
// if data_source_id == 2 == rcmed:
// {
// 'dataset_id': The dataset id to grab from RCMED.
// 'parameter_id': The variable id value used by RCMED.
// 'name': Optional dataset name
// }
'dataset_info': {..}
},
// The list of target datasets to use in the Evaluation. The data
// format for the dataset objects should be the same as the
// reference_dataset above.
'target_datasets': [{...}, {...}, ...],
// All the datasets are re-binned to the reference dataset
// before being added to an experiment. This step (in degrees)
// is used when re-binning both the reference and target datasets.
'spatial_rebin_lat_step': The lat degree step. Integer > 0,
// Same as above, but for lon
'spatial_rebin_lon_step': The lon degree step. Integer > 0,
// The temporal resolution to use when doing a temporal re-bin
// This is a timedelta of days to use so daily == 1, monthly is
// (1, 31], annual/yearly is (31, 366], and full is anything > 366.
'temporal_resolution': Integer in range(1, 999),
// A list of the metric class names to use in the evaluation. The
// names must match the class name exactly.
'metrics': [Bias, TemporalStdDev, ...]
// The bounding values used in the Evaluation. Note that lat values
// should range from -180 to 180 and lon values from -90 to 90.
'start_time': start time value in the format '%Y-%m-%d %H:%M:%S',
'end_time': end time value in the format '%Y-%m-%d %H:%M:%S',
'lat_min': The minimum latitude value,
'lat_max': The maximum latitude value,
'lon_min': The minimum longitude value,
'lon_max': The maximum longitude value,
// NOTE: At the moment, subregion support is fairly minimal. This
// will be addressed in the future. Ideally, the user should be able
// to load a file that they have locally. That would change the
// format that this data is passed.
'subregion_information': Path to a subregion file on the server.
}
'''
# TODO: validate input parameters and return an error if not valid
eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
data = request.json
eval_bounds = {
'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
'lat_min': float(data['lat_min']),
'lat_max': float(data['lat_max']),
'lon_min': float(data['lon_min']),
'lon_max': float(data['lon_max'])
}
# Load all the datasets
ref_dataset = _process_dataset_object(data['reference_dataset'], eval_bounds)
target_datasets = [_process_dataset_object(obj, eval_bounds)
for obj
in data['target_datasets']]
# Normalize the dataset time values so they break on consistent days of the
# month or time of the day, depending on how they will be rebinned.
resolution = data['temporal_resolution']
time_delta = timedelta(days=resolution)
time_step = 'daily' if resolution == 1 else 'monthly'
ref_dataset = dsp.normalize_dataset_datetimes(ref_dataset, time_step)
#.........这里部分代码省略.........
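Since the docstring fully specifies the expected payload, a client-side sketch follows naturally. Everything concrete here is hypothetical: the endpoint URL and port, the dataset and parameter ids, and the file path are invented for illustration, and the sketch uses the general-purpose requests library rather than anything OCW-specific.

import requests

payload = {
    'reference_dataset': {
        'data_source_id': 2,  # 2 == rcmed (ids below are hypothetical)
        'dataset_info': {'dataset_id': 4, 'parameter_id': 32, 'name': 'Obs'}
    },
    'target_datasets': [{
        'data_source_id': 1,  # 1 == local (path is hypothetical)
        'dataset_info': {'id': '/path/on/server.nc', 'var_name': 'pr',
                         'lat_name': 'lat', 'lon_name': 'lon',
                         'time_name': 'time', 'name': 'Model A'}
    }],
    'spatial_rebin_lat_step': 1,
    'spatial_rebin_lon_step': 1,
    'temporal_resolution': 30,  # (1, 31] == monthly per the docstring
    'metrics': ['Bias'],
    'start_time': '2000-01-01 00:00:00',
    'end_time': '2002-12-31 00:00:00',
    'lat_min': -10.0, 'lat_max': 10.0,
    'lon_min': 90.0, 'lon_max': 110.0,
    'subregion_information': None
}

# hypothetical endpoint; the actual route depends on how the service is deployed
resp = requests.post('http://localhost:8082/run_evaluation', json=payload)
print(resp.status_code)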
Example 14: TestEvaluation
# Required imports: from ocw.evaluation import Evaluation [as alias]
# Or: from ocw.evaluation.Evaluation import run [as alias]
class TestEvaluation(unittest.TestCase):
    def setUp(self):
        self.eval = Evaluation(None, [], [])

        lat = np.array([10, 12, 14, 16, 18])
        lon = np.array([100, 102, 104, 106, 108])
        time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.other_var = 'temp'
        self.test_dataset = Dataset(lat, lon, time, value, self.variable)
        self.another_test_dataset = Dataset(lat, lon, time, value,
                                            self.other_var)

    def test_init(self):
        self.assertEqual(self.eval.ref_dataset, None)
        self.assertEqual(self.eval.target_datasets, [])
        self.assertEqual(self.eval.metrics, [])
        self.assertEqual(self.eval.unary_metrics, [])

    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset,
            [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])

        ref_dataset = self.test_dataset
        target_datasets = [self.test_dataset, self.another_test_dataset]
        metrics = [Bias(), Bias()]
        unary_metrics = [TemporalStdDev()]

        self.eval = Evaluation(ref_dataset,
                               target_datasets,
                               metrics + unary_metrics)

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly.
        # The two Bias metrics are "binary" metrics.
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such.
        self.assertEqual(len(self.eval.unary_metrics), 1)

        self.eval.run()

        out_str = (
            "<Evaluation - ref_dataset: {}, "
            "target_dataset(s): {}, "
            "binary_metric(s): {}, "
            "unary_metric(s): {}, "
            "subregion(s): {}>"
        ).format(
            str(self.test_dataset),
            [str(ds) for ds in target_datasets],
            [str(m) for m in metrics],
            [str(u) for u in unary_metrics],
            None
        )
        self.assertEqual(str(self.eval), out_str)

    def test_valid_ref_dataset_setter(self):
        self.eval.ref_dataset = self.another_test_dataset
        self.assertEqual(self.eval.ref_dataset.variable,
                         self.another_test_dataset.variable)

    def test_invalid_ref_dataset(self):
        with self.assertRaises(TypeError):
            self.eval.ref_dataset = "This isn't a Dataset object"

    def test_valid_subregion(self):
        bound = Bounds(
            lat_min=-10, lat_max=10,
            lon_min=-20, lon_max=20,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2001, 1, 1))

        self.eval.subregions = [bound, bound]
        self.assertEqual(len(self.eval.subregions), 2)

    def test_invalid_subregion_bound(self):
        bound = "This is not a bounds object"

        with self.assertRaises(TypeError):
            self.eval.subregions = [bound]

    def test_add_ref_dataset(self):
        self.eval = Evaluation(self.test_dataset, [], [])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    def test_add_valid_dataset(self):
        self.eval.add_dataset(self.test_dataset)

        self.assertEqual(self.eval.target_datasets[0].variable,
                         self.variable)

    def test_add_invalid_dataset(self):
        with self.assertRaises(TypeError):
# ......... some code omitted here .........