本文整理汇总了Python中ocw.evaluation.Evaluation类的典型用法代码示例。如果您正苦于以下问题:Python Evaluation类的具体用法?Python Evaluation怎么用?Python Evaluation使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Evaluation类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_subregion_unary_result_shape
def test_subregion_unary_result_shape(self):
    """Unary results over subregions nest as [subregion][metric]."""
    subregion = Bounds(
        lat_min=10, lat_max=18,
        lon_min=100, lon_max=108,
        start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))
    targets = [self.another_test_dataset, self.another_test_dataset]
    subregion_eval = Evaluation(
        self.test_dataset,
        targets,
        [TemporalStdDev(), TemporalStdDev()],
        [subregion] * 5,
    )
    subregion_eval.run()
    # Layout: unary_results[subregion][metric] -> array whose leading
    # axis spans the reference plus both targets.
    self.assertTrue(len(subregion_eval.unary_results) == 5)  # 5 subregions
    self.assertTrue(len(subregion_eval.unary_results[0]) == 2)  # 2 metrics
    self.assertTrue(isinstance(subregion_eval.unary_results, type([])))
    # Leading axis covers reference + 2 targets = 3 datasets.
    self.assertTrue(subregion_eval.unary_results[0][0].shape[0] == 3)
示例2: Taylor_diagram_spatial_pattern_of_multiyear_climatology
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):
    """Evaluate climatological spatial patterns and draw a Taylor diagram.

    Builds multi-year climatology datasets for the observation and each
    model, runs a SpatialPatternTaylorDiagram evaluation, and writes the
    resulting diagram to ``file_name``.
    """
    # Climatological (multi-year temporal) mean of the reference dataset.
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    # Climatological means for every model dataset.
    model_clim_datasets = [
        ds.Dataset(model.lats, model.lons, model.times,
                   utils.calc_temporal_mean(model))
        for model in model_datasets
    ]
    # Single metric: spatial standard deviation + pattern correlation.
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()
    # Reference is the observed climatology; targets are model climatologies.
    taylor_evaluation = Evaluation(obs_clim_dataset, model_clim_datasets,
                                   [taylor_diagram])
    taylor_evaluation.run()
    taylor_data = taylor_evaluation.results[0]
    plotter.draw_taylor_diagram(
        taylor_data, model_names, obs_name, file_name,
        pos='upper right', frameon=False)
示例3: test_subregion_unary_result_shape
def test_subregion_unary_result_shape(self):
    """A single subregion adds one nesting level to unary results."""
    subregion = Bounds(
        10, 18,
        100, 108,
        dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
    subregion_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [TemporalStdDev()],
        [subregion],
    )
    subregion_eval.run()
    # Expected layout:
    #   unary_results[metric][subregion] ==
    #       [stddev(ref), stddev(target1), stddev(target2)]
    self.assertTrue(len(subregion_eval.unary_results) == 1)  # 1 metric
    self.assertTrue(type(subregion_eval.unary_results) == type([]))
    self.assertTrue(len(subregion_eval.unary_results[0]) == 1)  # 1 subregion
    self.assertTrue(len(subregion_eval.unary_results[0][0]) == 3)  # ref + 2 targets
示例4: test_bias_output_shape
def test_bias_output_shape(self):
    """A Bias result has the same shape as the input dataset values."""
    bias_eval = Evaluation(self.test_dataset,
                           [self.another_test_dataset],
                           [Bias()])
    bias_eval.run()
    expected_shape = tuple(self.test_dataset.values.shape)
    actual_shape = tuple(bias_eval.results[0][0].shape)
    self.assertEqual(expected_shape, actual_shape)
示例5: Map_plot_bias_of_multiyear_climatology
def Map_plot_bias_of_multiyear_climatology(obs_dataset, obs_name, model_datasets, model_names,
                                           file_name, row, column):
    '''Draw maps of the observed multi-year climatology and each model's bias.

    :param obs_dataset: reference (observation) dataset
    :param obs_name: label for the observation panel
    :param model_datasets: list of target (model) datasets
    :param model_names: labels for the model panels
    :param file_name: path the figure is saved to
    :param row: number of subplot rows
    :param column: number of subplot columns
    '''
    # Climatology of the observation data (plotted in panel (a)).
    obs_clim = utils.calc_temporal_mean(obs_dataset)
    # Metric: bias of the temporal means (model - observation).
    map_of_bias = metrics.TemporalMeanBias()
    # BUGFIX: the metric was previously listed twice ([map_of_bias,
    # map_of_bias]), running the bias calculation twice while only
    # results[0] was ever used. Pass it once.
    bias_evaluation = Evaluation(obs_dataset,     # reference dataset
                                 model_datasets,  # target datasets
                                 [map_of_bias])
    bias_evaluation.run()
    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()
    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()
    string_list = list(string.ascii_lowercase)

    # Panel (a): observed climatology.
    ax = fig.add_subplot(row, column, 1)
    m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l', fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)
    x, y = m(lons, lats)
    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    # FIX: renamed from 'max', which shadowed the builtin.
    contours = m.contourf(x, y, obs_clim,
                          levels=plotter._nice_intervals(obs_dataset.values, 10),
                          extend='both', cmap='PuOr')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(contours, cax=cax)

    # Model bias panels share one color scale derived from all biases.
    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):
        ax = fig.add_subplot(row, column, 2 + imodel)
        m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                    llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l', fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        contours = m.contourf(x, y, rcm_bias[imodel, :], levels=clevs,
                              extend='both', cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ') \n ' + model_names[imodel],
                    xy=(lon_min, lat_min))

    # One shared colorbar for all bias panels (last contour set used).
    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(contours, cax=cax)
    plt.subplots_adjust(hspace=0.01, wspace=0.05)
    plt.show()
    fig.savefig(file_name, dpi=600, bbox_inches='tight')
示例6: test_result_shape
def test_result_shape(self):
    """One result per binary metric; leading axis counts targets."""
    targets = [self.another_test_dataset] * 3
    bias_eval = Evaluation(
        self.test_dataset,
        targets,
        [Bias(), Bias()],
    )
    bias_eval.run()
    # Layout: results[metric] -> array with one slice per target dataset.
    self.assertTrue(len(bias_eval.results) == 2)         # 2 metrics
    self.assertTrue(bias_eval.results[0].shape[0] == 3)  # 3 targets
示例7: test_unary_result_shape
def test_unary_result_shape(self):
    """Unary metric output covers the reference plus every target."""
    targets = [self.another_test_dataset] * 4
    unary_eval = Evaluation(
        self.test_dataset,
        targets,
        [TemporalStdDev()],
    )
    unary_eval.run()
    # Layout: unary_results[metric] -> array over ref + targets.
    self.assertTrue(len(unary_eval.unary_results) == 1)         # 1 metric
    self.assertTrue(unary_eval.unary_results[0].shape[0] == 5)  # ref + 4 targets
示例8: test_unary_result_shape
def test_unary_result_shape(self):
    """Each unary metric yields one entry per dataset (ref + targets)."""
    unary_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset] * 2,
        [TemporalStdDev()],
    )
    unary_eval.run()
    # Expected layout:
    #   unary_results[0] == [stddev(ref), stddev(target1), stddev(target2)]
    self.assertTrue(len(unary_eval.unary_results) == 1)
    self.assertTrue(len(unary_eval.unary_results[0]) == 3)
示例9: test_result_shape
def test_result_shape(self):
    """Binary results nest as [target][metric result]."""
    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset] * 2,
        [Bias()],
    )
    bias_eval.run()
    # Expected layout:
    #   results[0] == [bias(ref, target1)]
    #   results[1] == [bias(ref, target2)]
    self.assertTrue(len(bias_eval.results) == 2)
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(len(bias_eval.results[1]) == 1)
示例10: test_full_init
def test_full_init(self):
    """A fully-specified Evaluation splits metrics into binary vs unary
    and renders the expected string representation.
    """
    # BUGFIX: a throwaway Evaluation (with three freshly-built metrics)
    # was previously constructed here and immediately overwritten by the
    # assignment below; the dead construction has been removed.
    ref_dataset = self.test_dataset
    target_datasets = [self.test_dataset, self.another_test_dataset]
    metrics = [Bias(), Bias()]
    unary_metrics = [TemporalStdDev()]
    self.eval = Evaluation(ref_dataset,
                           target_datasets,
                           metrics + unary_metrics)
    self.assertEqual(self.eval.ref_dataset.variable, self.variable)
    # Both target datasets were added, in order.
    self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
    self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)
    # The two Bias metrics are "binary" metrics (reference + target).
    self.assertEqual(len(self.eval.metrics), 2)
    # TemporalStdDev is a "unary" metric and is stored separately.
    self.assertEqual(len(self.eval.unary_metrics), 1)
    self.eval.run()
    # The repr lists datasets, binary/unary metrics, and subregions.
    out_str = (
        "<Evaluation - ref_dataset: {}, "
        "target_dataset(s): {}, "
        "binary_metric(s): {}, "
        "unary_metric(s): {}, "
        "subregion(s): {}>"
    ).format(
        str(self.test_dataset),
        [str(ds) for ds in target_datasets],
        [str(m) for m in metrics],
        [str(u) for u in unary_metrics],
        None
    )
    self.assertEqual(str(self.eval), out_str)
示例11: setUp
def setUp(self):
    """Create an empty Evaluation plus two small (12, 5, 5) datasets."""
    self.eval = Evaluation(None, [], [])
    lats = np.array([10, 12, 14, 16, 18])
    lons = np.array([100, 102, 104, 106, 108])
    # Monthly timestamps spanning the year 2000.
    times = np.array([dt.datetime(2000, month, 1) for month in range(1, 13)])
    # 300 sequential values reshaped to (time, lat, lon).
    values = np.arange(300).reshape(12, 5, 5)
    self.variable = 'prec'
    self.other_var = 'temp'
    self.test_dataset = Dataset(lats, lons, times, values, self.variable)
    self.another_test_dataset = Dataset(lats, lons, times, values,
                                        self.other_var)
示例12: Taylor_diagram_spatial_pattern_of_multiyear_climatology
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(obs_dataset, obs_name, model_datasets, model_names,
                                                            file_name):
    '''Evaluate climatological spatial patterns and draw a Taylor diagram.

    :param obs_dataset: reference (observation) dataset
    :param obs_name: label for the observation
    :param model_datasets: list of target (model) datasets
    :param model_names: labels for the models
    :param file_name: path the diagram is saved to
    '''
    # BUGFIX: the climatological means were previously written back into
    # the caller's datasets (dataset.values = ...), silently mutating the
    # inputs. Build fresh climatology datasets instead, leaving the
    # caller's data untouched.
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = [ds.Dataset(dataset.lats, dataset.lons,
                                      dataset.times,
                                      utils.calc_temporal_mean(dataset))
                           for dataset in model_datasets]
    # Metric: spatial standard deviation and pattern correlation.
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()
    taylor_evaluation = Evaluation(obs_clim_dataset,     # reference
                                   model_clim_datasets,  # targets
                                   [taylor_diagram])
    taylor_evaluation.run()
    taylor_data = taylor_evaluation.results[0]
    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
示例13: test_subregion_result_shape
def test_subregion_result_shape(self):
    """Subregions add one list layer inside each target's bias results."""
    subregion = Bounds(
        10, 18,
        100, 108,
        dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset] * 2,
        [Bias()],
        [subregion],
    )
    bias_eval.run()
    # Expected layout per target:
    #   results[target] == [[bias(ref, target)]]  (extra subregion layer)
    self.assertTrue(len(bias_eval.results) == 2)  # 2 targets
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(type(bias_eval.results[0]) == type([]))
    self.assertTrue(len(bias_eval.results[1]) == 1)
    self.assertTrue(type(bias_eval.results[1]) == type([]))
    self.assertTrue(len(bias_eval.results[0][0]) == 1)
    self.assertTrue(len(bias_eval.results[1][0]) == 1)
示例14: test_subregion_result_shape
def test_subregion_result_shape(self):
    """Subregion binary results stack targets along the leading axis."""
    subregion = Bounds(
        lat_min=10, lat_max=18,
        lon_min=100, lon_max=108,
        start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))
    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset] * 2,
        [Bias()],
        [subregion],
    )
    bias_eval.run()
    # Layout: results[metric][subregion] -> array whose first axis has
    # one slice per target dataset.
    self.assertTrue(len(bias_eval.results) == 1)            # 1 metric
    self.assertTrue(len(bias_eval.results[0]) == 1)         # 1 subregion
    self.assertTrue(bias_eval.results[0][0].shape[0] == 2)  # 2 targets
    self.assertTrue(isinstance(bias_eval.results, type([])))
示例15: test_full_init
def test_full_init(self):
    """Metrics passed at construction are split into binary and unary."""
    self.eval = Evaluation(
        self.test_dataset,
        [self.test_dataset, self.another_test_dataset],
        [Bias(), Bias(), TemporalStdDev()])
    self.assertEqual(self.eval.ref_dataset.variable, self.variable)
    # Both target datasets are kept, in order.
    self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
    self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)
    # The two Bias instances are "binary" metrics...
    self.assertEqual(len(self.eval.metrics), 2)
    # ...while TemporalStdDev is "unary" and stored separately.
    self.assertEqual(len(self.eval.unary_metrics), 1)