This page collects typical usage examples of the Python attribute utils.constants.UNIVARIATE_DATASET_NAMES. If you are wondering what constants.UNIVARIATE_DATASET_NAMES does, how to use it, and what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of utils.constants, the module in which the attribute is defined.
Below are 7 code examples of constants.UNIVARIATE_DATASET_NAMES, sorted by popularity by default.
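For orientation, here is a minimal hedged sketch of the setup the examples assume: UNIVARIATE_DATASET_NAMES is a plain Python list of UCR univariate dataset names, and most snippets alias it to a shorter name at import time. The concrete dataset names in the sketch are illustrative, not taken from this page.

# Hypothetical excerpt of utils/constants.py:
#   UNIVARIATE_DATASET_NAMES = ['Adiac', 'Beef', 'Coffee', 'GunPoint']
from utils.constants import UNIVARIATE_DATASET_NAMES as DATASET_NAMES

for dataset_name in DATASET_NAMES:
    print(dataset_name)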
Example 1: read_all_datasets
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def read_all_datasets(root_dir, archive_name):
    datasets_dict = {}
    dataset_names_to_sort = []
    for dataset_name in DATASET_NAMES:
        root_dir_dataset = root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
        file_name = root_dir_dataset + dataset_name
        x_train, y_train = readucr(file_name + '_TRAIN')
        x_test, y_test = readucr(file_name + '_TEST')
        datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                       x_test.copy(), y_test.copy())
        dataset_names_to_sort.append((dataset_name, len(x_train)))
    # sort the names by train-set size, then rewrite DATASET_NAMES in place
    # so that smaller datasets come first
    dataset_names_to_sort.sort(key=operator.itemgetter(1))
    for i in range(len(DATASET_NAMES)):
        DATASET_NAMES[i] = dataset_names_to_sort[i][0]
    return datasets_dict
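A hedged usage sketch; the root directory and archive name are placeholders, and readucr is the project's UCR file reader assumed to be in scope:

# Hypothetical call; expects root_dir/archives/<archive_name>/<dataset>/<dataset>_TRAIN files
datasets_dict = read_all_datasets('/path/to/project', 'UCR_TS_Archive_2015')
x_train, y_train, x_test, y_test = datasets_dict[DATASET_NAMES[0]]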
Example 2: read_all_datasets
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def read_all_datasets(root_dir, archive_name, sort_dataset_name=False):
    datasets_dict = {}
    dataset_names_to_sort = []
    for dataset_name in DATASET_NAMES:
        file_name = root_dir + archive_name + '/' + dataset_name + '/' + dataset_name
        x_train, y_train = readucr(file_name + '_TRAIN')
        x_test, y_test = readucr(file_name + '_TEST')
        datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                       x_test.copy(), y_test.copy())
        dataset_names_to_sort.append((dataset_name, len(x_train)))
    # sort by name (tuple index 0) when requested, otherwise by train-set size (index 1)
    item_getter = 0 if sort_dataset_name else 1
    dataset_names_to_sort.sort(key=operator.itemgetter(item_getter))
    for i in range(len(DATASET_NAMES)):
        DATASET_NAMES[i] = dataset_names_to_sort[i][0]
    return datasets_dict
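Unlike Example 1, this variant builds paths as root_dir + archive_name + '/...', so root_dir must carry its trailing slash. A hypothetical call that sorts the dataset names alphabetically:

datasets_dict = read_all_datasets('/path/to/archives/', 'UCR_TS_Archive_2015', sort_dataset_name=True)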
Example 3: add_results_from_bake_off
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def add_results_from_bake_off(df_res, df_res_bake_off,
                              classifiers_to_add=['COTE', 'ST', 'BOSS', 'EE', 'PF', 'DTW_R1_1NN']):
    # keep only the bake-off rows for the requested classifiers and the
    # univariate datasets listed in DATASET_NAMES
    df_res_bake_off_to_add = df_res_bake_off.loc[
        df_res_bake_off['classifier_name'].isin(classifiers_to_add)
        & df_res_bake_off['dataset_name'].isin(DATASET_NAMES)]
    pd_bake_off = pd.concat([df_res, df_res_bake_off_to_add], sort=False)
    return pd_bake_off
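A sketch of a call; the CSV file names are hypothetical, but both frames need classifier_name and dataset_name columns, as the snippet implies:

import pandas as pd

df_res = pd.read_csv('my_results.csv')                 # hypothetical path
df_res_bake_off = pd.read_csv('bake_off_results.csv')  # hypothetical path
df_all = add_results_from_bake_off(df_res, df_res_bake_off,
                                   classifiers_to_add=['BOSS', 'EE'])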
Example 4: add_themes
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def add_themes(df_perf):
    # tag every row with the dataset's theme and the color associated with
    # that theme, both looked up in utils.constants
    for dataset_name in DATASET_NAMES:
        df_perf.loc[df_perf['dataset_name'] == dataset_name, 'theme'] = \
            utils.constants.dataset_types[dataset_name]
        df_perf.loc[df_perf['dataset_name'] == dataset_name, 'theme_colors'] = \
            utils.constants.themes_colors[utils.constants.dataset_types[dataset_name]]
    return df_perf
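Assuming dataset_types maps each dataset to a theme and themes_colors maps each theme to a plotting color (both implied by the snippet, not shown on this page), usage could look like:

df_perf = add_themes(df_perf)
# inspect the two added columns
print(df_perf[['dataset_name', 'theme', 'theme_colors']].drop_duplicates())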
Example 5: read_all_datasets
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def read_all_datasets(root_dir, archive_name):
    datasets_dict = {}
    dataset_names_to_sort = []
    if archive_name == 'TSC':
        for dataset_name in DATASET_NAMES:
            root_dir_dataset = root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
            file_name = root_dir_dataset + dataset_name
            x_train, y_train = readucr(file_name + '_TRAIN')
            x_test, y_test = readucr(file_name + '_TEST')
            datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                           x_test.copy(), y_test.copy())
            dataset_names_to_sort.append((dataset_name, len(x_train)))
        # rewrite DATASET_NAMES in place, sorted by train-set size
        dataset_names_to_sort.sort(key=operator.itemgetter(1))
        for i in range(len(DATASET_NAMES)):
            DATASET_NAMES[i] = dataset_names_to_sort[i][0]
    elif archive_name == 'InlineSkateXPs':
        # this archive is stored as NumPy arrays instead of the UCR text format
        for dataset_name in utils.constants.dataset_names_for_archive[archive_name]:
            root_dir_dataset = root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
            x_train = np.load(root_dir_dataset + 'x_train.npy')
            y_train = np.load(root_dir_dataset + 'y_train.npy')
            x_test = np.load(root_dir_dataset + 'x_test.npy')
            y_test = np.load(root_dir_dataset + 'y_test.npy')
            datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                           x_test.copy(), y_test.copy())
    elif archive_name == 'SITS':
        return read_sits_xps(root_dir)
    else:
        print('error in archive name')
        exit()
    return datasets_dict
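A sketch of the three branches; the root path is a placeholder and read_sits_xps is a project helper assumed to exist:

datasets_dict = read_all_datasets('/path/to/project', 'TSC')               # UCR text files
# datasets_dict = read_all_datasets('/path/to/project', 'InlineSkateXPs')  # pre-saved .npy arrays
# datasets_dict = read_all_datasets('/path/to/project', 'SITS')            # delegates to read_sits_xps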
Example 6: read_all_datasets
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def read_all_datasets(root_dir, archive_name, split_val=False):
    datasets_dict = {}
    dataset_names_to_sort = []
    for dataset_name in DATASET_NAMES:
        root_dir_dataset = root_dir + '/archives/' + archive_name + '/' + dataset_name + '/'
        file_name = root_dir_dataset + dataset_name
        x_train, y_train = readucr(file_name + '_TRAIN')
        x_test, y_test = readucr(file_name + '_TEST')
        if split_val:
            # check if the dataset has already been split
            temp_dir = root_dir_dataset + 'TRAIN_VAL/'
            # create_directory returns None when the directory already exists
            train_test_dir = create_directory(temp_dir)
            if train_test_dir is None:
                # already split: read the existing train and validation sets
                x_train, y_train = readucr(temp_dir + dataset_name + '_TRAIN')
                x_val, y_val = readucr(temp_dir + dataset_name + '_VAL')
            else:
                # split the train set into train/validation (75/25)
                x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                                  test_size=0.25)
                # concatenate labels and series into the UCR row format: label first
                train_set = np.zeros((y_train.shape[0], x_train.shape[1] + 1), dtype=np.float64)
                train_set[:, 0] = y_train
                train_set[:, 1:] = x_train
                val_set = np.zeros((y_val.shape[0], x_val.shape[1] + 1), dtype=np.float64)
                val_set[:, 0] = y_val
                val_set[:, 1:] = x_val
                # save the split so it can be reused on the next run
                np.savetxt(temp_dir + dataset_name + '_TRAIN', train_set, delimiter=',')
                np.savetxt(temp_dir + dataset_name + '_VAL', val_set, delimiter=',')
            datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_val.copy(),
                                           y_val.copy(), x_test.copy(), y_test.copy())
        else:
            datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                           x_test.copy(), y_test.copy())
        dataset_names_to_sort.append((dataset_name, len(x_train)))
    dataset_names_to_sort.sort(key=operator.itemgetter(1))
    for i in range(len(DATASET_NAMES)):
        DATASET_NAMES[i] = dataset_names_to_sort[i][0]
    return datasets_dict
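With split_val=True each dictionary entry carries six arrays instead of four; a hypothetical call (paths are placeholders):

datasets_dict = read_all_datasets('/path/to/project', 'TSC', split_val=True)
x_train, y_train, x_val, y_val, x_test, y_test = datasets_dict[DATASET_NAMES[0]]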
Example 7: fit
# Required module: from utils import constants [as alias]
# Or: from utils.constants import UNIVARIATE_DATASET_NAMES [as alias]
def fit(self, x_train, y_train, x_test, y_test, y_true):
    # average the predictions of the models transferred from every other dataset
    y_pred = np.zeros(shape=y_test.shape)
    l = 0  # number of source models averaged
    for dataset in datasets_names:  # the aliased list of univariate dataset names
        # skip the target dataset itself
        if dataset == self.dataset_name:
            continue
        curr_dir = self.transfer_directory + dataset + '/' + self.dataset_name + '/'
        predictions_file_name = curr_dir + 'y_pred.npy'
        if check_if_file_exits(predictions_file_name):
            # load the cached predictions from file
            curr_y_pred = np.load(predictions_file_name)
        else:
            # predict with the saved model, then cache the predictions
            model = keras.models.load_model(curr_dir + 'best_model.hdf5')
            curr_y_pred = model.predict(x_test)
            keras.backend.clear_session()
            np.save(predictions_file_name, curr_y_pred)
        y_pred = y_pred + curr_y_pred
        l += 1
        keras.backend.clear_session()
    y_pred = y_pred / l
    # save the ensembled predictions
    np.save(self.output_directory + 'y_pred.npy', y_pred)
    # convert the predictions from class probabilities to integer labels
    y_pred = np.argmax(y_pred, axis=1)
    df_metrics = calculate_metrics(y_true, y_pred, 0.0)
    df_metrics.to_csv(self.output_directory + 'df_metrics.csv', index=False)
    print(self.dataset_name, df_metrics['accuracy'][0])
    gc.collect()
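A hedged sketch of a call; clf stands for an instance of the (unnamed) classifier class this method belongs to, with dataset_name, transfer_directory and output_directory already configured:

# Hypothetical instance; the surrounding class is not shown on this page
clf.fit(x_train, y_train, x_test, y_test, y_true)

Caching each source model's predictions in y_pred.npy means re-runs only average NumPy arrays instead of reloading every Keras model.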