This page collects typical usage examples of the Python method utils.AttributeDict.dataset. If you have been wondering what AttributeDict.dataset does, how to use it, or what real code that calls it looks like, the selected examples below should help. You can also read more about the containing class, utils.AttributeDict.
The 3 code examples of AttributeDict.dataset shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
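All three examples rely on AttributeDict behaving as a dictionary whose keys can also be read and written as attributes. The actual utils.AttributeDict implementation is not shown on this page; the following is only a minimal sketch of that assumed behavior, not the library's real code:

class AttributeDict(dict):
    """A dict whose keys are also readable and writable as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

p = AttributeDict()
p.dataset = 'shapes50k20x20'
assert p['dataset'] == p.dataset  # attribute and key access are interchangeable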
Example 1: doPreprocessing
# Required import: from utils import AttributeDict [as alias]
# Or: from utils.AttributeDict import dataset [as alias]
# os, deepcopy, np (numpy), loadmat/savemat and SeqSLAM are assumed to be
# imported at module level in the original source file.
def doPreprocessing(self):
    results = AttributeDict()
    results.dataset = []
    for i in range(len(self.params.dataset)):
        # shall we just load it?
        filename = '%s/preprocessing-%s%s.mat' % (self.params.dataset[i].savePath, self.params.dataset[i].saveFile, self.params.saveSuffix)
        if self.params.dataset[i].preprocessing.load and os.path.isfile(filename):
            print('Loading file %s ...' % filename)
            r = loadmat(filename)
            # append an entry so results.dataset[i] exists for this dataset
            d = AttributeDict()
            d.preprocessing = r.results_preprocessing
            results.dataset.append(d)
        else:
            # or shall we actually calculate it?
            p = deepcopy(self.params)
            p.dataset = self.params.dataset[i]
            d = AttributeDict()
            d.preprocessing = np.copy(SeqSLAM.preprocessing(p))
            results.dataset.append(d)

            if self.params.dataset[i].preprocessing.save:
                results_preprocessing = results.dataset[i].preprocessing
                savemat(filename, {'results_preprocessing': results_preprocessing})

    return results
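doPreprocessing only reads a few fields from each entry of self.params.dataset: savePath, saveFile, a preprocessing sub-dict with load/save flags, plus saveSuffix on the top-level params. A hypothetical parameter setup that satisfies those reads might look like this (the concrete values and names such as 'results' and 'day_left' are made up for illustration; only the field names come from the snippet above):

from utils import AttributeDict

params = AttributeDict()
params.saveSuffix = ''                # appended to every cache file name
params.dataset = []

ds = AttributeDict()
ds.savePath = 'results'               # directory for cached .mat files (hypothetical)
ds.saveFile = 'day_left'              # base name of this dataset's cache file (hypothetical)
ds.preprocessing = AttributeDict()
ds.preprocessing.load = True          # reuse a cached result if one exists
ds.preprocessing.save = True          # otherwise save the freshly computed one
params.dataset.append(ds)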
Example 2: AttributeDict
# Required import: from utils import AttributeDict [as alias]
# Or: from utils.AttributeDict import dataset [as alias]
from utils import AttributeDict
from tagger_exp import TaggerExperiment
p = AttributeDict()
p.encoder_proj = (2000, 1000, 500)
p.input_noise = 0.2
p.class_cost_x = 0
p.zhat_init_value = 0.26 # mean of the input data.
p.n_iterations = 3
p.n_groups = 4
p.lr = 0.0004
p.seed = 10
p.num_epochs = 100
p.batch_size = 100
p.valid_batch_size = 100
p.dataset = 'shapes50k20x20'
p.input_type = 'binary'
p.save_to = 'shapes50k20x20'
if __name__ == '__main__':
    experiment = TaggerExperiment(p)
    experiment.train()
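In this example p.dataset is just a string naming the dataset, and the whole configuration lives in one AttributeDict that is handed to TaggerExperiment. A hypothetical variant run could copy and lightly override it (the second learning rate and the 'shapes50k20x20-run2' name are invented for illustration):

from copy import deepcopy

p2 = deepcopy(p)
p2.lr = 0.0002                        # hypothetical override
p2.save_to = 'shapes50k20x20-run2'    # hypothetical output name

TaggerExperiment(p2).train()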
Example 3: len
# Required import: from utils import AttributeDict [as alias]
# Or: from utils.AttributeDict import dataset [as alias]
# This snippet is excerpted mid-script: the imports and the AttributeDict
# construction below are restored from Example 2 so it runs standalone, other
# earlier settings from the original script may still be missing, and the
# source cuts off before the '--continue' branch trains.
import sys

from utils import AttributeDict
from tagger_exp import TaggerExperiment

p = AttributeDict()
p.input_noise = 0.2
p.class_cost_x = 0.
p.zhat_init_value = 0.5
p.n_iterations = 3
p.n_groups = 4
p.lr = 0.001
p.labeled_samples = 1000
p.save_freq = 50
p.seed = 1
p.num_epochs = 150
p.batch_size = 100
p.valid_batch_size = 100
p.objects_per_sample = 2
p.dataset = 'freq20-2mnist'
p.input_type = 'continuous'
if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == '--pretrain':
        p.save_to = 'freq20-2mnist-pretraining'
        experiment = TaggerExperiment(p)
        experiment.train()
    elif len(sys.argv) == 3 and sys.argv[1] == '--continue':
        p.load_from = sys.argv[2]
        p.save_to = 'freq20-2mnist-supervision'
        p.num_epochs = 50
        p.n_iterations = 4
        p.encoder_proj = (3000, 2000, 1000, 500, 250, 11)
        p.lr = 0.0002
        p.input_noise = 0.18
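As in Example 2, the command-line flags only decide which fields of p are overridden before TaggerExperiment(p) is built. Assuming the script were saved as run_freq20.py (a hypothetical name, not given in the source), pretraining would be started with "python run_freq20.py --pretrain" and the supervised phase with "python run_freq20.py --continue <path-to-pretrained-model>".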