本文整理汇总了Python中optimizer.Optimizer.optimize方法的典型用法代码示例。如果您正苦于以下问题:Python Optimizer.optimize方法的具体用法?Python Optimizer.optimize怎么用?Python Optimizer.optimize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类optimizer.Optimizer
的用法示例。
在下文中一共展示了Optimizer.optimize方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cnn_2d_mnist
# 需要导入模块: from optimizer import Optimizer [as 别名]
# 或者: from optimizer.Optimizer import optimize [as 别名]
def cnn_2d_mnist():
    """Train a 2-D CNN on MNIST: two conv/pool stages, two dense layers, softmax."""
    dataset = image.mnist()
    dataset.shuffle()

    def build_layers():
        # Layers are chained: each layer's input size comes from the previous output.
        conv1 = ConvLayer2d(layer_id=0, image_size=dataset.data_shape,
                            activation=calcutil.relu, c_in=1, c_out=16, k=(2, 2),
                            s=(1, 1), is_dropout=True)
        pool1 = MaxPoolLayer2d(layer_id=1, image_size=conv1.output_size,
                               activation=calcutil.identity, c_in=16, k=(2, 2))
        conv2 = ConvLayer2d(layer_id=2, image_size=pool1.output_size,
                            activation=calcutil.relu, c_in=16, c_out=32, k=(2, 2),
                            s=(1, 1), is_dropout=True)
        pool2 = MaxPoolLayer2d(layer_id=3, image_size=conv2.output_size,
                               activation=calcutil.identity, c_in=32, k=(2, 2))
        dense1 = HiddenLayer(layer_id=4, n_in=pool2.n_out, n_out=800,
                             activation=calcutil.relu)
        dense2 = HiddenLayer(layer_id=5, n_in=dense1.n_out, n_out=100,
                             activation=calcutil.relu)
        softmax = SoftMaxLayer(layer_id=6, n_in=dense2.n_out,
                               n_out=len(dataset.classes()))
        return [conv1, pool1, conv2, pool2, dense1, dense2, softmax]

    model = Model(input_dtype='float32', layers_gen_func=build_layers)
    # 100 iterations, batch size 1000.
    Optimizer(dataset, model).optimize(100, 1000)
示例2: cnn_3d_psb
# 需要导入模块: from optimizer import Optimizer [as 别名]
# 或者: from optimizer.Optimizer import optimize [as 别名]
def cnn_3d_psb():
# PSB ボクセルデータ(Train/Test双方に存在するクラスのデータのみ)
data = PSBVoxel.create(is_co_class=True, is_cached=True, from_cached=True,
align_data=True)
# ボクセルデータを回転してデータ数増加
data.augment_rotate(start=(-5, 0, 0), end=(5, 0, 0),
step=(1, 1, 1), center=(50, 50, 50), is_cached=True,
from_cached=True, is_co_class=True)
# data.augment_translate(start=(0, 0, -5), end=(0, 0, 5), step=(1, 1, 1),
# is_cached=True, from_cached=True, is_co_class=True)
# データの順番をランダムに入れ替え
data.shuffle()
# データセットの次元ごとの要素数確認
print data
# 学習モデル生成関数
def layer_gen():
l1 = ConvLayer3d(layer_id=0, shape_size=data.data_shape,
activation=calcutil.relu, c_in=1, c_out=16, k=5,
s=3, is_dropout=True)
l2 = MaxPoolLayer3d(layer_id=1, shape_size=l1.output_size,
activation=calcutil.identity, c_in=16, k=4)
l3 = HiddenLayer(layer_id=2, n_in=l2.n_out, n_out=512,
activation=calcutil.relu, is_dropout=True)
l4 = HiddenLayer(layer_id=3, n_in=l3.n_out, n_out=256,
activation=calcutil.relu, is_dropout=True)
l5 = SoftMaxLayer(layer_id=4, n_in=l4.n_out, n_out=len(data.classes()))
layers = [l1, l2, l3, l4, l5]
return layers
# 学習モデル
model = Model(input_dtype='float32', layers_gen_func=layer_gen)
print model
# 学習モデルの学習パラメタを最適化するオブジェクト
optimizer = Optimizer(data, model)
# バッチ一回分の学習時に呼ばれる関数
def on_optimized():
optimizer.result.save()
optimizer.params_result.save()
# 最適化開始
optimizer.optimize(n_iter=100, n_batch=len(data.x_train) / 10,
is_total_test_enabled=False, on_optimized=on_optimized)
示例3: cnn_3d_shrec_usdf
# 需要导入模块: from optimizer import Optimizer [as 别名]
# 或者: from optimizer.Optimizer import optimize [as 别名]
def cnn_3d_shrec_usdf(n_fold):
# PSB ボクセルデータ(Train/Test双方に存在するクラスのデータのみ)
data = SHRECVoxelUSDF.create_shrec_voxel_usdf(n_fold=n_fold)
from CubicCNN.src.util.plotutil import plot_voxel
for x, y in zip(data.x_test[:, 0], data.y_test):
print y
plot_voxel(x == 10)
# データの順番をランダムに入れ替え
data.shuffle()
# データセットの次元ごとの要素数確認
print data
# 学習モデル生成関数
def layer_gen():
l1 = ConvLayer3d(layer_id=0, shape_size=data.data_shape,
activation=calcutil.relu, c_in=1, c_out=16, k=5,
s=3, is_dropout=True)
l2 = MaxPoolLayer3d(layer_id=1, shape_size=l1.output_size,
activation=calcutil.identity, c_in=16, k=4)
l3 = HiddenLayer(layer_id=2, n_in=l2.n_out, n_out=512,
activation=calcutil.relu, is_dropout=True)
l4 = HiddenLayer(layer_id=3, n_in=l3.n_out, n_out=256,
activation=calcutil.relu, is_dropout=True)
l5 = SoftMaxLayer(layer_id=4, n_in=l4.n_out, n_out=len(data.classes()))
layers = [l1, l2, l3, l4, l5]
return layers
# 学習モデル
model = Model(input_dtype='float32', layers_gen_func=layer_gen)
print model
# 学習モデルの学習パラメタを最適化するオブジェクト
optimizer = Optimizer(data, model)
# バッチ一回分の学習時に呼ばれる関数
def on_optimized():
optimizer.result.save()
optimizer.params_result.save()
# 最適化開始
optimizer.optimize(n_iter=100, n_batch=len(data.x_train) / 10,
is_total_test_enabled=False, on_optimized=on_optimized)
示例4: optimize_solution
# 需要导入模块: from optimizer import Optimizer [as 别名]
# 或者: from optimizer.Optimizer import optimize [as 别名]
def optimize_solution(input, solution, method):
parser = Parser(input)
sols = open(solution, 'r')
tester = Tester()
opt = Optimizer([], [])
for i in range(parser.c):
case = parser.read_next_case()
s = sols.readline().split(':')[1]
s = s[:-1]
if "IMPOSSIBLE" in s:
print "Skipping case #" + str(i + 1) + " " + s
continue
parsed_s = s.split()
opt.solution = parsed_s
opt.case = case
opt.current_mattes = sum([int(x) for x in opt.solution])
opt.optimize(method)
print "For case:"
print case
print "Optimizing solution: " + s + " ---> " + opt.get_solution()
sols.close()
parser.finish()
示例5: showOptimizer
# 需要导入模块: from optimizer import Optimizer [as 别名]
# 或者: from optimizer.Optimizer import optimize [as 别名]
def showOptimizer(self, subset):
    """Launch the optimizer for the given subset of this object's beamline."""
    from optimizer import Optimizer
    result = Optimizer.optimize(parent=self, subset=subset,
                                beamline=self.beamline)