This article collects typical usage examples of the opt_param function from Python's neon.util.param module. If you are unsure exactly what opt_param does or how to call it, the curated examples below may help.
A total of 15 opt_param code examples are shown below, sorted by popularity by default.
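Before the examples, here is a minimal sketch of what opt_param appears to do, inferred from how it is called in the snippets below (this is an assumption about the helper's behavior, not the actual neon.util.param source): it assigns a default value to each named attribute only if the object does not already define it. The Loader class is a made-up stand-in for the dataset-style classes in Example 1 and Example 5.

import numpy as np

def opt_param(obj, paramlist, default_value=None):
    # Assumed behavior: set each attribute to the default only when missing,
    # so values already set by the caller (e.g. via kwargs) are left untouched.
    for param in paramlist:
        if not hasattr(obj, param):
            setattr(obj, param, default_value)

class Loader(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        opt_param(self, ['batch_size'], default_value=1)
        opt_param(self, ['input_dtype', 'target_dtype'],
                  default_value=np.float32)

print(Loader().batch_size)               # 1 (default applied)
print(Loader(batch_size=64).batch_size)  # 64 (explicit value kept)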
Example 1: __init__
def __init__(self, **kwargs):
    self.live = False
    self.server = None
    opt_param(self, ['batch_size'], default_value=1)
    opt_param(self, ['input_dtype', 'target_dtype'],
              default_value=np.float32)
    self.__dict__.update(kwargs)
Example 2: initialize
def initialize(self, kwargs):
    super(ConvLayer, self).initialize(kwargs)
    self.initialize_local()
    if self.pad != 0 and isinstance(self.backend, CPU):
        raise NotImplementedError('pad != 0, for CPU backend in ConvLayer')
    opt_param(self, ['shared_bias'], True)

    if self.shared_bias:
        self.bias_shape = (self.nofm, 1)
        self.bias_expand = self.backend.empty((self.nout, 1),
                                              dtype=self.weight_dtype)
    else:
        self.bias_shape = (self.nout, 1)

    self.allocate_output_bufs()
    self.allocate_param_bufs()

    opt_param(self, ['prodbuf', 'bpropbuf', 'updatebuf'], None)
    if isinstance(self.backend, CPU):
        self.prodbuf = self.backend.empty((self.nofm, self.batch_size))
        self.bpropbuf = self.backend.empty((self.fsize, self.batch_size))
        self.updatebuf = self.backend.empty(self.weights.shape)

    if self.backend.__module__ == 'neon.backends.gpu':
        self.conv_params = self.backend.ng.conv_layer(
            N=self.batch_size, C=self.nifm, K=self.nofm,
            D=1, H=self.ifmshape[0], W=self.ifmshape[1], T=1,
            R=self.fshape[0], S=self.fshape[1],
            pad_d=0, pad_h=self.pad, pad_w=self.pad,
            str_d=1, str_h=self.stride, str_w=self.stride,
            grid_P=0, grid_Q=0,
            dtype=self.weight_dtype)
        self.prodbuf = self.bpropbuf = self.updatebuf = self.conv_params
Example 3: allocate_param_bufs
def allocate_param_bufs(self):
    if self.params_initialized:
        return
    make_ebuf = self.backend.empty
    self.weights = self.weight_init.generate(self.weight_shape,
                                             self.weight_dtype)
    self.weights.name = self.name  # naming weights for timing diagnostics
    self.weight_updates = make_ebuf(self.weight_shape, self.updates_dtype)
    self.use_biases = 'bias_init' in self.weight_init.__dict__
    opt_param(self, ['brule_init'], None)
    if self.use_biases is True:
        self.biases = make_ebuf(self.bias_shape, self.weight_dtype)
        self.biases.fill(self.weight_init.bias_init)
        self.bias_updates = make_ebuf(self.bias_shape, self.updates_dtype)
        self.params.extend([self.weights, self.biases])
        self.updates.extend([self.weight_updates, self.bias_updates])
    else:
        self.params.extend([self.weights])
        self.updates.extend([self.weight_updates])

    if self.accumulate:
        self.utemp = map(lambda x: make_ebuf(x.shape, self.updates_dtype),
                         self.updates)
    for upm in self.updates:
        upm.fill(0.0)
    self.learning_rule = self.init_learning_rule(self.lrule_init)
    self.bias_rule = None
    if self.brule_init is not None and self.use_biases:
        self.bias_rule = self.init_learning_rule(self.brule_init)
        self.bias_rule.allocate_state([self.updates[-1]])
        self.learning_rule.allocate_state(self.updates[:-1])
    else:
        self.learning_rule.allocate_state(self.updates)
    self.params_initialized = True
Example 4: __init__
def __init__(self, **kwargs):
    super(AutoUniformValGen, self).__init__(**kwargs)
    opt_param(self, ['relu'], False)
    opt_param(self, ['islocal'], False)

    self.low = float('nan')
    self.high = float('nan')
Example 5: __init__
def __init__(self, **kwargs):
    self.macro_batched = False
    self.__dict__.update(kwargs)
    opt_param(self, ['backend_type'], 'np.float32')
    self.backend_type = ensure_dtype(self.backend_type)  # string to dtype
    logger.info("Setting dtype to" + str(self.backend_type))
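The ensure_dtype call above turns the string form of a dtype into a real numpy dtype. A plausible sketch of such a helper, under the assumption that it only needs to handle the 'np.floatXX' strings seen in these examples (the real neon implementation may differ):

import numpy as np

def ensure_dtype(value):
    # Assumed behavior: map 'np.float16' / 'np.float32' style strings to the
    # corresponding numpy type; pass anything else through unchanged.
    if isinstance(value, str):
        return getattr(np, value.replace('np.', '', 1))
    return value

assert ensure_dtype('np.float32') is np.float32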
Example 6: initialize
def initialize(self, kwargs):
    opt_param(self, ['keep'], 0.5)
    super(DropOutLayer, self).initialize(kwargs)
    self.keepmask = self.backend.empty((self.nin, self.batch_size),
                                       dtype=self.weight_dtype)
    self.train_mode = True
    self.allocate_output_bufs()
Example 7: initialize
def initialize(self, kwargs):
    super(RecurrentCostLayer, self).initialize(kwargs)
    req_param(self, ['cost', 'ref_layer'])
    opt_param(self, ['ref_label'], 'targets')
    self.targets = None
    self.cost.olayer = self.prev_layer
    self.cost.initialize(kwargs)
    self.deltas = self.cost.get_deltabuf()
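This is the first example that also uses req_param. Judging from its usage here and in the later examples, it is the strict counterpart of opt_param: instead of filling in a default, it fails when a required attribute has not been set. A minimal sketch under that assumption (not the actual neon source):

def req_param(obj, paramlist):
    # Assumed behavior: every listed attribute must already exist on obj,
    # typically having been set via self.__dict__.update(kwargs).
    for param in paramlist:
        if not hasattr(obj, param):
            raise ValueError("required parameter %s missing for %s" %
                             (param, obj.__class__.__name__))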
Example 8: __init__
def __init__(self, **kwargs):
    self.accumulate = True
    # Reusing deltas not supported for RNNs yet
    self.reuse_deltas = False
    super(RNN, self).__init__(**kwargs)
    req_param(self, ['unrolls'])
    self.rec_layer = self.layers[1]
    opt_param(self, ['num_grad_params'], None)
Example 9: __init__
def __init__(self, **kwargs):
    self.initialized = False
    self.__dict__.update(kwargs)
    req_param(self, ['dataset', 'model'])
    opt_param(self, ['backend'])
    opt_param(self, ['live'], False)
    if self.backend is not None:
        self.initialize(self.backend)
Example 10: allocate_param_bufs
def allocate_param_bufs(self):
    if self.params_initialized:
        return

    def make_ebuf(shape, dtype, persist_values):
        b = self.backend.empty(shape, dtype, persist_values)
        if self.backend.is_dist:
            b.ptype = 'replica' if self.is_local else 'vfragment'
        return b

    self.weight_init.is_local = self.is_local
    self.weights = self.weight_init.generate(self.weight_shape,
                                             self.weight_dtype)
    self.weights.name = self.name  # naming weights for timing diagnostics
    self.weight_updates = make_ebuf(self.weight_shape,
                                    dtype=self.updates_dtype,
                                    persist_values=True)
    self.make_views()
    self.use_biases = 'bias_init' in self.weight_init.__dict__
    opt_param(self, ['brule_init'], None)
    if self.use_biases is True:
        self.biases = make_ebuf(self.bias_shape, dtype=self.weight_dtype,
                                persist_values=False)
        self.biases.fill(self.weight_init.bias_init)
        self.bias_updates = make_ebuf(self.bias_shape,
                                      dtype=self.updates_dtype,
                                      persist_values=False)
        self.params.extend([self.weights, self.biases])
        self.updates.extend([self.weight_updates, self.bias_updates])
    else:
        self.params.extend([self.weights])
        self.updates.extend([self.weight_updates])

    if self.accumulate:
        self.utemp = [make_ebuf(x.shape,
                                dtype=self.updates_dtype,
                                persist_values=False)
                      for x in self.updates]
    for upm in self.updates:
        upm.fill(0.0)
    self.learning_rule = self.init_learning_rule(self.lrule_init)
    self.bias_rule = None
    if self.brule_init is not None and self.use_biases:
        lrn = self.learning_rule.name + 'bias'
        self.bias_rule = self.init_learning_rule(self.brule_init, name=lrn)
        self.bias_rule.allocate_state([self.updates[-1]])
        self.learning_rule.allocate_state(self.updates[:-1])
    else:
        self.learning_rule.allocate_state(self.updates)

    if self.backend.is_dist:
        # Create a mempool used for sharing in parallel mode
        self.make_mempool()

    self.params_initialized = True
Example 11: initialize
def initialize(self, kwargs):
    opt_param(self, ['keep'], 0.5)
    super(DropOutLayer, self).initialize(kwargs)
    bkend = self.backend
    make_zbuf = bkend.allocate_fragment if self.is_local else bkend.empty
    self.keepmask = make_zbuf((self.nin, self.batch_size),
                              dtype=self.weight_dtype)
    self.train_mode = True
    self.allocate_output_bufs()
Example 12: __init__
def __init__(self, **kwargs):
    self.initialized = False
    self.__dict__.update(kwargs)
    req_param(self, ['layers', 'batch_size'])
    opt_param(self, ['step_print'], -1)
    opt_param(self, ['accumulate'], False)
    opt_param(self, ['reuse_deltas'], True)
    opt_param(self, ['timing_plots'], False)
    opt_param(self, ['serialize_schedule'])
Example 13: __init__
def __init__(self, name, lr_params):
    self.name = name
    opt_param(self, ['velocity_dtype', 'param_dtype', 'gradient_dtype'],
              np.float32)
    opt_param(self, ['backend_type'], 'np.float32')
    if self.backend_type == 'np.float16':
        logger.info("Setting learning rule dtypes to float16")
        for item in ('velocity_dtype', 'param_dtype', 'gradient_dtype'):
            setattr(self, item, np.float16)
Example 14: allocate_output_bufs
def allocate_output_bufs(self):
    make_zbuf = self.backend.zeros
    opt_param(self, ['out_shape'], (self.nout, self.batch_size))
    opt_param(self, ['delta_shape'], (self.nin, self.batch_size))
    self.output = make_zbuf(self.out_shape, self.output_dtype)
    self.pre_act = self.activation.pre_act_buffer(self.backend,
                                                  self.output,
                                                  self.pre_act_dtype)
Example 15: initialize
def initialize(self, kwargs):
    super(CrossMapPoolingLayer, self).initialize(kwargs)
    req_param(self, ['nofm'])
    self.initialize_local()
    self.allocate_output_bufs()
    self.allocate_param_bufs()
    opt_param(self, ['updatebuf'], None)
    if isinstance(self.backend, CPU):
        self.updatebuf = self.backend.empty((1, 1))