

Python backend.set_value Method Code Examples

This article collects typical usage examples of the keras.backend.set_value method in Python. If you are unsure exactly what backend.set_value does or how to call it, the curated examples below should help. You can also explore other usage examples from the keras.backend module.


The sections below present 15 code examples of the backend.set_value method, drawn from open-source projects and ordered by popularity.
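
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: K.get_value reads a backend variable out as a NumPy value, and K.set_value overwrites it in place without rebuilding the graph. The sketch assumes the standalone keras package from the TensorFlow 1.x era, where SGD(lr=...) exposes lr as a backend variable.

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# A trivial model, so the optimizer owns an `lr` backend variable.
model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer=SGD(lr=0.1), loss='mse')

print(K.get_value(model.optimizer.lr))   # -> 0.1
K.set_value(model.optimizer.lr, 0.01)    # overwrite the variable in place
print(K.get_value(model.optimizer.lr))   # -> 0.01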

Example 1: on_batch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_batch_end(self, batch, logs):
        # Log the learning rate
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)

        # Log the loss
        loss = logs['loss']
        self.losses.append(loss)

        # Check whether the loss got too large or NaN
        if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
            self.model.stop_training = True
            return

        if loss < self.best_loss:
            self.best_loss = loss

        # Increase the learning rate for the next batch
        lr *= self.lr_mult
        K.set_value(self.model.optimizer.lr, lr) 
Author: surmenok | Project: keras_lr_finder | Lines of code: 22 | Source: lr_finder.py
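
The lr_mult factor applied here is derived in Example 15 below: to sweep the learning rate geometrically from start_lr to end_lr over num_batches steps, each batch multiplies the rate by a constant factor. A quick self-contained check of that relationship:

# Applying lr_mult num_batches times should land exactly on end_lr.
start_lr, end_lr, num_batches = 1e-5, 1.0, 500
lr_mult = (float(end_lr) / float(start_lr)) ** (1.0 / num_batches)

lr = start_lr
for _ in range(num_batches):
    lr *= lr_mult
print(lr)  # ~1.0, i.e. end_lr up to floating-point error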

Example 2: reset_states

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape

		if not input_shape[0]:
			raise Exception('If a RNN is stateful, a complete ' +
							'input_shape must be provided (including batch size).')

		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.hidden_recurrent_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[2],
			            np.zeros((input_shape[0], self.hidden_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
							K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.hidden_dim))] 
Author: bnsnapper | Project: keras_bn_library | Lines of code: 21 | Source: rnnrbm.py

Example 3: reset_states

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape
		if not input_shape[0]:
			raise ValueError('If a RNN is stateful, it needs to know '
			                 'its batch size. Specify the batch size '
			                 'of your input tensors: \n'
			                 '- If using a Sequential model, '
			                 'specify the batch size by passing '
			                 'a `batch_input_shape` '
			                 'argument to your first layer.\n'
			                 '- If using the functional API, specify '
			                 'the time dimension by passing a '
			                 '`batch_shape` argument to your Input layer.')
		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.output_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.output_dim))] 
Author: bnsnapper | Project: keras_bn_library | Lines of code: 24 | Source: recurrent.py
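
For context on Examples 2 and 3: reset_states is what you call between independent sequences when training a stateful recurrent layer, and internally it zeroes each state variable via K.set_value. Below is a generic, self-contained sketch of that workflow with a stock Keras LSTM (not code from either project above):

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

batch_size, timesteps, features = 4, 10, 3
model = Sequential([
    LSTM(8, stateful=True,
         batch_input_shape=(batch_size, timesteps, features)),
    Dense(1),
])
model.compile(optimizer='adam', loss='mse')

x = np.random.rand(batch_size * 5, timesteps, features)
y = np.random.rand(batch_size * 5, 1)

for epoch in range(3):
    # shuffle=False preserves the ordering the carried-over states rely on.
    model.fit(x, y, batch_size=batch_size, epochs=1, shuffle=False, verbose=0)
    # Zero the hidden/cell states so the next epoch starts fresh.
    model.reset_states()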

Example 4: on_epoch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_epoch_end(self, epoch, logs={}):
		current_score = logs.get('val_acc')
		divide = False
		if (epoch + 1) in self.checkpoints:
			divide = True
		elif abs(current_score - self.previous_score) <= self.epsilon:
			# Validation accuracy has plateaued to within epsilon.
			self.wait += 1
			if self.wait == self.patience:
				divide = True
		else:
			self.wait = 0
		if divide:
			# K.get_value works on any backend, unlike the Theano-only
			# .get_value() method the original snippet called.
			new_lr = K.get_value(self.model.optimizer.lr) / self.division_cst
			K.set_value(self.model.optimizer.lr, new_lr)
			self.wait = 0
			if self.verbose > 0:
				L.getLogger("train").info("Learning rate divided by " + str(self.division_cst) + '; its value is now: ' + str(new_lr))
		self.previous_score = current_score

#
# Also evaluate performance on test set at each epoch end.
# 
Author: ChihebTrabelsi | Project: deep_complex_networks | Lines of code: 23 | Source: training.py

Example 5: on_epoch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_epoch_end(self, epoch, logs=None):
        # Only save at the end of a cycle, not at the beginning.
        if epoch == 0 or (epoch + 1) % self.period != 0:
            return

        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)

        cycle = int(epoch / self.period)
        cycle_str = str(cycle).rjust(self.nb_digits, '0')
        self.model.save_weights(self.path_format.format(cycle_str), overwrite=True)

        # Resetting the learning rate
        K.set_value(self.model.optimizer.lr, self.base_lr)

        if self.verbose > 0:
            print('\nEpoch %05d: Reached %d-th cycle, saving model.' % (epoch, cycle)) 
Author: arthurdouillard | Project: keras-snapshot_ensembles | Lines of code: 18 | Source: snapshot.py

Example 6: _average_metrics_in_place

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def _average_metrics_in_place(self, logs):
        logs = logs or {}
        reduced_logs = {}
        # Reduce every metric among workers. Sort metrics by name
        # to ensure consistent order.
        for metric, value in sorted(logs.items()):
            if metric not in self.variables:
                self.variables[metric], self.allreduce_ops[metric] = \
                    self._make_variable(metric, value)
            else:
                K.set_value(self.variables[metric], value)
            reduced_logs[metric] = \
                K.get_session().run(self.allreduce_ops[metric])
        # Override the reduced values back into logs dictionary
        # for other callbacks to use.
        for metric, value in reduced_logs.items():
            logs[metric] = value 
Author: mlperf | Project: training_results_v0.6 | Lines of code: 19 | Source: callbacks.py

Example 7: on_epoch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        lr = self.model.optimizer.lr
        # If you want to apply decay.
        if k.get_value(self.model.optimizer.iterations) == 100:
          k.set_value(self.model.optimizer.lr, 0.01)
          print("Updating Learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))    
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
        #if k.get_value(self.model.optimizer.iterations)%5 == 0:
        #save_to_drive(k.get_value(self.model.optimizer.iterations))        
        if current >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True

# Load CIFAR10 Data 
Author: ambujraj | Project: hacktoberfest2018 | Lines of code: 20 | Source: DenseNet_CIFAR10.py

Example 8: on_batch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_batch_end(self, epoch, logs=None):
        logs = logs or {}

        self.clr_iterations += 1
        new_lr = self.compute_lr()

        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")

            new_momentum = self.compute_momentum()

            self.history.setdefault('momentum', []).append(
                K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
Author: titu1994 | Project: keras-one-cycle | Lines of code: 24 | Source: clr.py
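
The on_batch_end + K.set_value pattern above can be demonstrated end to end with a stripped-down callback. The following is a minimal, self-contained sketch of a triangular cyclical schedule, written in the spirit of the project above but not its actual class or constructor:

import numpy as np
from keras import backend as K
from keras.callbacks import Callback
from keras.models import Sequential
from keras.layers import Dense

class TriangularLR(Callback):
    """Toy cyclical schedule: LR ramps linearly up, then back down, each cycle."""
    def __init__(self, base_lr=1e-4, max_lr=1e-2, step_size=20):
        super(TriangularLR, self).__init__()
        self.base_lr, self.max_lr, self.step_size = base_lr, max_lr, step_size
        self.iterations = 0

    def on_batch_end(self, batch, logs=None):
        self.iterations += 1
        cycle = np.floor(1 + self.iterations / (2.0 * self.step_size))
        x = abs(self.iterations / float(self.step_size) - 2 * cycle + 1)
        lr = self.base_lr + (self.max_lr - self.base_lr) * max(0.0, 1 - x)
        K.set_value(self.model.optimizer.lr, lr)  # same pattern as Example 8

model = Sequential([Dense(1, input_shape=(8,))])
model.compile(optimizer='sgd', loss='mse')
x_train = np.random.rand(256, 8)
y_train = np.random.rand(256, 1)
model.fit(x_train, y_train, epochs=2, batch_size=16,
          callbacks=[TriangularLR()], verbose=0)
print(K.get_value(model.optimizer.lr))  # final LR set by the schedule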

Example 9: train_on_batch

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def train_on_batch(self, action, n_repeat_action: int = 1, *args, **kwargs):
        losses, metrics = [], []
        # for i in range(n_repeat_action):
        data = self.get_next_batch(train=True)
        if not self.is_int(n_repeat_action):
            # Dynamic learning rate: a float action directly sets the LR.
            K.set_value(self.model.optimizer.lr, n_repeat_action)
        rate = n_repeat_action if self.is_int(n_repeat_action) else 10
        for i in range(rate):
            while len(data) < self.batch_size:
                data = self.get_next_batch(train=True)
            X, y = list(zip(*data))
            self.X, self.y = np.array(X), np.array(y)
            loss, metric = self.model.train_on_batch(self.X, self.y, *args, **kwargs)
            losses.append(loss)
            metrics.append(metric)

            old_weights = self.model.get_weights()
            new_weights = self.update_weights(old_weights, action, n_repeat_action)
            self.model.set_weights(new_weights)
        self.loss, self.metric = np.mean(losses), np.mean(metrics)
        return self.metric  # / self.loss 
Author: Guillemdb | Project: FractalAI | Lines of code: 25 | Source: dnn_train.py

Example 10: update_learning_rate

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def update_learning_rate(self, total_steps):
        # The deepmind paper says
        # ~400k: 1e-2
        # 400k~600k: 1e-3
        # 600k~: 1e-4

        if total_steps < 500:
            lr = 1e-2
        elif total_steps < 2000:
            lr = 1e-3
        elif total_steps < 9000:
            lr = 1e-4
        else:
            lr = 2.5e-5  # means (1e-4 / 4): the paper batch size=2048, ours is 512.
        K.set_value(self.optimizer.lr, lr)
        logger.debug(f"total step={total_steps}, set learning rate to {lr}") 
Author: Zeta36 | Project: connect4-alpha-zero | Lines of code: 18 | Source: optimize.py

Example 11: reset_states

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
      raise Exception('If a RNN is stateful, a complete ' +
                      'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
      K.set_value(self.states[0],
                  np.zeros((input_shape[0], self.output_dim)))
    else:
      self.states = [K.zeros((input_shape[0], self.output_dim))] 
Author: LaurentMazare | Project: deep-models | Lines of code: 13 | Source: rhn.py

Example 12: on_epoch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_epoch_end(self, epoch, logs={}):
        if self.gamma is not None:
            K.set_value(self.model.optimizer.lr, self.gamma * K.get_value(self.model.optimizer.lr)) 
Author: minerva-ml | Project: steppy-toolkit | Lines of code: 5 | Source: callbacks.py

Example 13: on_batch_end

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_batch_end(self, epoch, logs={}):
        if not isinstance(self.model, GandlfModel):
            raise ValueError('The AdaptiveLearningRate callback only works '
                             'for Gandlf models.')

        if (not hasattr(self.model.gen_optimizer, 'lr') or
                not hasattr(self.model.dis_optimizer, 'lr')):
            raise ValueError('To use the Adaptive Learning Rate callback, '
                             'both the generator and discriminator optimizers '
                             'must have an "lr" attribute.')

        gen_loss, dis_loss = 0., 0.
        for key, val in logs.items():
            if key.endswith('gen_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                gen_loss += val
            elif key.endswith('real_loss') or key.endswith('fake_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                dis_loss += val

        dis_loss /= 2  # Double-counting real and fake data.
        total_loss = gen_loss + dis_loss + 1e-12
        gen_pct, dis_pct = gen_loss / total_loss, dis_loss / total_loss

        # Calculates the percentage to weight each one.
        generator_lr = self.generator_lr * gen_pct
        discriminator_lr = self.discriminator_lr * dis_pct

        # Updates the learning rates on both.
        K.set_value(self.model.gen_optimizer.lr, generator_lr)
        K.set_value(self.model.dis_optimizer.lr, discriminator_lr) 
Author: codekansas | Project: gandlf | Lines of code: 37 | Source: callbacks.py

Example 14: on_epoch_begin

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def on_epoch_begin(self, epoch, logs=None):
        new_gamma = 2.0 * (self.nb_epochs - epoch) / self.nb_epochs
        K.set_value(self.gamma, new_gamma)

        if self.verbose > 0:
            print('\nEpoch %05d: UpdateAnnealingParameter reducing gamma to %s.' % (epoch + 1, new_gamma)) 
Author: zxq2233 | Project: n2n-watermark-remove | Lines of code: 8 | Source: model.py

Example 15: find

# Required imports: from keras import backend [as alias]
# Or: from keras.backend import set_value [as alias]
def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1, **kw_fit):
        # If x_train contains data for multiple inputs, use length of the first input.
        # Assumption: the first element in the list is single input; NOT a list of inputs.
        N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

        # Compute number of batches and LR multiplier
        num_batches = epochs * N / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))
        # Remember the initial weights so they can be restored after the sweep
        initial_weights = self.model.get_weights()

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train, y_train,
                       batch_size=batch_size, epochs=epochs,
                       callbacks=[callback],
                       **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.set_weights(initial_weights)

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr) 
Author: surmenok | Project: keras_lr_finder | Lines of code: 31 | Source: lr_finder.py
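
Examples 1 and 15 come from the same learning-rate-finder class, and a usage sketch tying them together follows. The class name and import path are assumptions inferred from the project name keras_lr_finder; only find, lrs, and losses are confirmed by the snippets above, so check the repository for the exact API.

import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras_lr_finder import LRFinder  # assumed import path and class name

model = Sequential([Dense(1, input_shape=(8,))])
model.compile(optimizer='sgd', loss='mse')
x_train = np.random.rand(512, 8)
y_train = np.random.rand(512, 1)

finder = LRFinder(model)
finder.find(x_train, y_train, start_lr=1e-5, end_lr=1.0,
            batch_size=64, epochs=1)

# `lrs` and `losses` are accumulated by on_batch_end (Example 1);
# plot loss against LR on a log axis and pick the steepest-descent region.
plt.plot(finder.lrs, finder.losses)
plt.xscale('log')
plt.xlabel('learning rate')
plt.ylabel('loss')
plt.show()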


Note: The keras.backend.set_value examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this compilation without permission.