

Python optim.RMSprop Method Code Examples

This article collects typical usage examples of Python's torch.optim.RMSprop method. If you have been wondering how optim.RMSprop is used in practice, or what calling it looks like in real code, the curated examples below should help. You can also explore further usage examples from its containing module, torch.optim.


The following presents 15 code examples of optim.RMSprop, sorted by popularity by default.
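
Before diving into the project examples, here is a minimal, self-contained sketch of the pattern they all share: construct optim.RMSprop over a model's parameters, then alternate zero_grad / backward / step in a training loop. The toy model, random data, and hyperparameter values are illustrative assumptions, not drawn from any of the projects below.

import torch
import torch.nn as nn
from torch import optim

# A toy regression model; the architecture is an arbitrary placeholder.
model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 1))

# RMSprop's main knobs: lr (step size), alpha (smoothing constant for the
# running average of squared gradients), eps (numerical-stability term),
# and an optional momentum.
optimizer = optim.RMSprop(model.parameters(), lr=1e-3, alpha=0.99,
                          eps=1e-8, momentum=0.9)
loss_fn = nn.MSELoss()

for step in range(100):
    x = torch.randn(16, 10)      # random tensors stand in for real data
    y = torch.randn(16, 1)
    optimizer.zero_grad()        # clear gradients from the previous step
    loss = loss_fn(model(x), y)
    loss.backward()              # accumulate gradients on the parameters
    optimizer.step()             # apply the RMSprop update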

Example 1: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, model, corpus, args, name, tune_pi_only):
        self.model = model
        self.corpus = corpus
        self.args = args
        self.name = name
        self.raw_goal = None
        self.vec_goals_list = None
        self.logprobs = None
        print("Do we only tune the policy: {}".format(tune_pi_only))
        self.opt = optim.SGD(
            [p for n, p in self.model.named_parameters() if 'c2z' in n or not tune_pi_only],
            lr=self.args.rl_lr,
            momentum=self.args.momentum,
            nesterov=(self.args.nesterov and self.args.momentum > 0))
        # self.opt = optim.Adam(self.model.parameters(), lr=0.01)
        # self.opt = optim.RMSprop(self.model.parameters(), lr=0.0005)
        self.all_rewards = []
        self.all_grads = []
        self.model.train() 
Developer: ConvLab, Project: ConvLab, Lines: 21, Source: agent_task.py

Example 2: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def get_optimizer(model):
    parameters = _get_paramters(model)
    opt_lower = cfg.SOLVER.OPTIMIZER.lower()

    if opt_lower == 'sgd':
        optimizer = optim.SGD(
            parameters, lr=cfg.SOLVER.LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(
            parameters, lr=cfg.SOLVER.LR, alpha=0.9, eps=cfg.SOLVER.EPSILON,
            momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received "
                         "{}".format(opt_lower))

    return optimizer 
Developer: LikeLy-Journey, Project: SegmenTron, Lines: 24, Source: optimizer.py

Example 3: make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def make_optimizer(args, my_model):
    trainable = filter(lambda x: x.requires_grad, my_model.parameters())

    if args.optimizer == 'SGD':
        optimizer_function = optim.SGD
        kwargs = {'momentum': args.momentum}
    elif args.optimizer == 'ADAM':
        optimizer_function = optim.Adam
        kwargs = {
            'betas': args.betas,
            'eps': args.epsilon
        }
    elif args.optimizer == 'RMSprop':
        optimizer_function = optim.RMSprop
        kwargs = {'eps': args.epsilon}
    else:
        # Guard against falling through with optimizer_function undefined.
        raise ValueError('unsupported optimizer: {}'.format(args.optimizer))

    kwargs['lr'] = args.lr
    kwargs['weight_decay'] = args.weight_decay

    return optimizer_function(trainable, **kwargs)
Developer: HolmesShuan, Project: OISR-PyTorch, Lines: 22, Source: utility.py

Example 4: _make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def _make_optimizer(self):
        if self.optimizer is not None:
            return

        # Also prepare optimizer:
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            self.optimizer = optim.SGD(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "rmsprop":
            self.optimizer = optim.RMSprop(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                alpha=self.params["learning_rate_decay"],
                momentum=self.params["momentum"],
            )
        elif optimizer_name == "adam":
            self.optimizer = optim.Adam(
                params=self.parameters(), lr=self.hyperparameters["learning_rate"],
            )
        else:
            raise Exception('Unknown optimizer "%s".' % (self.params["optimizer"])) 
Developer: microsoft, Project: machine-learning-for-programming-samples, Lines: 27, Source: model_torch.py

Example 5: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, lr, n_actions, name, input_dims, chkpt_dir):
        super(DeepQNetwork, self).__init__()
        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name)

        self.conv1 = nn.Conv2d(input_dims[0], 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)

        fc_input_dims = self.calculate_conv_output_dims(input_dims)

        self.fc1 = nn.Linear(fc_input_dims, 512)
        self.fc2 = nn.Linear(512, n_actions)

        self.optimizer = optim.RMSprop(self.parameters(), lr=lr)

        self.loss = nn.MSELoss()
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device) 
Developer: philtabor, Project: Deep-Q-Learning-Paper-To-Code, Lines: 21, Source: deep_q_network.py

Example 6: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, lr, n_actions, name, input_dims, chkpt_dir):
        super(DuelingDeepQNetwork, self).__init__()

        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name)

        self.conv1 = nn.Conv2d(input_dims[0], 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)

        fc_input_dims = self.calculate_conv_output_dims(input_dims)

        self.fc1 = nn.Linear(fc_input_dims, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.V = nn.Linear(512, 1)
        self.A = nn.Linear(512, n_actions)

        self.optimizer = optim.RMSprop(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device) 
Developer: philtabor, Project: Deep-Q-Learning-Paper-To-Code, Lines: 23, Source: deep_q_network.py

Example 7: build_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def build_optimizer(params, opt):
    if opt.optim == 'rmsprop':
        return optim.RMSprop(params, opt.learning_rate, opt.optim_alpha, opt.optim_epsilon, weight_decay=opt.weight_decay)
    elif opt.optim == 'adagrad':
        return optim.Adagrad(params, opt.learning_rate, weight_decay=opt.weight_decay)
    elif opt.optim == 'sgd':
        return optim.SGD(params, opt.learning_rate, weight_decay=opt.weight_decay)
    elif opt.optim == 'sgdm':
        return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay)
    elif opt.optim == 'sgdmom':
        return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay, nesterov=True)
    elif opt.optim == 'adam':
        return optim.Adam(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
    else:
        raise Exception("bad option opt.optim: {}".format(opt.optim))


# batch_size * feat_size -> (batch_size * count) * feat_size 
Developer: ltguo19, Project: VSUA-Captioning, Lines: 20, Source: utils.py

Example 8: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self,
                 actor_critic,
                 value_loss_coef,
                 entropy_coef,
                 lr=None,
                 eps=None,
                 alpha=None,
                 max_grad_norm=None,
                 acktr=False):

        self.actor_critic = actor_critic
        self.acktr = acktr

        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef

        self.max_grad_norm = max_grad_norm

        if acktr:
            self.optimizer = KFACOptimizer(actor_critic)
        else:
            self.optimizer = optim.RMSprop(
                actor_critic.parameters(), lr, eps=eps, alpha=alpha) 
Developer: montrealrobotics, Project: dal, Lines: 25, Source: a2c_acktr.py

Example 9: build_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def build_optimizer(args, params, weight_decay=0.0):
    filter_fn = filter(lambda p : p.requires_grad, params)
    if args.opt == 'adam':
        optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay)
    elif args.opt == 'sgd':
        optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95, weight_decay=weight_decay)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay)
    elif args.opt == 'adagrad':
        optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay)
    else:
        # Guard against falling through with optimizer undefined.
        raise ValueError('unsupported optimizer: {}'.format(args.opt))
    if args.opt_scheduler == 'none':
        return None, optimizer
    elif args.opt_scheduler == 'step':
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate)
    elif args.opt_scheduler == 'cos':
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.opt_restart)
    else:
        raise ValueError('unsupported scheduler: {}'.format(args.opt_scheduler))
    return scheduler, optimizer
Developer: RexYing, Project: gnn-model-explainer, Lines: 19, Source: train_utils.py

Example 10: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.mac = mac
        self.logger = logger

        self.last_target_update_step = 0
        self.critic_training_steps = 0

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.critic = COMACritic(scheme, args)
        self.target_critic = copy.deepcopy(self.critic)

        self.agent_params = list(mac.parameters())
        self.critic_params = list(self.critic.parameters())
        self.params = self.agent_params + self.critic_params

        self.agent_optimiser = RMSprop(params=self.agent_params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        self.critic_optimiser = RMSprop(params=self.critic_params, lr=args.critic_lr, alpha=args.optim_alpha, eps=args.optim_eps) 
Developer: oxwhirl, Project: pymarl, Lines: 23, Source: coma_learner.py

Example 11: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer == "qtran_base":
            self.mixer = QTranBase(args)
        elif args.mixer == "qtran_alt":
            raise Exception("Not implemented here!")

        self.params += list(self.mixer.parameters())
        self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1 
Developer: oxwhirl, Project: pymarl, Lines: 26, Source: qtran_learner.py

Example 12: learning_schedule

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def learning_schedule(params, optim_algorithm, lr, milestones, gamma):
    """Creates an optimizer and learning rate scheduler.

    Args:
        params: Model parameters
        optim_algorithm (str): Name of the optimisation algorithm
        lr: Initial learning rate
        milestones: Schedule milestones
        gamma: Learning rate decay factor

    Returns:
        optim.lr_scheduler._LRScheduler: Learning rate scheduler
    """
    if optim_algorithm == 'sgd':
        optimiser = optim.SGD(params, lr=lr)
    elif optim_algorithm == 'nesterov':
        optimiser = optim.SGD(params, lr=lr, momentum=0.8, nesterov=True)
    elif optim_algorithm == 'rmsprop':
        optimiser = optim.RMSprop(params, lr=lr)
    else:
        raise Exception('unrecognised optimisation algorithm: ' + optim_algorithm)
    return optim.lr_scheduler.MultiStepLR(optimiser, milestones=milestones, gamma=gamma) 
Developer: anibali, Project: margipose, Lines: 24, Source: train_helpers.py

Example 13: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, fold, config, metrics):
        logdir = os.path.join('..', 'logs', config.folder, 'fold{}'.format(fold))
        os.makedirs(logdir, exist_ok=True)
        self.config = config
        self.fold = fold
        self.model = models[config.network](num_classes=1, num_channels=config.num_channels)
        if self.config.optimizer == 'adam':
            self.optimizer = optim.Adam(self.model.parameters(), lr=config.lr)
        else:
            self.optimizer = optim.RMSprop(self.model.parameters(), lr=config.lr)
        self.model = nn.DataParallel(self.model).cuda()
        self.criterion = losses[config.loss]().cuda()
        self.writer = SummaryWriter(logdir)
        self.metrics = metrics
        self.devices = os.getenv('CUDA_VISIBLE_DEVICES', '0')
        if os.name == 'nt':
            self.devices = ','.join(str(d + 5) for d in map(int, self.devices.split(',')))

        self.cache = None
        self.cached_loss = 0
        self.hard_mining = True 
Developer: asanakoy, Project: kaggle_carvana_segmentation, Lines: 23, Source: train.py

Example 14: main

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def main(num_steps=10000000,
         env_name='PongNoFrameskip-v4',
#         env_name='BreakoutNoFrameskip-v4',
         seed=42):
    th.set_num_threads(1)
    random.seed(seed)
    th.manual_seed(seed)
    np.random.seed(seed)

    env = gym.make(env_name)
    env = envs.VisdomLogger(env, interval=10)
    env = envs.OpenAIAtari(env)
    env = envs.Torch(env)
    env = envs.Runner(env)
    env.seed(seed)

    policy = NatureCNN(env)
    optimizer = optim.RMSprop(policy.parameters(), lr=LR, alpha=0.99, eps=1e-5)
    get_action = lambda state: get_action_value(state, policy)

    for step in range(num_steps // A2C_STEPS + 1):
        # Sample some transitions
        replay = env.run(get_action, steps=A2C_STEPS)
        env.log('random', random.random()) 
Developer: learnables, Project: cherry, Lines: 26, Source: debug_atari.py

Example 15: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import RMSprop [as alias]
def __init__(self, opt_name, parameters, lr, clip_grad_norm=None):
        opt_name = opt_name.lower().replace('_', '').strip()
        if opt_name == 'sgd':
            optimizer = opt.SGD
        elif opt_name == 'rmsprop':
            optimizer = opt.RMSprop
        elif opt_name == 'adam':
            optimizer = opt.Adam
        else:
            # Guard against falling through with optimizer undefined.
            raise ValueError('unsupported optimizer: {}'.format(opt_name))
        self.parameters = list(parameters)
        if self.parameters == []:
            # in case we're not using the optimizer
            self.parameters = [Variable(torch.zeros(1), requires_grad=True)]
        self.opt = optimizer(self.parameters, lr=lr)
        self.clip_grad_norm = clip_grad_norm
        self.stored_grads = None
        self.zero_stored_grad()
        self._n_iter = 0 
Developer: joelouismarino, Project: amortized-variational-filtering, Lines: 19, Source: optimizer.py


Note: The torch.optim.RMSprop examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the source code remains the copyright of its original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.