

Python memory.Memory Method Code Examples

This article collects typical usage examples of the memory.Memory method in Python. If you are wondering what memory.Memory does or how to use it in practice, the selected code examples below may help. You can also explore further usage examples from the memory module itself.


The following shows 10 code examples of the memory.Memory method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
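
Most of the examples below (Examples 3 and 6-10) use Memory as a small replay or rollout buffer: it is constructed with a capacity, filled with memory.push(state, next_state, action, reward, mask), and drained with memory.sample(). The remaining examples come from unrelated projects and use their own Memory classes. The Memory implementation itself is not reproduced on this page, so the following is only a minimal sketch of the buffer interface assumed by the reinforcement-learning examples; the deque-based storage and the Transition field names are illustrative assumptions inferred from how those examples call the class, not any project's actual source.

# Minimal sketch of the assumed replay-buffer interface (illustrative only, not project source).
import random
from collections import deque, namedtuple

Transition = namedtuple('Transition', ('state', 'next_state', 'action', 'reward', 'mask'))

class Memory(object):
    def __init__(self, capacity=10000):
        self.memory = deque(maxlen=capacity)  # drop the oldest transitions once full

    def push(self, state, next_state, action, reward, mask):
        self.memory.append(Transition(state, next_state, action, reward, mask))

    def sample(self, batch_size=None):
        # The examples call sample() without arguments and consume the whole buffer.
        batch = list(self.memory) if batch_size is None else random.sample(self.memory, batch_size)
        return Transition(*zip(*batch))

    def __len__(self):
        return len(self.memory)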

Example 1: __init__

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def __init__(self, mmemory, typeaccess=0):
    if not isinstance(mmemory, Memory):
        raise TypeError("ERREUR")

    self.mmemory = mmemory
    self.mmemory.open("r", typeaccess)  # open the memory source read-only

    try:
        fichier = open("/usr/include/asm/unistd.h", "r")
    except IOError:
        print("No such file /usr/include/asm/unistd.h")
        sys.exit(-1)

    liste = fichier.readlines()
    fichier.close()

    # Build a [syscall number, syscall name] table from the
    # "#define __NR_<name> <number>" lines of the kernel header.
    self.lists_syscalls = []
    count = 0
    for i in liste:
        if re.match("#define __NR_", i):
            l = i.split()
            if l[2][0].isdigit():
                count = int(l[2], 10)
                self.lists_syscalls.append([count, l[1][5:]])
            else:
                # the value is an expression rather than a literal: just increment
                count = count + 1
                self.lists_syscalls.append([count, l[1][5:]])
Developer: tuwid, Project: darkc0de-old-stuff, Lines of code: 27, Source file: syscalls.py

Example 2: get_memory

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def get_memory(self):
    # Use the LSH-based memory variant when self.use_lsh is set, otherwise the plain Memory,
    # and construct it with the model's representation size, memory size, and vocabulary size.
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size)
Developer: ringringyi, Project: DOTA_models, Lines of code: 5, Source file: model.py

Example 3: __init__

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def __init__(self, global_model, global_average_model, global_optimizer, global_ep, global_ep_r, res_queue, name):
        super(Worker, self).__init__()

        self.env = gym.make(env_name)
        self.env.seed(500)

        self.name = 'w%i' % name
        self.global_ep, self.global_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.global_model, self.global_average_model, self.global_optimizer = global_model, global_average_model, global_optimizer
        self.local_model = LocalModel(self.env.observation_space.shape[0], self.env.action_space.n)
        self.num_actions = self.env.action_space.n

        self.memory = Memory(replay_memory_capacity)  # per-worker replay buffer
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 15, Source file: worker.py

Example 4: __init__

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def __init__(self, mmemory, typeaccess=0) :
		if not isinstance(mmemory, Memory):
			raise TypeError("ERREUR")

		self.mmemory = mmemory
		self.mmemory.open("r", typeaccess) 
Developer: tuwid, Project: darkc0de-old-stuff, Lines of code: 8, Source file: networks.py

Example 5: __init__

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def __init__(self, mmemory, typeaccess=0) :
		if not isinstance(mmemory, Memory):
			raise TypeError("ERREUR")

		self.mmemory = mmemory
		self.symbols = Symbols(self.mmemory)
		self.typeaccess = typeaccess 
Developer: tuwid, Project: darkc0de-old-stuff, Lines of code: 9, Source file: tasks.py

Example 6: run

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def run(self):
        epsilon = 1.0
        steps = 0
        while self.global_ep.value < max_episode:
            if self.global_ep_r.value > goal_score:
                break
            done = False

            score = 0
            state = self.env.reset()
            state = torch.Tensor(state).to(device)
            state = state.unsqueeze(0)

            memory = Memory(async_update_step)  # short n-step buffer, replaced after every update

            while not done:
                steps += 1

                action = self.get_action(state, epsilon)
                next_state, reward, done, _ = self.env.step(action)

                next_state = torch.Tensor(next_state)
                next_state = next_state.unsqueeze(0)

                mask = 0 if done else 1
                reward = reward if not done or score == 499 else -1
                action_one_hot = np.zeros(2)
                action_one_hot[action] = 1
                memory.push(state, next_state, action_one_hot, reward, mask)

                score += reward
                state = next_state

                epsilon -= 0.00001
                epsilon = max(epsilon, 0.1)

                if len(memory) == async_update_step or done:
                    batch = memory.sample()  # drain the n-step buffer
                    loss = QNet.train_model(self.online_net, self.target_net, self.optimizer, batch)
                    memory = Memory(async_update_step)  # start a fresh buffer for the next update
                    if done:
                        self.record(score, epsilon, loss)
                        break
                if steps % update_target == 0:
                    self.update_target_model()

            score = score if score == 500.0 else score + 1

        self.res_queue.put(None) 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 51, Source file: worker.py

Example 7: main

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    env.close()

    global_target_model = Model(num_inputs, num_actions)
    global_online_model = Model(num_inputs, num_actions)
    global_target_model.train()
    global_online_model.train()
    
    global_target_model.load_state_dict(global_online_model.state_dict())
    global_target_model.share_memory()
    global_online_model.share_memory()
    
    global_memory = Memory(replay_memory_capacity)  # central replay buffer passed to the learner
    
    
    global_ep, global_ep_r, res_queue, global_memory_pipe = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Queue()

    writer = SummaryWriter('logs')

    n = 2 
    epsilons = [(i * 0.05 + 0.1) for i in range(n)]

    actors = [Actor(global_target_model, global_memory_pipe, global_ep, global_ep_r, epsilons[i], i) for i in range(n)]
    [w.start() for w in actors]
    learner = Learner(global_online_model, global_target_model, global_memory, global_memory_pipe, res_queue)
    learner.start()

    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, loss] = r
            # writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in actors] 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 46, Source file: train.py

Example 8: run

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def run(self):

        while self.global_ep.value < max_episode:
            self.local_model.pull_from_global_model(self.global_model)
            done = False
            score = 0
            steps = 0

            state = self.env.reset()
            state = torch.Tensor(state)
            state = state.unsqueeze(0)
            memory = Memory(n_step)  # n-step rollout buffer for the actor-critic update

            while True:
                policy, value = self.local_model(state)
                action = self.get_action(policy, self.num_actions)

                next_state, reward, done, _ = self.env.step(action)
                next_state = torch.Tensor(next_state)
                next_state = next_state.unsqueeze(0)

                mask = 0 if done else 1
                reward = reward if not done or score == 499 else -1
                action_one_hot = torch.zeros(2)
                action_one_hot[action] = 1
                memory.push(state, next_state, action_one_hot, reward, mask)

                score += reward
                state = next_state

                if len(memory) == n_step or done:
                    batch = memory.sample()
                    loss = self.local_model.push_to_global_model(batch, self.global_model, self.global_optimizer)
                    self.local_model.pull_from_global_model(self.global_model)
                    memory = Memory(n_step)  # start a fresh rollout buffer

                    if done:
                        running_score = self.record(score, loss)
                        break


        self.res_queue.put(None) 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 44, Source file: worker.py

Example 9: main

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    net = TNPG(num_inputs, num_actions)
    writer = SummaryWriter('logs')

    net.to(device)
    net.train()
    running_score = 0
    steps = 0
    loss = 0
    for e in range(30000):
        done = False
        memory = Memory()  # collects one full episode for the TNPG update

        score = 0
        state = env.reset()
        state = torch.Tensor(state).to(device)
        state = state.unsqueeze(0)

        while not done:
            steps += 1

            action = net.get_action(state)
            next_state, reward, done, _ = env.step(action)

            next_state = torch.Tensor(next_state)
            next_state = next_state.unsqueeze(0)

            mask = 0 if done else 1
            reward = reward if not done or score == 499 else -1

            action_one_hot = torch.zeros(2)
            action_one_hot[action] = 1
            memory.push(state, next_state, action_one_hot, reward, mask)

            score += reward
            state = next_state

        loss = TNPG.train_model(net, memory.sample())

        score = score if score == 500.0 else score + 1
        running_score = 0.99 * running_score + 0.01 * score
        if e % log_interval == 0:
            print('{} episode | score: {:.2f}'.format(
                e, running_score))
            writer.add_scalar('log/score', float(running_score), e)
            writer.add_scalar('log/loss', float(loss), e)

        if running_score > goal_score:
            break 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 60, Source file: train.py

Example 10: main

# Required import: import memory [as alias]
# Or alternatively: from memory import Memory [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    net = TRPO(num_inputs, num_actions)
    writer = SummaryWriter('logs')

    net.to(device)
    net.train()
    running_score = 0
    steps = 0
    loss = 0
    for e in range(30000):
        done = False
        memory = Memory()  # collects one full episode for the TRPO update

        score = 0
        state = env.reset()
        state = torch.Tensor(state).to(device)
        state = state.unsqueeze(0)

        while not done:
            steps += 1

            action = net.get_action(state)
            next_state, reward, done, _ = env.step(action)

            next_state = torch.Tensor(next_state)
            next_state = next_state.unsqueeze(0)

            mask = 0 if done else 1
            reward = reward if not done or score == 499 else -1

            action_one_hot = torch.zeros(2)
            action_one_hot[action] = 1
            memory.push(state, next_state, action_one_hot, reward, mask)

            score += reward
            state = next_state

        loss = TRPO.train_model(net, memory.sample())

        score = score if score == 500.0 else score + 1
        running_score = 0.99 * running_score + 0.01 * score
        if e % log_interval == 0:
            print('{} episode | score: {:.2f}'.format(
                e, running_score))
            writer.add_scalar('log/score', float(running_score), e)
            writer.add_scalar('log/loss', float(loss), e)

        if running_score > goal_score:
            break 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines of code: 60, Source file: train.py


Note: The memory.Memory examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open source projects contributed by their developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this article without permission.