

Python model.Generator Method Code Examples

This article collects typical usage examples of the model.Generator method in Python. If you are unsure how model.Generator is used in practice, the curated code examples below may help. You can also explore further usage examples from the model module where this method lives.


Ten code examples of the model.Generator method are shown below, ordered by popularity by default.

Example 1: build_model

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def build_model(self):
        """Create a generator and a discriminator."""
        if self.dataset in ['CelebA', 'RaFD']:
            self.G = Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num)
            self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num) 
        elif self.dataset in ['Both']:
            self.G = Generator(self.g_conv_dim, self.c_dim+self.c2_dim+2, self.g_repeat_num)   # 2 for mask vector.
            self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim+self.c2_dim, self.d_repeat_num)

        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')
            
        self.G.to(self.device)
        self.D.to(self.device) 
Developer: yunjey, Project: stargan, Lines: 18, Source: solver.py
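
Usage note: a minimal forward-pass sketch for the two networks built above, assuming stargan's model.py is importable and using its common CelebA defaults (conv_dim=64, c_dim=5, repeat_num=6, image_size=128); the sizes are illustrative assumptions, not fixed requirements.

# Minimal sketch, assuming stargan's model.py is on the import path.
import torch
from model import Generator, Discriminator

G = Generator(conv_dim=64, c_dim=5, repeat_num=6)
D = Discriminator(image_size=128, conv_dim=64, c_dim=5, repeat_num=6)

x = torch.randn(4, 3, 128, 128)   # a batch of RGB images
c = torch.randn(4, 5)             # target domain labels, one value per attribute

x_fake = G(x, c)                  # translated images, same shape as x
out_src, out_cls = D(x_fake)      # real/fake patch map and domain classification
print(x_fake.shape, out_src.shape, out_cls.shape)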

Example 2: register

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def register(self, trainer):
        self.generate = Generator(trainer.model.model, trainer.cuda) 
Developer: deepsound-project, Project: samplernn-pytorch, Lines: 4, Source: plugins.py

Example 3: __init__

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def __init__(self, config, args):
        self.config = config
        for k, v in args.__dict__.items():
            setattr(self.config, k, v)
        setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))

        disp_str = ''
        for attr in sorted(dir(self.config), key=lambda x: len(x)):
            if not attr.startswith('__'):
                disp_str += '{} : {}\n'.format(attr, getattr(self.config, attr))
        sys.stdout.write(disp_str)
        sys.stdout.flush()

        self.labeled_loader, self.unlabeled_loader, self.unlabeled_loader2, self.dev_loader, self.special_set = data.get_cifar_loaders(config)

        self.dis = model.Discriminative(config).cuda()
        self.gen = model.Generator(image_size=config.image_size, noise_size=config.noise_size).cuda()
        self.enc = model.Encoder(config.image_size, noise_size=config.noise_size, output_params=True).cuda()

        self.dis_optimizer = optim.Adam(self.dis.parameters(), lr=config.dis_lr, betas=(0.5, 0.999))
        self.gen_optimizer = optim.Adam(self.gen.parameters(), lr=config.gen_lr, betas=(0.0, 0.999))
        self.enc_optimizer = optim.Adam(self.enc.parameters(), lr=config.enc_lr, betas=(0.0, 0.999))

        self.d_criterion = nn.CrossEntropyLoss()

        if not os.path.exists(self.config.save_dir):
            os.makedirs(self.config.save_dir)

        log_path = os.path.join(self.config.save_dir, '{}.FM+VI.{}.txt'.format(self.config.dataset, self.config.suffix))
        self.logger = open(log_path, 'w')
        self.logger.write(disp_str)

        print(self.dis)
Developer: kimiyoung, Project: ssl_bad_gan, Lines: 35, Source: cifar_trainer.py
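
Usage note: the config handling at the top of this constructor (copying parsed command-line arguments onto a config object, then deriving save_dir from the dataset name) is reusable on its own. A self-contained sketch of that pattern follows; Config and the argparse flags are hypothetical stand-ins, not the repo's actual config.

# Self-contained sketch of the args-onto-config pattern used above.
import argparse

class Config:
    dataset = 'cifar'
    image_size = 32
    noise_size = 100

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='svhn')
parser.add_argument('--suffix', default='run0')
args = parser.parse_args([])      # empty list: take the defaults for this demo

config = Config()
for k, v in args.__dict__.items():
    setattr(config, k, v)         # command-line flags override class defaults
setattr(config, 'save_dir', '{}_log'.format(config.dataset))

for attr in sorted(dir(config), key=lambda x: len(x)):
    if not attr.startswith('__'):
        print('{} : {}'.format(attr, getattr(config, attr)))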

Example 4: __init__

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def __init__(self, config, args):
        self.config = config
        for k, v in args.__dict__.items():
            setattr(self.config, k, v)
        setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))

        disp_str = ''
        for attr in sorted(dir(self.config), key=lambda x: len(x)):
            if not attr.startswith('__'):
                disp_str += '{} : {}\n'.format(attr, getattr(self.config, attr))
        sys.stdout.write(disp_str)
        sys.stdout.flush()

        self.labeled_loader, self.unlabeled_loader, self.unlabeled_loader2, self.dev_loader, self.special_set = data.get_svhn_loaders(config)

        self.dis = model.Discriminative(config).cuda()
        self.gen = model.Generator(image_size=config.image_size, noise_size=config.noise_size).cuda()

        self.dis_optimizer = optim.Adam(self.dis.parameters(), lr=config.dis_lr, betas=(0.5, 0.999)) # 0.0 0.9999
        self.gen_optimizer = optim.Adam(self.gen.parameters(), lr=config.gen_lr, betas=(0.0, 0.999)) # 0.0 0.9999

        self.d_criterion = nn.CrossEntropyLoss()

        if not os.path.exists(self.config.save_dir):
            os.makedirs(self.config.save_dir)

        log_path = os.path.join(self.config.save_dir, '{}.FM+PT+ENT.{}.txt'.format(self.config.dataset, self.config.suffix))
        self.logger = open(log_path, 'w')
        self.logger.write(disp_str) 
Developer: kimiyoung, Project: ssl_bad_gan, Lines: 31, Source: svhn_trainer.py

Example 5: generate

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def generate(agent_path, out, num=10000, environ_path='output/RF_cls_ecfp6.pkg'):
    """ Generating novel molecules with SMILES representation and
    storing them into hard drive as a data frame.

    Arguments:
        agent_path (str): the neural states file paths for the RNN agent (generator).
        out (str): file path for the generated molecules (and scores given by environment).
        num (int, optional): the total No. of SMILES that need to be generated. (Default: 10000)
        environ_path (str): the file path of the predictor for environment construction.
    """
    batch_size = 500
    df = pd.DataFrame()
    voc = util.Voc("data/voc.txt")
    agent = model.Generator(voc)
    agent.load_state_dict(torch.load(agent_path))
    for i in range(num // batch_size + 1):
        if i == 0 and num % batch_size == 0: continue
        batch = pd.DataFrame()
        samples = agent.sample(batch_size if i != 0 else num % batch_size)
        smiles, valids = util.check_smiles(samples, agent.voc)
        if environ_path is not None:
            # calculating the reward of each SMILES based on the environment (predictor).
            environ = util.Environment(environ_path)
            scores = environ(smiles)
            scores[valids == 0] = 0
            valids = scores
            batch['SCORE'] = valids
        batch['CANONICAL_SMILES'] = smiles
        df = df.append(batch)
    df.to_csv(out, sep='\t', index=None) 
Developer: XuhanLiu, Project: DrugEx, Lines: 32, Source: designer.py
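
Usage note: a hedged example call for the function above; the agent checkpoint path is a placeholder for a file produced by training, while the environ_path default matches the snippet.

# Hypothetical call; agent_path is a placeholder for a trained checkpoint.
generate(agent_path='output/net_agent.pkg',
         out='output/generated_smiles.tsv',
         num=1000,
         environ_path='output/RF_cls_ecfp6.pkg')

Note that DataFrame.append, used to accumulate batches inside the loop, was deprecated in pandas 1.4 and removed in 2.0; on current pandas the per-batch frames would be collected in a list and combined with pd.concat instead.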

Example 6: main

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def main():
    # Construction of the vocabulary
    voc = util.Voc("data/voc.txt")
    netP_path = 'output/net_pr'
    netE_path = 'output/net_ex'

    # Pre-training the RNN model with ZINC set
    prior = model.Generator(voc)
    if not os.path.exists(netP_path + '.pkg'):
        print('Exploitation network begins to be trained...')
        zinc = util.MolData("data/zinc_corpus.txt", voc, token='SENT')
        zinc = DataLoader(zinc, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, collate_fn=zinc.collate_fn)
        prior.fit(zinc, out=netP_path)
        print('Exploitation network training is finished!')
    prior.load_state_dict(T.load(netP_path + '.pkg'))

    # Fine-tuning the RNN model on the A2AR set as an exploration strategy
    explore = model.Generator(voc)
    df = pd.read_table('data/chembl_corpus.txt').drop_duplicates('CANONICAL_SMILES')
    valid = df.sample(BATCH_SIZE)
    train = df.drop(valid.index)
    explore.load_state_dict(T.load(netP_path + '.pkg'))

    # Training set and its data loader
    train = util.MolData(train, voc, token='SENT')
    train = DataLoader(train, batch_size=BATCH_SIZE, collate_fn=train.collate_fn)

    # Validation set and its data loader
    valid = util.MolData(valid, voc, token='SENT')
    valid = DataLoader(valid, batch_size=BATCH_SIZE, collate_fn=valid.collate_fn)    

    print('Exploration network begins to be trained...')
    explore.fit(train, loader_valid=valid, out=netE_path, epochs=1000)
    print('Exploration network training is finished!') 
Developer: XuhanLiu, Project: DrugEx, Lines: 36, Source: pretrainer.py
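
Usage note: the data loaders in this example rely on a dataset-supplied collate_fn because tokenized SMILES vary in length and cannot be stacked directly. Below is a minimal, self-contained sketch of that idea using torch's pad_sequence; it illustrates the pattern only and is not the actual util.MolData code.

# Generic sketch of a padding collate_fn for variable-length token sequences.
import torch
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence

class TokenDataset(Dataset):
    def __init__(self, sequences):
        self.sequences = [torch.tensor(s, dtype=torch.long) for s in sequences]

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, i):
        return self.sequences[i]

    @staticmethod
    def collate_fn(batch):
        # pad with 0, assumed here to be the padding/EOS token id
        return pad_sequence(batch, batch_first=True, padding_value=0)

data = TokenDataset([[5, 3, 8], [5, 9], [5, 3, 3, 2, 8]])
loader = DataLoader(data, batch_size=2, collate_fn=data.collate_fn)
for batch in loader:
    print(batch.shape)            # (batch, longest sequence in the batch)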

Example 7: Policy_gradient

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def Policy_gradient(agent, environ, explore=None):
    """Training generator under reinforcement learning framework,
    The rewoard is only the final reward given by environment (predictor).

    agent (model.Generator): the exploitation network for SMILES string generation
    environ (util.Activity): the environment provide the final reward for each SMILES
    explore (model.Generator): the exploration network for SMILES string generation,
        it has the same architecture with the agent.
    """
    seqs = []

    # repeated sampling with MC times
    for _ in range(MC):
        seq = agent.sample(BATCH_SIZE, explore=explore, epsilon=Epsilon)
        seqs.append(seq)
    seqs = torch.cat(seqs, dim=0)
    ix = util.unique(seqs)
    seqs = seqs[ix]
    smiles, valids = util.check_smiles(seqs, agent.voc)

    # obtaining the reward
    preds = environ(smiles)
    preds[valids == False] = 0
    preds -= Baseline
    preds = torch.Tensor(preds.reshape(-1, 1)).to(util.dev)

    ds = TensorDataset(seqs, preds)
    loader = DataLoader(ds, batch_size=BATCH_SIZE)

    # Training Loop
    for seq, pred in loader:
        score = agent.likelihood(seq)
        agent.optim.zero_grad()
        loss = agent.PGLoss(score, pred)
        loss.backward()
        agent.optim.step() 
Developer: XuhanLiu, Project: DrugEx, Lines: 38, Source: agent.py
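
Usage note: agent.PGLoss is not shown on this page, but a policy-gradient loss of this kind typically weights the sequence log-likelihood by the baseline-subtracted reward. A minimal REINFORCE-style sketch under that assumption follows; the actual DrugEx implementation may differ in detail.

# Minimal REINFORCE-style loss sketch; an assumption about what a PGLoss
# like the one above computes, not the verbatim DrugEx code.
import torch

def pg_loss(log_probs, rewards):
    # log_probs: (batch, seq_len) token log-likelihoods of sampled sequences
    # rewards:   (batch, 1) final reward per sequence, baseline subtracted
    # maximizing reward-weighted likelihood == minimizing its negative
    return -(log_probs * rewards).mean()

logits = torch.randn(8, 20, requires_grad=True)   # stand-in for model output
log_probs = logits.log_softmax(dim=-1)
rewards = torch.rand(8, 1) - 0.5                  # stand-in rewards
loss = pg_loss(log_probs, rewards)
loss.backward()
print(loss.item())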

Example 8: Rollout_PG

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def Rollout_PG(agent, environ, explore=None):
    """Training generator under reinforcement learning framework,
    The rewoard is given for each token in the SMILES, which is generated by
    Monte Carlo Tree Search based on final reward given by the environment.

    agent (model.Generator): the exploitation network for SMILES string generation
    environ (util.Activity): the environment provide the final reward for each SMILES
    explore (model.Generator): the exploration network for SMILES string generation,
        it has the same architecture with the agent.
    """

    agent.optim.zero_grad()
    seqs = agent.sample(BATCH_SIZE, explore=explore, epsilon=Epsilon)
    batch_size = seqs.size(0)
    seq_len = seqs.size(1)
    rewards = np.zeros((batch_size, seq_len))
    smiles, valids = util.check_smiles(seqs, agent.voc)
    preds = environ(smiles) - Baseline
    preds[valids == False] = -Baseline
    scores, hiddens = agent.likelihood(seqs)

    # Monte Carlo Tree Search for step rewards generation
    for _ in tqdm(range(MC)):
        for i in range(0, seq_len):
            if (seqs[:, i] != 0).any():
                h = hiddens[:, :, i, :]
                subseqs = agent.sample(batch_size, inits=(seqs[:, i], h, i + 1, None))
                subseqs = torch.cat([seqs[:, :i+1], subseqs], dim=1)
                subsmile, subvalid = util.check_smiles(subseqs, voc=agent.voc)
                subpred = environ(subsmile) - Baseline
                subpred[1 - subvalid] = -Baseline
            else:
                subpred = preds
            rewards[:, i] += subpred
    loss = agent.PGLoss(scores, seqs, torch.FloatTensor(rewards / MC))
    loss.backward()
    agent.optim.step()
    return 0, valids.mean(), smiles, preds 
Developer: XuhanLiu, Project: DrugEx, Lines: 40, Source: agent.py
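
Usage note: the reward bookkeeping in this function is easier to see in isolation. Each of the MC rollouts contributes a reward to every token position, and the accumulated matrix is averaged over MC before entering the loss. A stripped-down numpy sketch of just that accumulation, with a hypothetical stub in place of environ(...) - Baseline:

# Stripped-down sketch of the per-token Monte Carlo reward averaging above.
import numpy as np

MC, batch_size, seq_len = 5, 4, 10
rng = np.random.default_rng(0)

def evaluate_rollout(batch_size):
    # hypothetical stub standing in for environ(subsmile) - Baseline
    return rng.random(batch_size) - 0.1

rewards = np.zeros((batch_size, seq_len))
for _ in range(MC):
    for i in range(seq_len):
        # in the real code, sequences are re-sampled from position i onward
        rewards[:, i] += evaluate_rollout(batch_size)

rewards /= MC                     # average over the MC rollouts
print(rewards.round(2))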

Example 9: train

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def train(self):
		batch_num = self.data.length//self.FLAGS.batch_size if self.data.length%self.FLAGS.batch_size==0 else self.data.length//self.FLAGS.batch_size + 1

		print("Start training WGAN...\n")

		for t in range(self.FLAGS.iter):

			d_cost = 0
			g_cost = 0

			for d_ep in range(self.d_epoch):

				img, tags, _, w_img, w_tags = self.data.next_data_batch(self.FLAGS.batch_size)
				z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)

				feed_dict = {
					self.seq:tags,
					self.img:img,
					self.z:z,
					self.w_seq:w_tags,
					self.w_img:w_img
				}

				_, loss = self.sess.run([self.d_updates, self.d_loss], feed_dict=feed_dict)

				d_cost += loss/self.d_epoch

			z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)
			feed_dict = {
				self.img:img,
				self.w_seq:w_tags,
				self.w_img:w_img,
				self.seq:tags,
				self.z:z
			}

			_, loss, step = self.sess.run([self.g_updates, self.g_loss, self.global_step], feed_dict=feed_dict)

			current_step = tf.train.global_step(self.sess, self.global_step)

			g_cost = loss

			if current_step % self.FLAGS.display_every == 0:
				print("Epoch {}, Current_step {}".format(self.data.epoch, current_step))
				print("Discriminator loss :{}".format(d_cost))
				print("Generator loss     :{}".format(g_cost))
				print("---------------------------------")

			if current_step % self.FLAGS.checkpoint_every == 0:
				path = self.saver.save(self.sess, self.checkpoint_prefix, global_step=current_step)
				print ("\nSaved model checkpoint to {}\n".format(path))

			if current_step % self.FLAGS.dump_every == 0:
				self.eval(current_step)
				print("Dump test image") 
Developer: m516825, Project: Conditional-GAN, Lines: 57, Source: improved_WGAN.py
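
Usage note: the structure worth keeping from this loop is the update ratio: the discriminator (critic) is stepped d_epoch times for every single generator step, as is common in WGAN training. A framework-neutral control-flow sketch, with hypothetical stubs in place of the two sess.run calls:

# Control-flow sketch of the WGAN update ratio above; the two update
# functions are hypothetical stubs, not the actual TensorFlow ops.
def update_discriminator():
    return 0.5                    # stub: pretend discriminator loss

def update_generator():
    return 1.2                    # stub: pretend generator loss

d_epoch, iters = 5, 3
for t in range(iters):
    d_cost = 0.0
    for _ in range(d_epoch):
        d_cost += update_discriminator() / d_epoch   # averaged critic loss
    g_cost = update_generator()                      # one generator step
    print(t, d_cost, g_cost)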

Example 10: main

# Required module import: import model [as alias]
# Or: from model import Generator [as alias]
def main():
    voc = util.Voc(init_from_file="data/voc_b.txt")
    netR_path = 'output/rf_dis.pkg'
    netG_path = 'output/net_p'
    netD_path = 'output/net_d'
    agent_path = 'output/net_gan_%d_%d_%dx%d' % (SIGMA * 10, BL * 10, BATCH_SIZE, MC)

    netR = util.Environment(netR_path)

    agent = model.Generator(voc)
    agent.load_state_dict(T.load(netG_path + '.pkg'))

    df = pd.read_table('data/CHEMBL251.txt')
    df = df[df['PCHEMBL_VALUE'] >= 6.5]
    data = util.MolData(df, voc)
    loader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, collate_fn=data.collate_fn)

    netD = model.Discriminator(VOCAB_SIZE, EMBED_DIM, FILTER_SIZE, NUM_FILTER)
    if not os.path.exists(netD_path + '.pkg'):
        Train_dis_BCE(netD, agent, loader, epochs=100, out=netD_path)
    netD.load_state_dict(T.load(netD_path + '.pkg'))

    best_score = 0
    log = open(agent_path + '.log', 'w')
    for epoch in range(1000):
        print('\n--------\nEPOCH %d\n--------' % (epoch + 1))
        print('\nPolicy Gradient Training Generator : ')
        Train_GAN(agent, netD, netR)

        print('\nAdversarial Training Discriminator : ')
        Train_dis_BCE(netD, agent, loader, epochs=1)

        seqs = agent.sample(1000)
        ix = util.unique(seqs)
        smiles, valids = util.check_smiles(seqs[ix], agent.voc)
        scores = netR(smiles)
        scores[valids == False] = 0
        unique = (scores >= 0.5).sum() / 1000
        if best_score < unique:
            T.save(agent.state_dict(), agent_path + '.pkg')
            best_score = unique
        print("Epoch+: %d average: %.4f valid: %.4f unique: %.4f" % (epoch, scores.mean(), valids.mean(), unique), file=log)
        for i, smile in enumerate(smiles):
            print('%f\t%s' % (scores[i], smile), file=log)

        for param_group in agent.optim.param_groups:
            param_group['lr'] *= (1 - 0.01)

    log.close() 
Developer: XuhanLiu, Project: DrugEx, Lines: 51, Source: organic.py
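
Usage note: the final loop in this example applies a simple exponential learning-rate decay by scaling every parameter group by 0.99 once per epoch. The same effect in isolation, on a throwaway optimizer (param_groups is standard torch.optim API):

# Standalone demo of the per-epoch exponential lr decay used above.
import torch

params = [torch.nn.Parameter(torch.zeros(3))]
optim = torch.optim.Adam(params, lr=1e-3)

for epoch in range(3):
    for param_group in optim.param_groups:
        param_group['lr'] *= (1 - 0.01)   # multiply the lr by 0.99 each epoch
    print(epoch, optim.param_groups[0]['lr'])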

