

Python model.loss Method Code Examples

This article collects typical usage examples of the model.loss method in Python. If you are wondering what model.loss does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the model module that this method belongs to.


The following presents 15 code examples of the model.loss method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
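Across these examples, model.loss is not a single standard API: each project defines its own. In some projects (Examples 2, 9, 11, 12) it is a module-level function that builds a loss tensor in TensorFlow graph mode; in others (Examples 3, 13, 14) the loss is a tensor attribute on a model object; Example 1 calls a PyTorch variant that returns a dict of loss terms. As rough orientation only, here is a minimal, hypothetical PyTorch-style sketch of the common call shape; the signature and the MSE body are illustrative assumptions, not any project's actual code.

import torch

# Hypothetical stand-in for a project-specific `model.loss`: take
# predictions and targets, return a scalar loss tensor. The projects
# below substitute task-specific losses (YOLO terms, EAST score/geometry
# losses, cross entropy, ...).
def loss(pred, target):
    return torch.mean((pred - target) ** 2)  # MSE as a placeholder

pred = torch.randn(4, 10, requires_grad=True)
target = torch.randn(4, 10)
loss(pred, target).backward()  # scalar, so it can drive an optimizer step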

Example 1: iterate

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def iterate(self, data):
        for key in data:
            t = data[key]
            if torch.is_tensor(t):
                data[key] = utils.ensure_device(t)
        tensor = torch.autograd.Variable(data['tensor'])
        pred = pybenchmark.profile('inference')(model._inference)(self.inference, tensor)
        height, width = data['image'].size()[1:3]
        rows, cols = pred['feature'].size()[-2:]
        loss, debug = pybenchmark.profile('loss')(model.loss)(self.anchors, norm_data(data, height, width, rows, cols), pred, self.config.getfloat('model', 'threshold'))
        loss_hparam = {key: loss[key] * self.config.getfloat('hparam', key) for key in loss}
        loss_total = sum(loss_hparam.values())
        self.optimizer.zero_grad()
        loss_total.backward()
        try:
            clip = self.config.getfloat('train', 'clip')
            nn.utils.clip_grad_norm(self.inference.parameters(), clip)
        except configparser.NoOptionError:
            pass
        self.optimizer.step()
        return dict(
            height=height, width=width, rows=rows, cols=cols,
            data=data, pred=pred, debug=debug,
            loss_total=loss_total, loss=loss, loss_hparam=loss_hparam,
        ) 
Author: ruiminshen | Project: yolo2-pytorch | Lines: 27 | Source: train.py
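Note that this snippet targets pre-0.4 PyTorch: torch.autograd.Variable has since been merged into Tensor, and nn.utils.clip_grad_norm was renamed to nn.utils.clip_grad_norm_. On a modern PyTorch, the Variable wrapper can be dropped and the underscored function used instead.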

Example 2: tower_loss

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss 
Author: HaozhengLi | Project: EAST_ICPR | Lines: 24 | Source: multigpu_train.py
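A per-tower loss function like this is normally called once per GPU inside a device loop, with variables reused after the first tower. Below is a hedged sketch of such a driver, following the common TF1 multi-GPU pattern; the placeholder shapes, GPU list, and optimizer are illustrative assumptions, not code from EAST_ICPR.

import tensorflow as tf

# Hypothetical driver for tower_loss above. Shapes assume an EAST-style
# setup (512px input, stride-4 score/geometry maps); all names here are
# illustrative.
opt = tf.train.AdamOptimizer(1e-4)
images = tf.placeholder(tf.float32, [None, 512, 512, 3])
score_maps = tf.placeholder(tf.float32, [None, 128, 128, 1])
geo_maps = tf.placeholder(tf.float32, [None, 128, 128, 5])
training_masks = tf.placeholder(tf.float32, [None, 128, 128, 1])

gpus = [0, 1]
splits = [tf.split(t, len(gpus)) for t in
          (images, score_maps, geo_maps, training_masks)]

tower_grads = []
reuse = None
for i, gpu_id in enumerate(gpus):
    with tf.device('/gpu:%d' % gpu_id), tf.name_scope('model_%d' % gpu_id):
        total_loss, model_loss = tower_loss(
            splits[0][i], splits[1][i], splits[2][i], splits[3][i],
            reuse_variables=reuse)
        reuse = True  # share variables across towers after the first
        tower_grads.append(opt.compute_gradients(total_loss))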

Example 3: logging_init

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def logging_init(model, graph):
    """
    Set up logging so that progress can be visualised in TensorBoard.
    """
    # Add ops to record summaries for loss and accuracy...
    train_loss = tf.summary.scalar("train_loss", model.loss)
    train_accuracy = tf.summary.scalar("train_accuracy", model.accuracy)
    # ...then merge these ops into a single op so that they can easily be
    # run together
    train_summary_ops = tf.summary.merge([train_loss, train_accuracy])
    # Same ops, but with different names, so that train/test results show up
    # separately in TensorBoard
    test_loss = tf.summary.scalar("test_loss", model.loss)
    test_accuracy = tf.summary.scalar("test_accuracy", model.accuracy)
    test_summary_ops = tf.summary.merge([test_loss, test_accuracy])

    timestamp = int(time.time())
    run_log_dir = os.path.join(LOGS_DIR, str(timestamp))
    os.makedirs(run_log_dir)
    # (this step also writes the graph to the events file so that
    # it shows up in TensorBoard)
    summary_writer = tf.summary.FileWriter(run_log_dir, graph)

    return train_summary_ops, test_summary_ops, summary_writer 
Author: mrahtz | Project: tensorflow-pos-tagger | Lines: 26 | Source: train.py
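Example 14 below (step, from the same project) shows the consumer side: the merged summary ops returned here are evaluated in sess.run together with the loss and accuracy, and the result is written out with summary_writer.add_summary.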

Example 4: create_model

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def create_model(name, batch_size, learning_rate = 0.0001, wd = 0.00001, concat = False, l2_loss = False, penalty = False, coef = 0.4, verbosity = 0):
  """
  Create a model from model.py with the given configuration
  
  Args:
    name             : name of the model (used to create a specific folder to save/load parameters)
    batch_size       : batch size
    learning_rate    : learning rate (cross entropy is around 100x bigger than l2)
    wd               : weight decay factor
    concat           : does this model include direct connections?
    l2_loss          : does this model use l2 loss (if not then cross entropy)
    penalty          : whether to use the edge contrast penalty
    coef             : coef for the edge contrast penalty
    verbosity        : level of details to display
    
  Returns:
    my_model         : created model
  """
  
  my_model = model.MODEL(name, batch_size, learning_rate, wd, concat, l2_loss, penalty, coef)
  my_model.display_info(verbosity)
  return my_model 
Author: arthurmeyer | Project: Saliency_Detection_Convolutional_Autoencoder | Lines: 24 | Source: operations.py

Example 5: copy_scalar

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def copy_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        loss_total = loss_total.data.clone().cpu().numpy()
        loss = {key: l.data.clone().cpu().numpy() for key, l in loss.items()}
        loss_hparam = {key: l.data.clone().cpu().numpy() for key, l in loss_hparam.items()}
        return dict(
            step=step,
            loss_total=loss_total,
            loss=loss, loss_hparam=loss_hparam,
        ) 
Author: ruiminshen | Project: yolo2-pytorch | Lines: 12 | Source: train.py

Example 6: summary_scalar

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def summary_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        for key, l in loss.items():
            self.writer.add_scalar('loss/' + key, l[0], step)
        if self.config.getboolean('summary_scalar', 'loss_hparam'):
            self.writer.add_scalars('loss_hparam', {key: l[0] for key, l in loss_hparam.items()}, step)
        self.writer.add_scalar('loss_total', loss_total[0], step) 
Author: ruiminshen | Project: yolo2-pytorch | Lines: 9 | Source: train.py

Example 7: check_nan

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def check_nan(self, **kwargs):
        step, loss_total, loss, data = (kwargs[key] for key in 'step, loss_total, loss, data'.split(', '))
        if np.isnan(loss_total.data.cpu()[0]):
            dump_dir = os.path.join(self.model_dir, str(step))
            os.makedirs(dump_dir, exist_ok=True)
            torch.save(collections.OrderedDict([(key, var.cpu()) for key, var in self.dnn.state_dict().items()]), os.path.join(dump_dir, 'model.pth'))
            torch.save(data, os.path.join(dump_dir, 'data.pth'))
            for key, l in loss.items():
                logging.warning('%s=%f' % (key, l.data.cpu()[0]))
            raise OverflowError('NaN loss detected, dump runtime information into ' + dump_dir) 
Author: ruiminshen | Project: yolo2-pytorch | Lines: 12 | Source: train.py

Example 8: main

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.init_obj('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.init_obj('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    criterion = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler; delete all lines containing lr_scheduler to disable the scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)

    trainer = Trainer(model, criterion, metrics, optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler)

    trainer.train() 
Author: victoresque | Project: pytorch-template | Lines: 30 | Source: train.py
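The config.init_obj helper used throughout this template reads one section of the JSON config and turns it into a constructor call. A simplified sketch of the idea (illustrative, not the template's exact implementation):

# Look up the class named by the config section's 'type' field inside
# `module`, then construct it with the section's 'args' plus any
# positional arguments supplied by the caller.
def init_obj(config, name, module, *args):
    entry = config[name]                  # e.g. {"type": "Adam", "args": {"lr": 1e-3}}
    cls = getattr(module, entry['type'])  # e.g. torch.optim.Adam
    return cls(*args, **entry['args'])    # e.g. Adam(trainable_params, lr=1e-3)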

Example 9: tower_loss

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def tower_loss(images, score_maps, geo_maps, training_masks, weights_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        # f_score, f_geometry = model.model(images, is_training=True)
        f_score, f_geometry = model.model_InceptionResNet(images, is_training=True)
        # f_score, f_geometry = model.model_InceptionResNet_BLSTM(images, is_training=True)
        # f_score, f_geometry = model.model_InceptionResNet_symmetry(images, is_training=True)
    model_loss, L_g, L_s, L_s_c = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks, weights_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('geometry_loss', L_g)
        tf.summary.scalar('score_loss', L_s)

    return total_loss, model_loss, L_g, L_s, L_s_c 
Author: UpCoder | Project: ICPR_TextDection | Lines: 28 | Source: multigpu_train.py

Example 10: main

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler; delete all lines containing lr_scheduler to disable the scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)

    trainer = getattr(module_trainer, config['trainer']['type'])(model, loss, metrics, optimizer,
                                                                    config=config,
                                                                    data_loader=data_loader,
                                                                    valid_data_loader=valid_data_loader,
                                                                    lr_scheduler=lr_scheduler)
    trainer.train() 
Author: yjlolo | Project: vae-audio | Lines: 29 | Source: train.py

Example 11: _tower_fn

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def _tower_fn(is_training, images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=is_training)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    summaries = None
    if reuse_variables is None:
        image_sum = tf.summary.image('input', images)
        score_sum = tf.summary.image('score_map', score_maps)
        f_score_sum = tf.summary.image('score_map_pred', f_score * 255)
        geo_sum = tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        f_geo_sum = tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        mask_sum = tf.summary.image('training_masks', training_masks)
        loss1_sum = tf.summary.scalar('model_loss', model_loss)
        loss_sum = tf.summary.scalar('total_loss', total_loss)
        summaries = [image_sum, score_sum, f_score_sum, geo_sum, f_geo_sum, mask_sum, loss1_sum, loss_sum]

    model_params = tf.trainable_variables()
    tower_grad = tf.gradients(total_loss, model_params)

    return total_loss, zip(tower_grad, model_params), summaries 
Author: ucloud | Project: uai-sdk | Lines: 29 | Source: distgpu_train.py
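Because _tower_fn returns per-tower (gradient, variable) pairs, the caller typically averages them across towers before a single apply_gradients step. A standard sketch of that reduction, following the classic TensorFlow multi-GPU recipe (assumed here, not taken from uai-sdk):

import tensorflow as tf

def average_gradients(tower_grads):
    # tower_grads holds one list of (grad, var) pairs per tower.
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars pairs the same variable across towers:
        # ((grad_gpu0, v), (grad_gpu1, v), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        mean_grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        average_grads.append((mean_grad, grad_and_vars[0][1]))
    return average_grads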

Example 12: tower_loss

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def tower_loss(scope):
    """Calculate the total loss on a single tower running the MNIST model.
  
    Args:
      scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'
  
    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """
    # Get images and labels for MNIST.
    images, labels = model.inputs(FLAGS.batch_size)

    # Build inference Graph.
    logits = model.inference(images, keep_prob=0.5)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = model.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    if FLAGS.tb_logging:
        for l in losses + [total_loss]:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
            # training session. This helps the clarity of presentation on
            # tensorboard.
            loss_name = re.sub('%s_[0-9]*/' % model.TOWER_NAME, '', l.op.name)
            tf.summary.scalar(loss_name, l)

    return total_loss 
Author: normanheckscher | Project: mnist-multi-gpu | Lines: 38 | Source: mnist_multi_gpu_train.py
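This pattern only works if model.loss registers its terms in the 'losses' collection, so that tf.get_collection('losses', scope) can gather the current tower's terms. A minimal sketch of such a loss function in the classic TensorFlow tutorial style (an assumption about this project's model.py, not its verbatim code):

import tensorflow as tf

def loss(logits, labels):
    # Average the per-example cross entropy over the batch...
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    # ...and register it, so each tower's terms (plus any weight-decay
    # terms registered the same way) can be collected per scope.
    tf.add_to_collection('losses', cross_entropy_mean)
    return cross_entropy_mean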

Example 13: model_init

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def model_init(vocab_size, embedding_size, n_past_words, n_pos_tags):
    pos_tagger = model.Tagger(vocab_size, embedding_size, n_past_words,
                              n_pos_tags)

    global_step = tf.Variable(
        initial_value=0, name="global_step", trainable=False)
    optimizer = tf.train.AdamOptimizer()
    train_op = optimizer.minimize(pos_tagger.loss, global_step=global_step)

    return pos_tagger, train_op, global_step 
Author: mrahtz | Project: tensorflow-pos-tagger | Lines: 12 | Source: train.py

Example 14: step

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def step(sess, model, standard_ops, train_ops, test_ops, x, y, summary_writer,
         train):
    feed_dict = {model.input_x: x, model.input_y: y}

    if train:
        step, loss, accuracy, _, summaries = sess.run(standard_ops + train_ops,
                                                      feed_dict)
    else:
        step, loss, accuracy, summaries = sess.run(standard_ops + test_ops,
                                                   feed_dict)

    print("Step %d: loss %.1f, accuracy %d%%" % (step, loss, 100 * accuracy))
    summary_writer.add_summary(summaries, step) 
Author: mrahtz | Project: tensorflow-pos-tagger | Lines: 15 | Source: train.py

Example 15: fit

# Required module: import model [as alias]
# Or: from model import loss [as alias]
def fit(self):
        is_training = True
        config, params = self.config, self.params

        # start training from previous global_step
        start_step = self.sess.run(params["global_step"])
        if not start_step == 0:
            print("Start training from previous {} steps".format(start_step))

        for step in range(start_step, config.max_steps):
            t1 = time.time()

            # # dbg filter condition.
            # diff = self.sess.run(self.diff)
            # if diff < config.diff_thres: logging.debug(diff)
            # else: logging.debug('diff too large. discard.'+str(diff))

            loss, loss1, loss2, _ = self.sess.run([self.total_loss, self.loss1, self.loss2, self.train_op],
                          feed_dict={params["is_training"]: is_training})
            print('step {}: loss: {:.2f}\t loss1: {:.2f}\t loss2: {:.2f}'.format(step, loss, loss1, loss2))
            t2 = time.time()

            if step % config.summary_every_n_steps == 0:
                summary_feed_dict = {params["is_training"]: is_training}
                self.make_summary(summary_feed_dict, step)

                eta = (t2 - t1) * (config.max_steps - step + 1)
                print("Finished {}/{} steps, ETA:{:.2f} seconds".format(step, config.max_steps, eta))
                utils.flush_stdout()

            if step % config.save_model_steps == 0:
                self.saver.save(self.sess, os.path.join(config.logdir,
                    "{}-{}".format(config.checkpoint_basename.split('/')[-1], step)))

        self.saver.save(self.sess, os.path.join(config.logdir,
            "{}-{}".format(config.checkpoint_basename.split('/')[-1], config.max_steps))) 
Author: neycyanshi | Project: DDRNet | Lines: 38 | Source: trainer.py


Note: The model.loss method examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open source projects contributed by their respective developers, and copyright of the source code belongs to the original authors; for distribution and use, please refer to each project's License. Do not reproduce without permission.