当前位置: 首页>>代码示例>>Python>>正文


Python tensorflow.to_double方法代码示例

本文整理汇总了Python中tensorflow.to_double方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.to_double方法的具体用法?Python tensorflow.to_double怎么用?Python tensorflow.to_double使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tensorflow的用法示例。


在下文中一共展示了tensorflow.to_double方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: ngctc_loss

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def ngctc_loss(term_probs, targets, seq_len, tar_len):
    """Negative log-likelihood NGCTC loss, averaged over the batch.

    The underlying forward algorithm (forward_ngctc) is implemented per
    example, so the batch is processed with a tf.while_loop that
    accumulates each example's contribution.
    """
    batch_size = tf.to_int32(tf.shape(term_probs)[0])

    def keep_going(i, _):
        return tf.less(i, batch_size)

    def accumulate(i, acc):
        # Select the probability columns belonging to example i's label sequence.
        label_idx = tf.expand_dims(targets[i, :tar_len[i]], 1)
        probs = tf.transpose(term_probs[i], (1, 0))
        probs = tf.transpose(tf.gather_nd(probs, label_idx), (1, 0))
        # Negative log likelihood; dividing inside the sum makes the final
        # accumulated value a mean over the batch.
        acc += -tf.reduce_sum(
            tf.log(forward_ngctc(probs, seq_len[i])) / tf.to_double(batch_size))
        return tf.add(i, 1), acc

    start = tf.constant(0, dtype=tf.int32)
    zero_loss = tf.constant(0, dtype=tf.float64)
    _, total_loss = tf.while_loop(keep_going, accumulate,
                                  loop_vars=[start, zero_loss])
    return total_loss
开发者ID:KyriacosShiarli,项目名称:taco,代码行数:19,代码来源:ngctc_loss.py

示例2: _build

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def _build(self, inputs, observed):
    """Compute the relaxed negative-ELBO training loss.

    Returns:
        (loss, debug_tensors): the scalar loss to minimize and a dict of
        the scalar summaries recorded along the way.
    """
    debug_tensors = {}
    scalar_summary = functools.partial(_scalar_summary, debug_tensors)

    latents, divs = self._vae.infer_latents(inputs, observed)
    log_probs = self._vae.evaluate(inputs, observed, latents=latents)
    mean_log_prob = tf.reduce_mean(log_probs)
    mean_divergence = tf.reduce_mean(divs)
    scalar_summary("log_prob", mean_log_prob)
    scalar_summary("divergence", mean_divergence)
    scalar_summary("ELBO", mean_log_prob - mean_divergence)

    # Soften the divergence penalty early in training: the strength follows
    # exp(-t0 * decay**step), ramping from divergence_strength_start toward 1
    # with half-way point at divergence_strength_half steps.
    temp_start = -np.log(self._hparams.divergence_strength_start)
    temp_decay = ((-np.log(0.5) / temp_start) **
                  (1. / self._hparams.divergence_strength_half))
    global_step = tf.to_double(tf.train.get_or_create_global_step())
    divergence_strength = tf.to_float(
        tf.exp(-temp_start * tf.pow(temp_decay, global_step)))
    scalar_summary("divergence_strength", divergence_strength)

    loss = -(mean_log_prob - mean_divergence * divergence_strength)
    scalar_summary(self.module_name, loss)
    return loss, debug_tensors
开发者ID:google,项目名称:vae-seq,代码行数:26,代码来源:train.py

示例3: gen_model

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def gen_model(name, license, model, model_file, version=VERSION, featurize=True):
    """Freeze a Keras image model into a TensorFlow GraphDef and record metadata.

    Builds a graph that decodes a raw-byte image string into a float tensor,
    runs the (optionally featurizing) model, writes the frozen graph to a
    ``sparkdl-<name>_<version>.pb`` file, and appends a Scala snippet
    (including the file's base64 sha256 hash) to ``model_file``.

    Args:
        name: model name used for tensor names and the output filename.
        license: license text substituted into the Scala template.
        model: wrapper exposing inputShape()/preprocess()/model().
        model_file: open writable file for the generated Scala code.
        version: version string embedded in the output filename.
        featurize: whether to build the model in featurize mode.

    Returns:
        The graph containing the imported frozen GraphDef.
    """
    g = tf.Graph()
    with tf.Session(graph=g) as session:
        K.set_learning_phase(0)
        inTensor = tf.placeholder(dtype=tf.string, shape=[], name="%s_input" % name)
        decoded = tf.decode_raw(inTensor, tf.uint8)
        imageTensor = tf.to_float(
            tf.reshape(
                decoded,
                shape=[
                    1,
                    model.inputShape()[0],
                    model.inputShape()[1],
                    3]))
        m = model.model(preprocessed=model.preprocess(imageTensor), featurize=featurize)
        outTensor = tf.to_double(tf.reshape(m.output, [-1]), name="%s_sparkdl_output__" % name)
        gdef = tfx.strip_and_freeze_until([outTensor], session.graph, session, False)
    g2 = tf.Graph()
    with tf.Session(graph=g2) as session:
        tf.import_graph_def(gdef, name='')
        filename = "sparkdl-%s_%s.pb" % (name, version)
        print('writing out ', filename)
        tf.train.write_graph(g2.as_graph_def(), logdir="./", name=filename, as_text=False)
        # Bug fix: the .pb file is binary — it must be opened in "rb" mode so
        # sha256() receives bytes; text mode would raise (or corrupt the hash)
        # under Python 3 when decoding arbitrary protobuf bytes.
        with open("./" + filename, "rb") as f:
            h = sha256(f.read()).digest()
            base64_hash = b64encode(h)
            print('h', base64_hash)
    model_file.write(indent(
        scala_template % {
            "license": license,
            "name": name,
            "height": model.inputShape()[0],
            "width": model.inputShape()[1],
            "filename": filename,
            "base64": base64_hash}, 2))
    return g2
开发者ID:databricks,项目名称:spark-deep-learning,代码行数:38,代码来源:generate_app_models.py

示例4: tac_loss

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def tac_loss(action_probs, term_probs, targets, seq_len, tar_len, safe=False):
    """Negative log-likelihood TAC loss, averaged over the batch.

    Shapes (T = trajectory length, D = dictionary size, B = batch size):
        action_probs: [B, max(seq_len), D]
        term_probs:   [B, max(seq_len), D, 2]
        targets:      [B, max(tar_len)]  zero-padded label sequences
        seq_len / tar_len: actual per-example sequence / target lengths.

    The per-example loss is looped over with tf.while_loop rather than
    vectorized, because the forward algorithm is implemented per example.
    When ``safe`` is set, the log-space forward pass is used.
    """
    batch_size = tf.to_int32(tf.shape(action_probs)[0])

    def keep_going(i, _):
        return tf.less(i, batch_size)

    def accumulate(i, acc):
        # Restrict action/termination probabilities to example i's labels.
        label_idx = tf.expand_dims(targets[i, :tar_len[i]], 1)
        acts = tf.transpose(tf.gather_nd(tf.transpose(action_probs[i]), label_idx))
        stops = tf.transpose(term_probs[i], (1, 0, 2))
        stops = tf.transpose(tf.gather_nd(stops, label_idx), (1, 0, 2))
        length = seq_len[i]
        if safe:
            # Log-space forward pass: numerically safer.
            acc += -forward_tac_log(acts, stops, length) / tf.to_double(batch_size)
        else:
            # Dividing inside the sum yields a batch-mean once accumulated.
            acc += -tf.reduce_sum(
                tf.log(forward_tac_tf(acts, stops, length)) / tf.to_double(batch_size))
        return tf.add(i, 1), acc

    start = tf.constant(0, dtype=tf.int32)
    zero_loss = tf.constant(0, dtype=tf.float64)
    _, total_loss = tf.while_loop(keep_going, accumulate,
                                  loop_vars=[start, zero_loss])
    return total_loss
开发者ID:KyriacosShiarli,项目名称:taco,代码行数:31,代码来源:tac_loss.py

示例5: compute_loss

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def compute_loss(self, hparams, direction, lstm_input_given, ref_given, seq_len_given, feature_size):
    """Root-mean-square error between projected LSTM outputs and the reference.

    Returns:
        (loss, output): the scalar RMSE (normalised by
        batch_size * src_len * feature_size) and the projected outputs.
    """
    lstm_input, ref, seq_len = self._set_input_ref(
        direction, lstm_input_given, ref_given, seq_len_given)
    (lstm_list, lstm_condition_list,
     lstm_scope, projector, projector_scope) = self._set_lstm_projector(direction)
    lstm_output, _ = self._encode(
        lstm_scope, lstm_input, seq_len, lstm_list, lstm_condition_list)
    output = projector(lstm_output)
    element_count = tf.to_double(self.batch_size * hparams.src_len * feature_size)
    loss = tf.sqrt(tf.reduce_sum(tf.square(output - ref)) / element_count)
    return loss, output
开发者ID:psu1,项目名称:DeepRNN,代码行数:11,代码来源:model.py

示例6: multi_crop

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def multi_crop(img, label, crop_size, image_size, crop_num=10):
    """Produce ten test-time crops of `img`: the four corners plus the centre,
    for both the original and the horizontally flipped image. Each crop is
    paired with label[0].

    Note: this is not the most efficient multi-crop implementation.
    """

    def _center_crop(image, size):
        # Crop a size x size window centred in the dynamically-shaped image.
        shape = tf.shape(image)
        channels = image.get_shape()[2]
        top = tf.to_int32((tf.to_double(shape[0]) - size) / 2)
        left = tf.to_int32((tf.to_double(shape[1]) - size) / 2)
        cropped = tf.slice(image,
                           tf.stack([top, left, 0]),
                           tf.stack([size, size, -1]))
        # Height/width are dynamic and unknown; pin the static shape.
        cropped.set_shape([size, size, channels])
        return cropped

    print('img.shape = ', image_size, '; crop_size:', crop_size)
    mirrored = tf.reverse(img, [1])
    dims = tf.shape(img)

    def _corner_crops(image):
        return [
            image[:crop_size, :crop_size, :],                      # upper left
            image[:crop_size, dims[1] - crop_size:, :],            # upper right
            image[dims[0] - crop_size:, :crop_size, :],            # lower left
            image[dims[0] - crop_size:, dims[1] - crop_size:, :],  # lower right
        ]

    crops = (_corner_crops(img) + [_center_crop(img, crop_size)] +
             _corner_crops(mirrored) + [_center_crop(mirrored, crop_size)])

    assert len(crops) == crop_num

    return crops, [label[0]] * crop_num
开发者ID:holyseven,项目名称:TransferLearningClassification,代码行数:41,代码来源:dataset_reader.py

示例7: __init__

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def __init__(self, noisy_identity_init=0.001):
        """Channel-mixing flow layer (learned per-input matrix W).

        Wraps `f` in a tf.make_template so variables are shared across calls.
        `f` multiplies each input tensor by a learned [C, C] matrix W on the
        forward pass (or by W^{-1} on the reverse pass) and returns the
        transformed tensor(s) together with the log-determinant contribution.

        Args:
            noisy_identity_init: if truthy, W is initialized as identity plus
                Gaussian noise of this scale; otherwise a random orthogonal
                matrix is used.
        """
        def f(input_, forward, vcfg):
            # `input_` may be a single tensor or a tuple of tensors; lists are
            # explicitly rejected. Normalize to a list for uniform handling.
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            out, logds = [], []
            for i, x in enumerate(input_):
                # NOTE(review): assumes NHWC inputs with static shape — TODO confirm.
                _, img_h, img_w, img_c = x.shape.as_list()
                if noisy_identity_init:
                    # identity + gaussian noise
                    initializer = (
                            np.eye(img_c) + noisy_identity_init * np.random.randn(img_c, img_c)
                    ).astype(np.float32)
                else:
                    # random orthogonal
                    initializer = np.linalg.qr(np.random.randn(img_c, img_c))[0].astype(np.float32)
                W = get_var('W{}'.format(i), shape=None, initializer=initializer, vcfg=vcfg)
                # Forward applies W; reverse applies W^{-1}.
                out.append(self._nin(x, W if forward else tf.matrix_inverse(W)))
                # log|det W| per spatial position, scaled by H*W; the sign
                # flips on the reverse pass. The determinant is computed in
                # float64 and cast back to float32.
                logds.append(
                    (1 if forward else -1) * img_h * img_w *
                    tf.to_float(tf.log(tf.abs(tf.matrix_determinant(tf.to_double(W)))))
                )
            # Broadcast the combined log-det to a per-batch-element vector.
            # NOTE(review): assumes all inputs share the first (batch) dimension.
            logd = tf.fill([input_[0].shape[0]], tf.add_n(logds))

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f)
开发者ID:aravindsrinivas,项目名称:flowpp,代码行数:37,代码来源:flows.py

示例8: light_head_preprocess_for_eval

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def light_head_preprocess_for_eval(image, labels, bboxes,
                            out_shape=EVAL_SIZE, data_format='NHWC',
                            difficults=None, resize=Resize.WARP_RESIZE,
                            scope='light_head_preprocessing_eval'):
    """Preprocess one image (plus labels/boxes) for Light-Head evaluation.

    Converts to [0, 2] float, whitens with the per-channel means scaled by
    127.5, applies the requested resize strategy, drops difficult boxes and
    optionally transposes to NCHW.

    Returns:
        (image, labels, bboxes, bbox_img) where bbox_img is the (possibly
        transformed) full-image rectangle.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.image.convert_image_dtype(image, dtype=tf.float32) * 2.
        image = tf_image_whitened(image, [_R_MEAN/127.5, _G_MEAN/127.5, _B_MEAN/127.5])

        # Prepend the full-image rectangle so it undergoes the same box
        # transforms and can be split off again afterwards.
        full_rect = tf.constant([[0., 0., 1., 1.]])
        bboxes = full_rect if bboxes is None else tf.concat([full_rect, bboxes], axis=0)

        if resize == Resize.CENTRAL_CROP:
            # Central cropping of the image.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Scale down (never up) so the image fits inside out_shape,
            # then pad out to the exact expected size.
            dims = tf.shape(image)
            scale = tf.minimum(
                tf.to_double(1.0),
                tf.minimum(tf.to_double(out_shape[0] / dims[0]),
                           tf.to_double(out_shape[1] / dims[1])))
            new_dims = tf.cast(tf.floor(scale * tf.to_double(dims[0:2])), tf.int32)
            image = tf_image.resize_image(image, new_dims,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.WARP_RESIZE:
            # Warp resize of the image.
            image = tf_image.resize_image(image, out_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
        # Resize.NONE leaves the image untouched.

        # Split the full-image rectangle back off.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        # Remove boxes flagged as difficult.
        if difficults is not None:
            keep = tf.logical_not(tf.cast(difficults, tf.bool))
            labels = tf.boolean_mask(labels, keep)
            bboxes = tf.boolean_mask(bboxes, keep)
        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image, labels, bboxes, bbox_img
开发者ID:HiKapok,项目名称:X-Detector,代码行数:60,代码来源:common_preprocessing.py

示例9: preprocess_for_eval

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import to_double [as 别名]
def preprocess_for_eval(image, labels, bboxes,
                        out_shape=EVAL_SIZE, data_format='NHWC',
                        difficults=None, resize=Resize.WARP_RESIZE,
                        scope='common_preprocessing_eval'):
    """Preprocess one image (plus labels/boxes) for evaluation.

    Whitens with the per-channel means, applies the requested resize
    strategy, drops difficult boxes and optionally transposes to NCHW.

    Returns:
        (image, labels, bboxes, bbox_img) where bbox_img is the (possibly
        transformed) full-image rectangle.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        # Prepend the full-image rectangle so it undergoes the same box
        # transforms and can be split off again afterwards.
        full_rect = tf.constant([[0., 0., 1., 1.]])
        bboxes = full_rect if bboxes is None else tf.concat([full_rect, bboxes], axis=0)

        if resize == Resize.CENTRAL_CROP:
            # Central cropping of the image.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Scale down (never up) so the image fits inside out_shape,
            # then pad out to the exact expected size.
            dims = tf.shape(image)
            scale = tf.minimum(
                tf.to_double(1.0),
                tf.minimum(tf.to_double(out_shape[0] / dims[0]),
                           tf.to_double(out_shape[1] / dims[1])))
            new_dims = tf.cast(tf.floor(scale * tf.to_double(dims[0:2])), tf.int32)
            image = tf_image.resize_image(image, new_dims,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.WARP_RESIZE:
            # Warp resize of the image.
            image = tf_image.resize_image(image, out_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
        # Resize.NONE leaves the image untouched.

        # Split the full-image rectangle back off.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        # Remove boxes flagged as difficult.
        if difficults is not None:
            keep = tf.logical_not(tf.cast(difficults, tf.bool))
            labels = tf.boolean_mask(labels, keep)
            bboxes = tf.boolean_mask(bboxes, keep)
        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image, labels, bboxes, bbox_img
开发者ID:HiKapok,项目名称:X-Detector,代码行数:60,代码来源:common_preprocessing.py


注:本文中的tensorflow.to_double方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。