

Python tensorflow.mul Function Code Examples

This article collects typical usage examples of the tensorflow.mul function in Python. If you are wondering how exactly mul is used, what it does, or where to find working examples of it, the hand-picked code samples below should help.


The sections below show 15 code examples of the mul function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
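Before the examples, here is a minimal usage sketch (not taken from any of the projects below): tf.mul multiplies two tensors element-wise, with the usual broadcasting rules. Note that tf.mul belongs to the pre-1.0 TensorFlow API and was renamed tf.multiply in TensorFlow 1.0, so the examples on this page assume an older TensorFlow release.

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[10.0, 20.0], [30.0, 40.0]])

c = tf.mul(a, b)    # element-wise product: [[10., 40.], [90., 160.]]
d = tf.mul(a, 0.5)  # broadcasting against a scalar: [[0.5, 1.], [1.5, 2.]]

with tf.Session() as sess:
    print(sess.run(c))
    print(sess.run(d))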

Example 1: __init__

    def __init__(self, action1_bounds, action2_bounds, session):
        self.graph = session.graph
        with self.graph.as_default():
            self.sess = session

            self.action_bounds = [[action1_bounds[1], action2_bounds[1]],
                                  [action1_bounds[0], action2_bounds[0]]]

            self.action_size = len(self.action_bounds[0])
            self.action_input = tf.placeholder(tf.float32, [None, self.action_size])

            self.p_max = tf.constant(self.action_bounds[0], dtype=tf.float32)
            self.p_min = tf.constant(self.action_bounds[1], dtype=tf.float32)

            self.p_range = tf.constant([x - y for x, y in zip(self.action_bounds[0], self.action_bounds[1])],
                                       dtype=tf.float32)

            self.p_diff_max = tf.div(-self.action_input + self.p_max, self.p_range)
            self.p_diff_min = tf.div(self.action_input - self.p_min, self.p_range)

            self.zeros_act_grad_filter = tf.zeros([self.action_size])
            self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])

            self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter),
                                           tf.mul(self.act_grad, self.p_diff_max),
                                           tf.mul(self.act_grad, self.p_diff_min))
Developer: JakobBreuninger | Project: neurobotics | Lines: 26 | Source: grad_inverter.py
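A note on what the tf.select / tf.mul combination above computes, assuming act_grad carries the critic gradient ∂Q/∂action: a positive gradient (one that would increase the action) is rescaled by (p_max - action) / (p_max - p_min), i.e. by the remaining headroom to the upper bound, and a negative gradient by (action - p_min) / (p_max - p_min). Once an action drifts past a bound the corresponding factor becomes negative and the gradient is inverted, pushing the action back into the valid range; this is the "inverting gradients" trick commonly used with DDPG for bounded action spaces.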

Example 2: entropy

    def entropy(self, n, p):
        # Note that given n and p where p is a probability vector of
        # length k, the entropy requires a sum over all
        # possible configurations of a k-vector which sums to n. It's
        # expensive.
        # http://stackoverflow.com/questions/36435754/generating-a-numpy-array-with-all-combinations-of-numbers-that-sum-to-less-than
        sess = tf.Session()
        n = sess.run(tf.cast(tf.squeeze(n), dtype=tf.int32))
        sess.close()
        p = tf.cast(tf.squeeze(p), dtype=tf.float32)
        if isinstance(n, np.int32):
            k = get_dims(p)[0]
            max_range = np.zeros(k, dtype=np.int32) + n
            x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                 if sum(i)==n])
            logpmf = self.logpmf(x, n, p)
            return tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))
        else:
            out = []
            for j in range(n.shape[0]):
                k = get_dims(p)[0]
                max_range = np.zeros(k, dtype=np.int32) + n[j]
                x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                     if sum(i)==n[j]])
                logpmf = self.logpmf(x, n[j], p[j, :])
                out += [tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))]

            return tf.pack(out)
Developer: crack521 | Project: edward | Lines: 28 | Source: distributions.py

Example 3: cross_entropy

def cross_entropy(output, target):
    """Returns the cost function of Cross-entropy of two distributions, implement
    softmax internally.

    Parameters
    ----------
    output : Tensorflow variable
        A distribution with shape: [None, n_feature].
    target : Tensorflow variable
        A distribution with shape: [None, n_feature].

    Examples
    --------
    >>> ce = tf.cost.cross_entropy(y_logits, y_target_logits)

    Notes
    -----
    About cross-entropy: `wiki <https://en.wikipedia.org/wiki/Cross_entropy>`_.\n
    The code is borrowed from: `here <https://en.wikipedia.org/wiki/Cross_entropy>`_.
    """
    with tf.name_scope("cross_entropy_loss"):
        net_output_tf = output
        target_tf = target
        cross_entropy = tf.add(tf.mul(tf.log(net_output_tf, name=None),target_tf),
                             tf.mul(tf.log(1 - net_output_tf), (1 - target_tf)))
        return -1 * tf.reduce_mean(tf.reduce_sum(cross_entropy, 1), name='cross_entropy_mean')
Developer: shorxp | Project: tensorlayer | Lines: 26 | Source: cost.py

Example 4: blend_images

def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
    y = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
Developer: Sabrewarrior | Project: PhotoOrientation | Lines: 35 | Source: misc.py

Example 5: get_model

def get_model(name):
    name = functools.partial('{}-{}'.format, name)

    self_pos = tf.placeholder(Config.dtype, Config.data_shape, name='self_pos')
    self_ability = tf.placeholder(Config.dtype, Config.data_shape, name='self_ability')
    enemy_pos = tf.placeholder(Config.dtype, Config.data_shape, name='enemy_pos')
    input_label = tf.placeholder(Config.dtype, Config.label_shape, name='input_label')

    x = tf.concat(3, [self_pos, self_ability, enemy_pos], name=name('input_concat'))
    y = input_label

    nl = tf.nn.tanh

    def conv_pip(name, x):
        name = functools.partial('{}_{}'.format, name)

        x = conv2d(name('0'), x, Config.data_shape[3]*2, kernel=3, stride=1, nl=nl)
        x = conv2d(name('1'), x, Config.data_shape[3], kernel=3, stride=1, nl=nl)
        return x

    pred = conv_pip(name('conv0'), x)
    for layer in range(5):
        pred_branch = tf.concat(3, [pred,x], name=name('concate%d'%layer))
        pred += conv_pip(name('conv%d'%(layer+1)), pred_branch)

    x = tf.tanh(pred, name=name('control_tanh'))

    z = tf.mul(tf.exp(x), self_ability)
    z_sum = tf.reduce_sum(z, reduction_indices=[1,2,3], name=name('partition_function')) # partition function

    # another formula of y*logy
    loss = -tf.reduce_sum(tf.mul(x, y), reduction_indices=[1,2,3]) + tf.log(z_sum)
    z_sum = tf.reshape(z_sum, [-1, 1, 1, 1])
    pred = tf.div(z, z_sum, name=name('predict'))
    return Model([self_pos, self_ability, enemy_pos], input_label, loss, pred, debug=z)
Developer: milkpku | Project: BetaElephant | Lines: 35 | Source: model.py
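A quick check of the loss used above, assuming the label tensor y is a probability distribution over the legal positions (it sums to 1 and is zero wherever self_ability is zero): since pred = exp(x) * self_ability / z_sum, the cross-entropy -sum(y * log(pred)) expands to -sum(y * x) + log(z_sum), which is exactly the expression assigned to loss. The code therefore minimizes the usual cross-entropy without ever forming log(pred) explicitly.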

Example 6: _build_loss

    def _build_loss(self):

        with tf.variable_scope("loss"):

            # Compute y_j = r_j * discount*best_qvalue
            self.tf_discount = tf.constant(self.discount)
            self.qtarget = tf.add(self.pl_rewards, tf.mul(1.0-self.pl_terminals, tf.mul(self.tf_discount, self.pl_qtargets)))

            # Select Q-values for given actions
            self.actions_one_hot = tf.one_hot(self.pl_actions, self.num_actions, 1.0, 0.0)
            self.qvalue_pred = tf.reduce_sum(tf.mul(self.qvalues, self.actions_one_hot), reduction_indices=1)

            # Difference between target and predicted Q-network output
            self.delta = tf.sub(self.qtarget, self.qvalue_pred)

            if self.clip_delta > 0:
                # Perform clipping of the error term, default clipping is to (-1, +1) range
                self.quadratic_part = tf.minimum(tf.abs(self.delta), tf.constant(self.clip_delta))
                self.linear_part    = tf.sub(tf.abs(self.delta), self.quadratic_part)
                self.delta_square   = tf.mul(tf.constant(0.5), tf.square(self.quadratic_part)) + (self.clip_delta*self.linear_part)
                #self.delta_clipped = tf.clip_by_value(self.delta, -1.0*self.clip_delta, self.clip_delta)
                #self.delta_square  = tf.square(self.delta_clipped)
            else:
                # No error clipping
                self.delta_square  = tf.square(self.delta)

        # Actual loss
        if self.batch_accumulator == "sum":
            self.loss = tf.reduce_sum(self.delta_square)
        else:
            self.loss = tf.reduce_mean(self.delta_square)

        # Running average of the loss for TensorBoard
        self.loss_moving_avg    = tf.train.ExponentialMovingAverage(decay=0.999)
        self.loss_moving_avg_op = self.loss_moving_avg.apply([self.loss])
Developer: tomrunia | Project: DeepReinforcementLearning-Atari | Lines: 35 | Source: qnetwork.py
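For reference, when clip_delta > 0 the quadratic_part / linear_part construction above is the Huber loss: the per-sample error equals 0.5 * delta^2 while |delta| <= clip_delta, and clip_delta * (|delta| - 0.5 * clip_delta) beyond that, so the gradient magnitude is capped at clip_delta for large errors instead of growing linearly with the error.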

Example 7: U_t_variance

def U_t_variance(timestep_outputs_matrix, total_timesteps, gamma = 5):

	with tf.op_scope([timestep_outputs_matrix, total_timesteps, gamma], "U_t_variance"):

		G_i_matrix = G_i_piecewise_variance(timestep_outputs_matrix, total_timesteps)
		timestep_outputs_matrix_with_g = tf.mul(timestep_outputs_matrix, G_i_matrix)
		return tf.reduce_prod(timestep_outputs_matrix_with_g)
Developer: dmakian | Project: TwitchRNNBot | Lines: 7 | Source: decoding_enhanced.py

Example 8: mean_var

 def mean_var(x, mask, mean, num):
     x_mask = tf.mul(x, mask)
     residual = x_mask - mean
     res_mask = tf.mul(residual ,mask)
     res_mask_sq = tf.mul(res_mask, res_mask)
     var = tf.reduce_sum(res_mask_sq,0,keep_dims=True)*1.0/(num+1e-7)
     return tf.reduce_sum(var)
Developer: thtrieu | Project: last_layer | Lines: 7 | Source: cnnlda.py

Example 9: IoU

def IoU(bbox, gt):

    # bbox = [ x , y , w , h ] ( x , y  left up)

    shape = [-1, 1]

    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,2] + gt[:,0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,3] + gt[:,1], tf.float32), shape))


    inter_w = tf.sub(x2,x1)

    inter_h = tf.sub(y2,y1)

    inter = tf.cast(inter_w * inter_h, tf.float32)

    bounding_box = tf.cast(tf.mul(bbox[2],bbox[3]), tf.float32)

    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:,2],gt[:,3]), tf.float32), shape)

    #iou = tf.div(inter,tf.sub(tf.add(bounding_box,tf.reshape(ground_truth,shape)),inter))

    iou = inter / (bounding_box + ground_truth - inter)

    # limit the iou range between 0 and 1
    
    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    #mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)
    
    iou = tf.mul(iou, mask_less)
    #iou = tf.mul(iou, positive_mask)
    
    return iou
Developer: Johannes-brahms | Project: Yolo | Lines: 35 | Source: utils.py
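A quick sanity check of the [x, y, w, h] convention used above: for bbox = [0, 0, 2, 2] and a single ground-truth row gt = [1, 1, 2, 2], the intersection corners come out as x1 = y1 = 1 and x2 = y2 = 2, so inter = 1; both box areas are 4, giving IoU = 1 / (4 + 4 - 1) ≈ 0.143.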

Example 10: __init__

    def __init__(self,
                 env,
                 render,
                 debug,
                 sess,
                 action_policy,
                 num_features,
                 batch_size,
                 max_num_steps,
                 n_iter,
                 algo_discount):

        super(REINFORCE, self).__init__(env, render, debug, sess, action_policy,
                num_features, batch_size, max_num_steps, n_iter)

        # params specific to the policy gradient algo 
        self.algo_discount = algo_discount

        with tf.variable_scope("policy"):
            self.actions = tf.placeholder(tf.int32, [None, 1], "actions")
            self.returns = tf.placeholder(tf.float32, [None, 1], "returns")

            num_actions = self.env.action_space.n
            action_mask = tf.one_hot(indices=self.actions, depth=num_actions)
            # TODO: why are we using softmax here?
            self.log_probs = tf.nn.log_softmax(self.action_policy.network.logits)
            self.policy_probs = tf.reduce_sum( \
                    tf.mul(self.log_probs, action_mask), reduction_indices = 1)
            # negative since we are maximizing 
            self.loss = -tf.reduce_sum(tf.mul(self.policy_probs, utils.standardize(self.returns)))
            self.opt = tf.train.AdamOptimizer(self.action_policy.learning_rate).minimize(self.loss)
Developer: evancasey | Project: tensorflow-policy-gradient | Lines: 31 | Source: reinforce.py
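Regarding the TODO in the code above: tf.nn.log_softmax turns the raw policy logits into log-probabilities, so after masking with the one-hot encoding of the chosen action and summing over the action dimension, policy_probs holds log π(a_t | s_t). The loss is then the standard REINFORCE objective, -Σ log π(a_t | s_t) · R_t with standardized returns, negated because the Adam optimizer minimizes.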

Example 11: __call__

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            # Conveniently the concatenation of all hidden states at t-1
            h_star_t_prev = state
            u_g = tf.get_variable("u_g", [self.state_size],
                                  initializer=tf.random_uniform_initializer(-0.1, 0.1))
            cur_state_pos = 0
            cur_inp = inputs
            new_states = []
            for i, cell in enumerate(self._cells):
                with tf.variable_scope("Cell%d" % i):
                    cur_state = array_ops.slice(
                            state, [0, cur_state_pos], [-1, cell.state_size])
                    with tf.variable_scope("Global Reset"):
                        w_g = tf.get_variable("w_g", cell.state_size,
                                              initializer=tf.random_uniform_initializer(-0.1, 0.1))
                        g = tf.sigmoid(tf.mul(w_g, cur_state) + tf.mul(u_g, h_star_t_prev))
                        U = tf.get_variable("U", [cell.state_size],
                                            initializer=tf.random_uniform_initializer(-0.1, 0.1))
                        cur_state = tf.reduce_sum(g * tf.matmul(cur_state, U))

                    cur_state_pos += cell.state_size
                    cur_inp, new_state = cell(cur_inp, cur_state)
                    new_states.append(new_state)

        return cur_inp, array_ops.concat(1, new_states)
Developer: jimfleming | Project: gated-recurrent-feedback | Lines: 26 | Source: feedback_cell.py

Example 12: _loss_x_entropy

    def _loss_x_entropy(self, x, z, noise=None):
        with tf.name_scope("xentropy_loss"):
            z_clipped = tf.clip_by_value(z, FLAGS.zero_bound, FLAGS.one_bound)
            z_minus_1_clipped = tf.clip_by_value((1.0 - z), FLAGS.zero_bound, FLAGS.one_bound)
            x_clipped = tf.clip_by_value(x, FLAGS.zero_bound, FLAGS.one_bound)
            x_minus_1_clipped = tf.clip_by_value((1.0 - x), FLAGS.zero_bound, FLAGS.one_bound)
            
            # cross_entropy = x * log(z) + (1 - x) * log(1 - z)
            
            cross_entropy = tf.add(tf.mul(tf.log(z_clipped), x_clipped),
                                   tf.mul(tf.log(z_minus_1_clipped), x_minus_1_clipped), name='X-Entr')

            if noise:
                with tf.name_scope("Given_Emphasis"):
                    a, b = self._get_emph_params
                    corrupted = tf.select(noise, cross_entropy, tf.zeros_like(cross_entropy), name='Corrupted_Emphasis')
                    
                    # OR -- tf.select(tf.logical_not(noisy_points), cross_entropy, tf.zeros_like(cross_entropy), name='Uncorrupted_Emphasis')
                    uncorrupted = tf.select(noise, tf.zeros_like(cross_entropy), cross_entropy, name='Uncorrupted_Emphasis')
                    
                    loss = a * (-1 * tf.reduce_sum(corrupted, 1)) + b * (-1 * tf.reduce_sum(uncorrupted, 1))
            else:
                # Sum the cost for each example
                loss = -1 * tf.reduce_sum(cross_entropy, 1)
        
            # Reduce mean to find the overall cost of the loss
            cross_entropy_mean = tf.reduce_mean(loss, name='xentropy_mean')
    
            return cross_entropy_mean
Developer: hussius | Project: StackedDAE | Lines: 29 | Source: dae.py

Example 13: outer_product

def outer_product(*inputs):
    """Computes outer product.

    Args:
        inputs: a list of 1-D `Tensor` (vector)
    """
    inputs = list(inputs)
    order = len(inputs)

    for idx, input_ in enumerate(inputs):
        if len(input_.get_shape()) == 1:
            inputs[idx] = tf.reshape(input_, [-1, 1] if idx % 2 == 0 else [1, -1])

    if order == 2:
        output = tf.mul(inputs[0], inputs[1])
    elif order == 3:
        size = []
        idx = 1
        for i in xrange(order):
            size.append(inputs[i].get_shape()[0])
        output = tf.zeros(size)

        u, v, w = inputs[0], inputs[1], inputs[2]
        uv = tf.mul(inputs[0], inputs[1])
        for i in xrange(size[-1]):
            output = tf.scatter_add(output, [0,0,i], uv)

    return output
Developer: PKUers | Project: NTM-tensorflow | Lines: 28 | Source: ops.py

Example 14: setUp

  def setUp(self):
    """Test setup.

    Structure of the forward graph:
              f
             | |
        -----   -----
        |           |
        d           e
       | |         | |
    ---   ---------  ---
    |         |        |
    a         b        c

    Construct a backward graph using the GradientDescentOptimizer.
    """

    self.a = tf.Variable(1.0, name="a")
    self.b = tf.Variable(2.0, name="b")
    self.c = tf.Variable(4.0, name="c")
    self.d = tf.mul(self.a, self.b, name="d")
    self.e = tf.mul(self.b, self.c, name="e")
    self.f = tf.mul(self.d, self.e, name="f")

    # Gradient descent optimizer that minimizes g.
    tf.train.GradientDescentOptimizer(0.01).minimize(self.f, name="optim")

    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
Developer: brchiu | Project: tensorflow | Lines: 29 | Source: stepper_test.py

Example 15: _forward

    def _forward(self, obs_prob_list):
        
        with tf.name_scope('init_scaling_factor'):
            self.scale = tf.Variable(tf.zeros([self.N], tf.float64)) #scale factors
        
        with tf.name_scope('forward_first_step'):
            # initialize with state starting priors
            init_prob = tf.mul(self.T0, tf.squeeze(obs_prob_list[0]))

            # scaling factor at t=0
            self.scale = tf.scatter_update(self.scale, 0, 1.0 / tf.reduce_sum(init_prob))

            # scaled belief at t=0
            self.forward = tf.scatter_update(self.forward, 0, self.scale[0] * init_prob)

        # propagate belief
        for step, obs_prob in enumerate(obs_prob_list[1:]):
            with tf.name_scope('time_step-%s' %step):
                # previous state probability
                prev_prob = tf.expand_dims(self.forward[step, :], 0)
                # transition prior
                prior_prob = tf.matmul(prev_prob, self.T)
                # forward belief propagation
                forward_score = tf.mul(prior_prob, tf.squeeze(obs_prob))

                forward_prob = tf.squeeze(forward_score)
                # scaling factor
                self.scale = tf.scatter_update(self.scale, step+1, 1.0 / tf.reduce_sum(forward_prob))
                # Update forward matrix
                self.forward = tf.scatter_update(self.forward, step+1, self.scale[step+1] * forward_prob)
Developer: aliziaei | Project: HiddenMarkovModel_TensorFlow | Lines: 30 | Source: HiddenMarkovModel.py
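For reference, the loop above implements the scaled forward recursion of the HMM forward algorithm: with alpha_0 = T0 ⊙ b(o_0) and scale c_t = 1 / sum(alpha_t), each step computes alpha_{t+1} = c_{t+1} * ((alpha_t · T) ⊙ b(o_{t+1})), where b(o_t) is the observation-probability vector obs_prob_list[t]. The scaling keeps the belief normalized at every step, and the log-likelihood of the sequence can later be recovered as -Σ log c_t.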


Note: the tensorflow.mul examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this page without permission.