Python tensorflow.select Function Code Examples

This article collects typical usage examples of the Python function tensorflow.select. If you have been wondering what exactly tf.select does, how to use it, or what real-world code that calls it looks like, the curated examples below should help.


The following presents 15 code examples of the select function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
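
Before diving into the examples, here is a minimal sketch of what tf.select does: it builds a tensor by picking, element-wise, from one of two same-shaped tensors according to a boolean condition tensor. Note that all of the examples below use the TensorFlow 0.x API; in TensorFlow 1.0 and later, tf.select was removed and replaced by tf.where with the same three-argument semantics.

import tensorflow as tf

cond = tf.constant([True, False, True, False])
x = tf.constant([1.0, 2.0, 3.0, 4.0])
y = tf.constant([-1.0, -2.0, -3.0, -4.0])

# Element-wise: take x where cond is True, y where it is False.
z = tf.select(cond, x, y)  # tf.where(cond, x, y) in TF >= 1.0

with tf.Session() as sess:
    print(sess.run(z))  # [ 1. -2.  3. -4.]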

Example 1: __init__

    def __init__(self, shape, lambda1 = 0.1, lambda2 = 0.1, mu = 0.1):
        """Initialize the ChanVese segmenter

        Arguments:
        shape (required) -- size of the image to segment

        lambda1 (default: 0.1) -- The cost of labeling pixels as type 1 (see the class docstring). This argument (as well as lambda2) can be used to bias the segmentation in one direction or the other. Which parts of the image end up with which label is not deterministic, though, so lambda1 and lambda2 will likely be guess-and-check parameters.

        lambda2 (default: 0.1) -- The cost of labeling pixels as type 2 (see the class docstring)

        mu (default: 0.1) -- The cost of having a boundary. A higher value means fewer boundaries.
        """
        xs = range(3)
        ys = range(3)
        Xs, Ys = numpy.meshgrid(xs, ys)
        Rs = numpy.sqrt((Xs - 1.0)**2 + (Ys - 1.0)**2)

        kernelBlurCpu = numpy.exp(-Rs / (2.0 * 0.75**2)).astype('float32')
        kernelBlurCpu /= numpy.linalg.norm(kernelBlurCpu.flatten())
        
        self.kernel = tf.constant(kernelBlurCpu.reshape([3, 3, 1, 1]))

        self.I = tf.Variable(tf.truncated_normal(shape = [1, shape[0], shape[1], 1], mean = 0.0, stddev = 0.1))
        
        self.u1 = tf.Variable(1.0)
        self.u2 = tf.Variable(-1.0)

        self.G = tf.placeholder(tf.float32, shape = shape)

        self.Gv = tf.Variable(numpy.zeros([1, shape[0], shape[1], 1]).astype('float32'))
        self.initialize = self.Gv.assign(tf.reshape(self.G, shape = [1, shape[0], shape[1], 1]))
        self.initialize2 = self.I.assign(tf.reshape(self.G, shape = [1, shape[0], shape[1], 1]))

        self.blur = tf.nn.conv2d(self.I, self.kernel, strides = [1, 1, 1, 1], padding = 'SAME')

        self.u1m = tf.abs(self.blur - self.u1)
        self.u2m = tf.abs(self.blur - self.u2)

        ones = numpy.ones((1, shape[0], shape[1], 1)).astype('float32')
        zeros = numpy.zeros((1, shape[0], shape[1], 1)).astype('float32')

        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.mu = mu

        eta = 0.1
        self.conv = eta / (numpy.pi * (eta**2 + self.blur**2))

        self.u1t = self.lambda1 * tf.reduce_sum(tf.select(self.u2m > self.u1m, (self.Gv - self.u1)**2, zeros))
        self.u2t = self.lambda2 * tf.reduce_sum(tf.select(self.u2m <= self.u1m, (self.Gv - self.u2)**2, zeros))

        self.edgeLoss = self.mu * tf.reduce_sum(tf.abs(self.conv))

        self.loss = self.u1t + self.u2t + self.edgeLoss

        self.shape = shape

        self.train_step = tf.train.AdamOptimizer(1.0e-1).minimize(self.loss, var_list = [self.I, self.u1, self.u2])
Author: bbbales2, Project: microstructure_python, Lines: 60, Source: chanvese.py
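
For context, a hypothetical driver for this class might look like the sketch below. The class name, the G placeholder, the initialize/initialize2 ops, and train_step come from the constructor above; the session loop, the iteration count, and the input array img are assumptions.

# Hypothetical usage sketch (TF 0.x); img is assumed to be a 2D float32 numpy array.
segmenter = ChanVese(img.shape, lambda1=0.1, lambda2=0.1, mu=0.1)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    # Load the image into Gv and use it as the initial level-set image I.
    sess.run([segmenter.initialize, segmenter.initialize2],
             feed_dict={segmenter.G: img})
    for _ in range(100):
        sess.run(segmenter.train_step)
    # The fitted level-set image and the two region means.
    I, u1, u2 = sess.run([segmenter.I, segmenter.u1, segmenter.u2])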

Example 2: evaluate_precision_recall

def evaluate_precision_recall(
    input_layer, labels, threshold=0.5, per_example_weights=None, name=PROVIDED, phase=Phase.train
):
    """Computes the precision and recall of the prediction vs the labels.

  Args:
    input_layer: A Pretty Tensor object.
    labels: The target labels to learn as a float tensor.
    threshold: The threshold to use to decide if the prediction is true.
    per_example_weights: A Tensor with a weight per example.
    name: An optional name.
    phase: The phase of this model; non training phases compute a total across
      all examples.
  Returns:
    Precision and Recall.
  """
    _ = name  # Eliminate warning, name used for namescoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_layer, labels, threshold, per_example_weights
    )

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )

        with input_layer.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_layer.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_layer.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.select(tf.equal(sum_retrieved, 0), tf.zeros_like(selected), selected / sum_retrieved),
        tf.select(tf.equal(sum_relevant, 0), tf.zeros_like(selected), selected / sum_relevant),
    )
Author: yaowenwu, Project: prettytensor, Lines: 60, Source: pretty_tensor_loss_methods.py
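
The two tf.select calls in the return statement implement a common guarded-division idiom: when the denominator is zero, return 0 instead of NaN. A standalone sketch of the pattern (TF 0.x names; the tensor names are placeholders):

def safe_div(numerator, denominator):
    """numerator / denominator, but 0 wherever the denominator is 0."""
    zeros = tf.zeros_like(numerator)
    return tf.select(tf.equal(denominator, 0), zeros, numerator / denominator)

# precision = safe_div(true_positives, retrieved)  # no NaN when nothing is retrieved
# recall    = safe_div(true_positives, relevant)   # no NaN when nothing is relevant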

Example 3: loss_estimate

def loss_estimate(batch_size,old_state,data,total_data,model_params,base_mean,base_log_var):
    clipped_log_vals, nan_mask, reset_rows = data


    zeros = tf.zeros_like(clipped_log_vals)
    state_init = model_params.init_state(batch_size)
    data_count = tf.reduce_sum(tf.to_float(tf.logical_not(nan_mask)),name='data_count')
    
    model_input = tf.select(nan_mask,zeros,clipped_log_vals)
    target_outputs = model_input

    sample_params = model_params.sample_vals(batch_size)

    #TODO verify significance of old_state
    filtered_state = tf.select(reset_rows,old_state,state_init)

    new_state,delta_mean = sample_inference(filtered_state,model_input,sample_params)
    variance = tf.exp(base_log_var)
    mean = base_mean + delta_mean * variance

    raw_losses = gaussian_neg_log_likelyhood(target_outputs,mean,variance)
    clean_raw_losses = tf.select(nan_mask,zeros,raw_losses)
    raw_loss = tf.reduce_sum(clean_raw_losses)

    kl_divergence = model_params.get_divergence()

    loss_estimate = raw_loss * (total_data / data_count) + kl_divergence

    return loss_estimate,new_state,kl_divergence
Author: CurtisHuebner, Project: SMP3.0, Lines: 29, Source: train_model.py

Example 4: build_mh_update

    def build_mh_update(self):
        with tf.name_scope("gold_model"):
            self.joint_density_gold = self.joint_density(**self.symbols_gold)

        with tf.name_scope("proposed_model"):
            self.joint_density_proposed = self.joint_density(**self.symbols_proposed)
        with tf.name_scope("mh_updates"):            
            self.mh_ratio = self.joint_density_proposed - self.joint_density_gold
            self.uniform = tf.placeholder(dtype=tf.float32, name="u")
            log_uniform = tf.log(self.uniform)
            self.accepted = log_uniform < self.mh_ratio 
            
            update_ops = []
            for name, latent in self.latents.items():
                next_val = tf.select(self.accepted, latent["proposed"], latent["gold"])
                update_ops.append(latent["gold"].assign(next_val))

            self.step_counter = tf.Variable(0)
            self.accept_counter = tf.Variable(0)
            self.accept_rate = tf.to_double(self.accept_counter) / tf.to_double(self.step_counter)
            update_ops.append(self.step_counter.assign_add(1))
            update_ops.append(self.accept_counter.assign_add(tf.select(self.accepted, 1, 0)))
            
            self.global_update = tf.group(*update_ops)
                
        return self.global_update
Author: BenJamesbabala, Project: bayesflow, Lines: 26, Source: mh.py
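
The accept/reject logic above is the standard Metropolis-Hastings test: accept the proposal when log(u) < log p(proposed) - log p(current) for u ~ Uniform(0, 1). A plain-numpy sketch of the same step, with hypothetical log_density and propose functions (the proposal is assumed symmetric):

import numpy as np

def mh_step(current, log_density, propose):
    """One Metropolis-Hastings update; returns the new state and whether it was accepted."""
    proposed = propose(current)
    log_ratio = log_density(proposed) - log_density(current)
    if np.log(np.random.uniform()) < log_ratio:
        return proposed, True
    return current, False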

Example 5: UpdateProbs

  def UpdateProbs(self, inp):
    """Update probabilities of each particle based on 2D matrix inp which is a 2D perspectiuve projection of the scene"""

    projection, onscreen = self.project()
    filtered_projection = tf.to_int64(tf.select(onscreen, projection, tf.zeros_like(projection)))
    per_state_probabilities = tf.gather_nd(inp, filtered_projection)
    
    filtered_probabilities = tf.select(onscreen, per_state_probabilities, tf.zeros_like(per_state_probabilities))
    
    new_state_indicies = tf.squeeze(tf.multinomial(tf.expand_dims(tf.log(filtered_probabilities),0), self.particles/10*9))
    
    new_state = tf.gather(self.state, new_state_indicies)
    
    # Add momentum
    new_state = tf.concat(1, [new_state[:, 0:3] + new_state[:, 3:6], new_state[:, 3:10]])
    
    # Add in particles for the "just come onscreen" case.
    new_state = tf.concat(0, [new_state, tf.random_normal([self.particles/10, 10]) * self.initial_std + self.initial_bias])

    
    new_state = new_state + tf.random_normal([self.particles, 10]) * self.update_std
    # Todo:  permute state by adding noise.

    
    return self.state.assign(new_state)
Author: Hello1024, Project: quadcopter, Lines: 25, Source: particle.py

Example 6: _loss_x_entropy

    def _loss_x_entropy(self, x, z, noise=None):
        with tf.name_scope("xentropy_loss"):
            z_clipped = tf.clip_by_value(z, FLAGS.zero_bound, FLAGS.one_bound)
            z_minus_1_clipped = tf.clip_by_value((1.0 - z), FLAGS.zero_bound, FLAGS.one_bound)
            x_clipped = tf.clip_by_value(x, FLAGS.zero_bound, FLAGS.one_bound)
            x_minus_1_clipped = tf.clip_by_value((1.0 - x), FLAGS.zero_bound, FLAGS.one_bound)
            
            # cross_entropy = x * log(z) + (1 - x) * log(1 - z)
            
            cross_entropy = tf.add(tf.mul(tf.log(z_clipped), x_clipped),
                                   tf.mul(tf.log(z_minus_1_clipped), x_minus_1_clipped), name='X-Entr')

            if noise:
                with tf.name_scope("Given_Emphasis"):
                    a, b = self._get_emph_params
                    corrupted = tf.select(noise, cross_entropy, tf.zeros_like(cross_entropy), name='Corrupted_Emphasis')
                    
                    # OR -- tf.select(tf.logical_not(noisy_points), cross_entropy, tf.zeros_like(cross_entropy), name='Uncorrupted_Emphasis')
                    uncorrupted = tf.select(noise, tf.zeros_like(cross_entropy), cross_entropy, name='Uncorrupted_Emphasis')
                    
                    loss = a * (-1 * tf.reduce_sum(corrupted, 1)) + b * (-1 * tf.reduce_sum(uncorrupted, 1))
            else:
                # Sum the cost for each example
                loss = -1 * tf.reduce_sum(cross_entropy, 1)
        
            # Reduce mean to find the overall cost of the loss
            cross_entropy_mean = tf.reduce_mean(loss, name='xentropy_mean')
    
            return cross_entropy_mean
Author: hussius, Project: StackedDAE, Lines: 29, Source: dae.py

Example 7: updatesome

def updatesome():
    if reverse:
        return tf.select(
            tf.greater_equal(time, max_sequence_length - lengths),
            new_state,
            old_state)
    else:
        return tf.select(tf.less(time, lengths), new_state, old_state)
Author: Styrke, Project: master-code, Lines: 8, Source: rnn_dot.py
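
This idiom recurs in Examples 9 and 14 below: inside a manually unrolled RNN, tf.select freezes the state of sequences that have already ended by comparing the loop's time step against each example's length. A minimal sketch of the masking step (hypothetical names):

def mask_finished(time, lengths, new_state, old_state):
    """Keep old_state for sequences that ended before `time`; take new_state otherwise."""
    still_running = tf.less(time, lengths)  # [batch] bool; applied row-wise by tf.select
    return tf.select(still_running, new_state, old_state)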

Example 8: testShapeMismatch

 def testShapeMismatch(self):
   c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
   x = np.random.rand(1, 3, 2) * 100
   y = np.random.rand(2, 5, 3) * 100
   for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
     xt = x.astype(t)
     yt = y.astype(t)
     with self.assertRaises(ValueError):
       tf.select(c, xt, yt)
Author: adeelzaman, Project: tensorflow, Lines: 9, Source: cwise_ops_test.py

Example 9: _copy_some_through

    def _copy_some_through(new_output, new_alpha, new_attn_ids, new_lmbdas, new_state):
        # Use broadcasting select to determine which values should get
        # the previous state & zero output, and which values should get
        # a calculated state & output.

        # Alpha needs to be (batch, tasks, k)
        copy_cond = (time >= sequence_length)
        return ([tf.select(copy_cond, zero_output, new_output),
                 tf.select(copy_cond, zero_alpha, new_alpha), # (batch, tasks, k)
                 tf.select(copy_cond, zero_attn_ids, new_attn_ids),
                 tf.select(copy_cond, zero_lmbdas, new_lmbdas)] +
                [tf.select(copy_cond, old_s, new_s)
                 for (old_s, new_s) in zip(state, new_state)])
Author: hedgefair, Project: pycodesuggest, Lines: 13, Source: rnn.py

Example 10: _corrupt

    def _corrupt(self, x, ratio, n_type='MN'):
        with tf.name_scope("Corruption"):
            """ Noise adding (or input corruption)
            This function adds noise to the given data.
            
            Args:
                x    : The input data for the noise to be applied
                ratio: The percentage of the data affected by the noise addition
                n_type: The type of noise to be applied.
                        Choices: MN (masking noise), SP (salt-and-pepper noise)
            """
            
            # Safety check. If unspecified noise type given, use Masking noise instead.
            if n_type != 'MN' and n_type != 'SP' and n_type != 'TFDO':
                n_type = 'MN'
                print("Unknown noise type. Masking noise will be used instead.")
            
            
            # if there is no noise to be added there is no need to proceed further
            if ratio == 0.0:
                return x, None  # x_tilde is not defined yet; return the input unchanged
            
            if n_type == 'TFDO':
                x_tilde = tf.nn.dropout(x, keep_prob= 1 - ratio)
#                 points_to_alter = x_tilde == 0.
#                 print points_to_alter
#                 x_tilde = tf.select(points_to_alter, tf.add(tf.zeros_like(x_tilde, dtype=tf.float32),
#                                                                 FLAGS.zero_bound), x_tilde, name='X_tilde')
#                 x_tilde[x_tilde == 0.] = tf.constant(FLAGS.zero_bound)
            else:
                # It makes a copy of the data, otherwise 'target_feed' will also be affected
                x_tilde = tf.identity(x, name='X_tilde')
                shape = tf.Tensor.get_shape(x_tilde)
                # Creating and applying random noise to the data. (Masking noise)
                points_to_alter = tf.random_uniform(shape=shape, dtype=tf.float32) < ratio
                
                if n_type == 'MN':
                    x_tilde = tf.select(points_to_alter, tf.add(tf.zeros_like(x_tilde, dtype=tf.float32),
                                                                FLAGS.zero_bound), x_tilde, name='X_tilde')
                    
                elif n_type == 'SP':
                    coin_flip = np.asarray([np.random.choice([FLAGS.zero_bound, FLAGS.one_bound]) for _ in range(shape[0]) for _ in range(shape[1])]).reshape(shape)
                    x_tilde = tf.select(points_to_alter, tf.to_float(coin_flip), x_tilde, name='X_tilde')

                
            # Also returns the 'points_to_alter' in case of applied Emphasis
            if not FLAGS.emphasis or n_type == 'TFDO':
                points_to_alter = None
    
            return x_tilde, points_to_alter
Author: glrs, Project: StackedDAE, Lines: 50, Source: dae.py
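
A condensed sketch of the two corruption modes above, with the FLAGS bounds replaced by literal 0.0 and 1.0 and the numpy coin flips replaced by a rounded uniform tensor (both simplifications are assumptions, not the project's exact code):

def corrupt(x, ratio, n_type='MN'):
    """Masking noise zeroes random entries; salt-and-pepper sets them to 0 or 1."""
    shape = tf.Tensor.get_shape(x)
    points_to_alter = tf.random_uniform(shape=shape, dtype=tf.float32) < ratio
    if n_type == 'MN':
        noise = tf.zeros_like(x)  # masking noise: zero out the chosen entries
    else:
        noise = tf.round(tf.random_uniform(shape=shape, dtype=tf.float32))  # salt-and-pepper
    return tf.select(points_to_alter, noise, x), points_to_alter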

Example 11: _lcod

def _lcod(x, w_e, w_s, thresh, T):
    """
    Learned Coordinate Descent (LCoD). LCoD is an approximately sparse encoder. It
    approximates (in an L2 sense) a sparse code of `x` according to dictionary `w_e`.
    Note that during backpropagation, `w_e` isn't strictly a dictionary (i.e.
    dictionary atoms are not strictly normalized).

    LCoD is a differentiable version of greedy coordinate descent.

    Args:
      x: [n, n_f] tensor
      w_e: [n_f, n_c] encoder tensor
      w_s: [n_c, n_f] mutual inhibition tensor
      thresh: soft threshold
      T: number of iterations
    Returns:
      z: LCoD output
    """
    with tf.name_scope('itr_00'):
        b = tf.matmul(x, w_e, name='b')
        z = tf.zeros_like(b, dtype=tf.float32, name='z')

    for t in range(1, T):
        with tf.name_scope('itr_%02d' % t):
            z_bar = _st(b, thresh, name='z_bar')
            with tf.name_scope('greedy_heuristic'):
                # no tf.tile b/c tf.select will broadcast?
                if t > 1:
                    z_diff = tf.sub(z_bar, z, name='z_diff')
                else:
                    z_diff = z_bar
                abs_z_diff = tf.abs(z_diff, name='abs_z_diff')

                tmp = tf.reduce_max(abs_z_diff, 1, True)
                tmp2 = tf.equal(abs_z_diff, tmp)
                e = tf.select(tmp2, z_diff, tf.zeros_like(z_bar, dtype=tf.float32),
                           name='e')
                ks = tf.argmax(abs_z_diff, 1, name='ks')
                
            with tf.name_scope('update_b'):
                s_slices = tf.gather(w_s, ks, name='s_slices')
                b = tf.add(b, tf.mul(e, s_slices), name='b')

            with tf.name_scope('update_z'):
                z = tf.select(tmp2, z_bar, z, name='z')

    with tf.name_scope('itr_%02d' % T):
        z = _st(b, thresh, name='z')

    return z
Author: ulysseses, Project: sr_exp2, Lines: 50, Source: model.py
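
The helper _st is not shown in this snippet; in the LCoD/LISTA literature it is the soft-thresholding (shrinkage) operator sign(b) * max(|b| - thresh, 0). A definition consistent with how it is called above (an assumption about the project's implementation, written in the TF 0.x API used here):

def _st(b, thresh, name='z'):
    """Soft-thresholding: sign(b) * max(|b| - thresh, 0)."""
    return tf.mul(tf.sign(b), tf.nn.relu(tf.abs(b) - thresh), name=name)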

Example 12: _build_graph

    def _build_graph(self, inputs, is_training):
        state, action, reward, next_state, isOver = inputs
        self.predict_value = self._get_DQN_prediction(state, is_training)
        action_onehot = tf.one_hot(action, NUM_ACTIONS)
        pred_action_value = tf.reduce_sum(self.predict_value * action_onehot, 1)    #N,
        max_pred_reward = tf.reduce_mean(tf.reduce_max(
            self.predict_value, 1), name='predict_reward')
        add_moving_summary(max_pred_reward)
        self.greedy_choice = tf.argmax(self.predict_value, 1)   # N,

        with tf.variable_scope('target'):
            targetQ_predict_value = self._get_DQN_prediction(next_state, False)    # NxA

            # DQN
            #best_v = tf.reduce_max(targetQ_predict_value, 1)    # N,

            # Double-DQN
            predict_onehot = tf.one_hot(self.greedy_choice, NUM_ACTIONS, 1.0, 0.0)
            best_v = tf.reduce_sum(targetQ_predict_value * predict_onehot, 1)

            target = reward + (1.0 - tf.cast(isOver, tf.float32)) * GAMMA * tf.stop_gradient(best_v)

        sqrcost = tf.square(target - pred_action_value)
        abscost = tf.abs(target - pred_action_value)    # robust error func
        cost = tf.select(abscost < 1, sqrcost, abscost)
        summary.add_param_summary([('conv.*/W', ['histogram', 'rms']),
                                   ('fc.*/W', ['histogram', 'rms']) ])   # monitor all W
        self.cost = tf.reduce_mean(cost, name='cost')
Author: xhrwang, Project: tensorpack, Lines: 28, Source: DQN.py
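
The cost line is a Huber-style robust loss: quadratic for errors with absolute value below 1, linear above. Note that the textbook Huber loss subtracts a constant from the linear branch so the two pieces join smoothly; a sketch of that variant for comparison (not the code above):

def huber_loss(err, delta=1.0):
    """Quadratic within |err| <= delta, linear (and continuous) outside."""
    abs_err = tf.abs(err)
    quadratic = 0.5 * tf.square(err)
    linear = delta * (abs_err - 0.5 * delta)
    return tf.select(abs_err < delta, quadratic, linear)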

Example 13: set_logp_to_neg_inf

def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to the allowed values of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.select(is_inside_bounds, logp, tf.fill(tf.shape(X), config.dtype(-np.inf)))

    return logp
Author: tensorprob, Project: tensorprob, Lines: 35, Source: utilities.py
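
From the unpacking `for l, u in bounds`, each Region behaves like a (lower, upper) pair; plain tuples are used below for illustration. A hypothetical usage restricting X to the unit interval:

X = tf.placeholder(tf.float32, shape=[None])
logp = -0.5 * tf.square(X)  # some unnormalized log density
logp = set_logp_to_neg_inf(X, logp, bounds=[(0.0, 1.0)])  # zero probability outside (0, 1)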

Example 14: get_total_loss

def get_total_loss(input_sequence, ngram_predictions, outputs, expected_sequence):
  if args.bootstrap_out:
    outputs = tf.add(outputs, tf.log(ngram_predictions))
  # [batch_size, unrolled_iterations]
  losses = tf.nn.sparse_softmax_cross_entropy_with_logits(outputs, expected_sequence)
  losses = tf.select(tf.equal(input_sequence, data.EOS), tf.zeros_like(losses), losses)
  return tf.reduce_sum(losses)
Author: mafik, Project: pargen, Lines: 7, Source: stacked.py

Example 15: reduce_mean

def reduce_mean(seq_batch, allow_empty=False):
    """Compute the mean of each sequence in a SequenceBatch.

    Args:
        seq_batch (SequenceBatch): a SequenceBatch with the following attributes:
            values (Tensor): a Tensor of shape (batch_size, seq_length, :, ..., :)
            mask (Tensor): if the mask values are arbitrary floats (rather than binary), the mean will be
            a weighted average.
        allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0, rather
            than NaN. Default is False, causing an error to be thrown.

    Returns:
        Tensor: of shape (batch_size, :, ..., :)
    """
    values, mask = seq_batch.values, seq_batch.mask
    # compute weights for the average
    sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)

    if allow_empty:
        asserts = []  # no assertion
        sums = tf.select(tf.equal(sums, 0), tf.ones(tf.shape(sums)), sums)  # replace 0's with 1's
    else:
        asserts = [tf.assert_positive(sums)]  # throw error if 0's exist

    with tf.control_dependencies(asserts):
        weights = mask / sums  # (batch_size, seq_length)
    return weighted_sum(seq_batch, weights)
Author: siddk, Project: lang2program, Lines: 27, Source: seq_batch.py


Note: The tensorflow.select examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright for the source code remains with the original authors, and any use or distribution must follow the corresponding project's license. Do not reproduce without permission.