Python tensorflow.ones Function Code Examples

This article collects typical usage examples of the tensorflow.ones function in Python. If you are wondering what ones does, which arguments it takes, and how it is used in real code, the curated examples below should help.


The 15 ones code examples shown below are drawn from open-source projects and are sorted by popularity by default.
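
As a quick orientation before the project excerpts, here is a minimal standalone sketch of the basic call. It is not taken from any of the projects below and assumes the TensorFlow 1.x graph-mode API that those excerpts use: tf.ones takes a shape, an optional dtype (float32 by default), and the shape may itself be a dynamic tensor.

import tensorflow as tf

a = tf.ones([2, 3])                          # float32 is the default dtype
b = tf.ones([4, 10], dtype=tf.int32)         # the dtype can be set explicitly
c = tf.ones(tf.shape(a), dtype=tf.float64)   # the shape may be a dynamic tensor

with tf.Session() as sess:
    print(sess.run(a))          # a 2x3 array of 1.0
    print(sess.run(b).dtype)    # int32
    print(sess.run(c).shape)    # (2, 3)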

Example 1: main

def main():
    tf.set_random_seed(10)
    with tf.Session() as sess:
        rnn_cell = tf.nn.rnn_cell.LSTMCell(10)

        # defining initial state
        initial_state = rnn_cell.zero_state(4, dtype=tf.float32)

        inputs = tf.Variable(tf.random_uniform(shape = (4, 30, 100)), name='input')
        inputs = tf.identity(inputs, "input_node")

        # 'state' is a tensor of shape [batch_size, cell_state_size]
        outputs, state = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=initial_state, dtype=tf.float32)

        y1 = tf.identity(outputs, 'outputs')
        y2 = tf.identity(state, 'state')

        t1 = tf.ones([4, 30, 10])
        t2 = tf.ones([4, 10])

        loss = tf.reduce_sum((y1 - t1) * (y1 - t1)) + tf.reduce_sum((y2 - t2) * (y2 - t2))
        tf.identity(loss, name = "lstm_loss")
        # tf.summary.FileWriter('/tmp/log', tf.get_default_graph())

        net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
        run_model(net_outputs, argv[1], None, argv[3] == 'True')
Developer ID: ru003ar, Project: BigDL, Lines: 26, Source: dynamic_lstm.py

Example 2: bn_layer

def bn_layer(inputs,is_training,name='BatchNorm',moving_decay=0.9,eps=1e-5):
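    # Batch normalization with learnable scale (gamma) and offset (beta); the non-trainable
    # mean/var variables are overwritten with the current batch statistics in training mode.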
    shape = inputs.shape
    assert len(shape) in [2,4]

    param_shape = shape[-1]

    gamma = tf.Variable(tf.ones(param_shape), name='gamma')
    beta  = tf.Variable(tf.zeros(param_shape), name='beta')
    mean  = tf.Variable(tf.ones(param_shape), trainable=False, name='mean')
    var   = tf.Variable(tf.ones(param_shape), trainable=False, name='var')

    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(gamma)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(beta)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(mean)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(var)) 



    if is_training:
        batch_mean, batch_var = tf.nn.moments(inputs,[0,1,2],name='moments')
        mean = tf.assign(mean, batch_mean)
        var = tf.assign(var, batch_var)
        return tf.nn.batch_normalization(inputs,batch_mean+mean*1e-10,batch_var+var*1e-10,gamma,beta,eps)
    else:
        return tf.nn.batch_normalization(inputs,mean,var,gamma,beta,eps)
Developer ID: chengyake, Project: karch, Lines: 25, Source: main.py

Example 3: get_idx_map

def get_idx_map(shape):
    """Get index map for a image.
    Args:
        shape: [B, T, H, W] or [B, H, W]
    Returns:
        idx: [B, T, H, W, 2], or [B, H, W, 2]
    """
    s = shape
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])

    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')

    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    idx = tf.concat(ndims[0], [idx_y, idx_x])

    return idx
Developer ID: renmengye, Project: deep-tracker, Lines: 27, Source: build_deep_tracker.py

Example 4: _make_evaluation_dict

  def _make_evaluation_dict(self, resized_groundtruth_masks=False):
    input_data_fields = fields.InputDataFields
    detection_fields = fields.DetectionResultFields

    image = tf.zeros(shape=[1, 20, 20, 3], dtype=tf.uint8)
    key = tf.constant('image1')
    detection_boxes = tf.constant([[[0., 0., 1., 1.]]])
    detection_scores = tf.constant([[0.8]])
    detection_classes = tf.constant([[0]])
    detection_masks = tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32)
    num_detections = tf.constant([1])
    groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
    groundtruth_classes = tf.constant([1])
    groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
    if resized_groundtruth_masks:
      groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)
    detections = {
        detection_fields.detection_boxes: detection_boxes,
        detection_fields.detection_scores: detection_scores,
        detection_fields.detection_classes: detection_classes,
        detection_fields.detection_masks: detection_masks,
        detection_fields.num_detections: num_detections
    }
    groundtruth = {
        input_data_fields.groundtruth_boxes: groundtruth_boxes,
        input_data_fields.groundtruth_classes: groundtruth_classes,
        input_data_fields.groundtruth_instance_masks: groundtruth_instance_masks
    }
    return eval_util.result_dict_for_single_example(image, key, detections,
                                                    groundtruth)
Developer ID: ALISCIFP, Project: models, Lines: 30, Source: eval_util_test.py

Example 5: test_sample_mvn

def test_sample_mvn(session_tf, cov_structure, num_samples):
    """
    Draws 10,000 samples from a distribution
    with known mean and covariance. The test checks
    if the mean and covariance of the samples is
    close to the true mean and covariance.
    """

    N, D = 10000, 2
    means = tf.ones((N, D), dtype=float_type)
    if cov_structure == "full":
        covs = tf.eye(D, batch_shape=[N], dtype=float_type)
    elif cov_structure == "diag":
        covs = tf.ones((N, D), dtype=float_type)

    samples = _sample_mvn(means, covs, cov_structure, num_samples=num_samples)
    value = session_tf.run(samples)

    if num_samples is None:
        assert value.shape == (N, D)
    else:
        assert value.shape == (num_samples, N, D)
        value = value.reshape(-1, D)

    samples_mean = np.mean(value, axis=0)
    samples_cov = np.cov(value, rowvar=False)
    np.testing.assert_array_almost_equal(samples_mean, [1., 1.], decimal=1)
    np.testing.assert_array_almost_equal(samples_cov, [[1., 0.], [0., 1.]], decimal=1)
Developer ID: sanket-kamthe, Project: GPflow, Lines: 28, Source: test_multioutput.py

Example 6: _test_logpdf_scalar

def _test_logpdf_scalar(scalar):
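    # Check norm.logpdf against the stats.norm.logpdf reference value for several
    # equivalent argument forms.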
    x = tf.constant(scalar)
    val_true = stats.norm.logpdf(scalar)
    _assert_eq(norm.logpdf(x), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.ones([1])), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.diag(tf.ones([1]))), val_true)
Developer ID: bakersfieldag, Project: edward, Lines: 7, Source: test_stats_norm_logpdf.py

Example 7: testDtype

 def testDtype(self):
   with self.test_session():
     d = tf.fill([2, 3], 12., name="fill")
     self.assertEqual(d.get_shape(), [2, 3])
     # Test default type for both constant size and dynamic size
     z = tf.ones([2, 3])
     self.assertEqual(z.dtype, tf.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.ones([2, 3]))
     z = tf.ones(tf.shape(d))
     self.assertEqual(z.dtype, tf.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.ones([2, 3]))
     # Test explicit type control
     for dtype in (tf.float32, tf.float64, tf.int32,
                   tf.uint8, tf.int16, tf.int8,
                   tf.complex64, tf.complex128, tf.int64, tf.bool):
       z = tf.ones([2, 3], dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       self.assertAllEqual(z.eval(), np.ones([2, 3]))
       z = tf.ones(tf.shape(d), dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       self.assertAllEqual(z.eval(), np.ones([2, 3]))
Developer ID: 821760408-sp, Project: tensorflow, Lines: 25, Source: constant_op_test.py

Example 8: __init__

    def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0):
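        # One cudnn-compatible forward/backward GRU per layer, plus trainable initial
        # states and per-layer dropout masks built from tf.ones.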
        self.num_layers = num_layers
        self.grus = []
        self.inits = []
        self.dropout_mask = []
        for layer in range(num_layers):
            input_size_ = input_size if layer == 0 else 2 * num_units
            gru_fw = tf.nn.rnn_cell.MultiRNNCell([
                tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(num_units=num_units)])

            gru_bw = tf.nn.rnn_cell.MultiRNNCell([
                tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(num_units=num_units)])

            init_fw = tf.Variable(tf.zeros([num_units]))
            init_fw = tf.expand_dims(tf.tile(tf.expand_dims(init_fw, axis=0), [batch_size, 1]), axis=0)
            init_bw = tf.Variable(tf.zeros([num_units]))
            init_bw = tf.expand_dims(tf.tile(tf.expand_dims(init_bw, axis=0), [batch_size, 1]), axis=0)

            mask_fw = tf.nn.dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                                    keep_prob=keep_prob)
            mask_bw = tf.nn.dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                                    keep_prob=keep_prob)

            self.grus.append((gru_fw, gru_bw,))
            self.inits.append((init_fw, init_bw,))
            self.dropout_mask.append((mask_fw, mask_bw,))
Developer ID: RileyShe, Project: DeepPavlov, Lines: 26, Source: utils.py

Example 9: benchmarkCudnnLSTMTraining

  def benchmarkCudnnLSTMTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      config = test_configs[config_name]
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with tf.Graph().as_default(), tf.device("/gpu:0"):
        model = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers, num_units, num_units)
        params_size_t = model.params_size()
        input_data = tf.Variable(tf.ones([seq_length, batch_size, num_units]))
        input_h = tf.Variable(tf.ones([num_layers, batch_size, num_units]))
        input_c = tf.Variable(tf.ones([num_layers, batch_size, num_units]))
        params = tf.Variable(tf.ones([params_size_t]), validate_shape=False)
        output, output_h, output_c = model(
            is_training=True,
            input_data=input_data,
            input_h=input_h,
            input_c=input_c,
            params=params)
        all_grads = tf.gradients([output, output_h, output_c],
                                 [params, input_data, input_h, input_c])
        training_op = tf.group(*all_grads)
        self._BenchmarkOp(training_op, "cudnn_lstm %s %s" %
                          (config_name, self._GetConfigDesc(config)))
Developer ID: ComeOnGetMe, Project: tensorflow, Lines: 27, Source: cudnn_rnn_ops_benchmark.py

Example 10: test_horovod_broadcast

    def test_horovod_broadcast(self):
        """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            return

        with self.test_session() as session:
            dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
                      tf.int32, tf.int64, tf.float32, tf.float64,
                      tf.bool]
            dims = [1, 2, 3]
            root_ranks = list(range(size))
            for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
                try:
                    tensor = tf.ones([17] * dim) * rank
                    root_tensor = tf.ones([17] * dim) * root_rank
                    if dtype == tf.bool:
                        tensor = tensor % 2
                        root_tensor = root_tensor % 2
                    tensor = tf.cast(tensor, dtype=dtype)
                    root_tensor = tf.cast(root_tensor, dtype=dtype)
                    broadcasted_tensor = hvd.broadcast(tensor, root_rank)
                    self.assertTrue(
                        session.run(tf.reduce_all(tf.equal(
                            tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),
                        "hvd.broadcast produces incorrect broadcasted tensor")
                except Exception:
                    import traceback
                    traceback.print_exc()
Developer ID: kioco, Project: horovod, Lines: 33, Source: mpi_ops_test.py

Example 11: _tf_loss

    def _tf_loss(self, sim, sim_emb):
        """Define loss"""

        if self.use_max_sim_neg:
            max_sim_neg = tf.reduce_max(sim[:, 1:], -1)
            loss = tf.reduce_mean(tf.maximum(0., self.mu_pos - sim[:, 0]) +
                                  tf.maximum(0., self.mu_neg + max_sim_neg))
        else:
            # create an array for mu
            mu = self.mu_neg * np.ones(self.num_neg + 1)
            mu[0] = self.mu_pos

            factors = tf.concat([-1 * tf.ones([1, 1]),
                                 tf.ones([1, tf.shape(sim)[1] - 1])], 1)
            max_margin = tf.maximum(0., mu + factors * sim)
            loss = tf.reduce_mean(tf.reduce_sum(max_margin, -1))

        max_sim_emb = tf.maximum(0., tf.reduce_max(sim_emb, -1))

        loss = (loss +
                # penalize max similarity between intent embeddings
                tf.reduce_mean(max_sim_emb) * self.C_emb +
                # add regularization losses
                tf.losses.get_regularization_loss())
        return loss
Developer ID: wuxingdexian, Project: rasa_nlu, Lines: 25, Source: embedding_intent_classifier.py

Example 12: c_body

        def c_body(c, pa):
          # Zeroing predictions below threshold
          with tf.variable_scope('bboxes_c_select', reuse=True):
            c_scores = b_scores[:, c]
            c_fmask = tf.cast(tf.greater(c_scores, confidence_threshold), scores.dtype)
            c_scores = c_scores * c_fmask
            c_bboxes = b_bboxes * tf.expand_dims(c_fmask, axis=-1)

          # Apply NMS
          with tf.variable_scope('bboxes_c_nms', reuse=True):
            c_indices = tf.image.non_max_suppression(c_bboxes, c_scores, top_k, nms_threshold)
            size = tf.size(c_indices)
            c_batch_ = tf.to_float(b) * tf.ones(shape=[top_k, 1], dtype=tf.float32)  # len(indices) x 1
            c_labels = tf.to_float(c) * tf.ones(shape=[top_k, 1], dtype=tf.float32)  # len(indices) x 1

            extra_size = top_k - size
            c_scores = tf.expand_dims(tf.gather(c_scores, c_indices), axis=-1)  # len(indices) x 1
            empty_c_scores = tf.zeros([extra_size, 1], dtype=tf.float32)
            c_scores = tf.concat([c_scores, empty_c_scores], axis=0)

            c_bboxes = tf.gather(c_bboxes, c_indices)  # len(indices) x 4
            empty_c_bboxes = tf.zeros([extra_size, 4], dtype=tf.float32)
            c_bboxes = tf.concat([c_bboxes, empty_c_bboxes], axis=0)
            c_predictions = tf.concat([c_batch_, c_labels, c_scores, c_bboxes], axis=1)  # len(indices) x 7
          return c + 1, pa.write(index=c - 1, value=c_predictions)
Developer ID: undeadinu, Project: training_toolbox_tensorflow, Lines: 25, Source: ssd_base.py

Example 13: __call__

    def __call__(self, inputs, states, scope=None):
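        # Bilinear recurrent update: the input and state vectors are combined through a
        # rank-constrained 3-way tensor (get_tt_3_tensor / bilinear_product_tt_3).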
        with tf.variable_scope(
                scope or type(self).__name__,
                initializer=tf.random_normal_initializer(stddev=0.01)):
            # get the tensor
            if self._separate_pad:
                t_shape = [self._num_outputs,
                           self._num_outputs,
                           self._num_inputs]
                vec_a = inputs
                vec_b = states
            else:
                t_shape = [self._num_outputs+1,
                           self._num_outputs,
                           self._num_inputs+1]
                vec_a = tf.concat(
                    axis=1, values=[inputs, tf.ones([inputs.get_shape()[0].value, 1])])
                vec_b = tf.concat(
                    axis=1, values=[states, tf.ones([states.get_shape()[0].value, 1])])
            tensor = get_tt_3_tensor(t_shape, self._ranks, name='W')
            result = bilinear_product_tt_3(vec_a, tensor, vec_b)
            if self._separate_pad:
                # TODO possible weightnorm
                D = tf.get_variable('D', [self._num_inputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                E = tf.get_variable('E', [self._num_outputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                b = tf.get_variable('b', [self._num_outputs],
                                    initializer=tf.constant_initializer(0.0))
                z = tf.nn.bias_add(tf.matmul(inputs, D) + tf.matmul(states, E), b)
                result = result + z

            result = self._nonlin(result)
            return result, result
Developer ID: PFCM, Project: rnns, Lines: 34, Source: simple_tensor_rnn.py

Example 14: decoder_fn

 def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
     with tf.name_scope(name, "simple_decoder_fn_inference",
                        [time, cell_state, cell_input, cell_output,
                         context_state]):
         if cell_input is not None:
             raise ValueError("Expected cell_input to be None, but saw: %s" %
                              cell_input)
         if cell_output is None:
             # invariant that this is time == 0
             next_input_id = tf.ones([batch_size], dtype=dtype) * (
                 start_of_sequence_id)
             done = tf.zeros([batch_size], dtype=tf.bool)
             cell_state = encoder_state
             cell_output = tf.zeros([cell_size],
                                    dtype=tf.float32)
         else:
             softmax_output = output_fn(cell_output)
             if sample:
                 next_input_id = tf.squeeze(tf.multinomial(softmax_output, 1), 1)
             else:
                 next_input_id = tf.argmax(softmax_output, 1)
             next_input_id = tf.cast(next_input_id, dtype=dtype)
             done = tf.equal(next_input_id, end_of_sequence_id)
         next_input = tf.gather(embeddings, next_input_id)
         # if time > maxlen, return all true vector
         done = tf.cond(
             tf.greater(time, maximum_length),
             lambda: tf.ones([batch_size], dtype=tf.bool),
             lambda: done)
         return (done, cell_state, next_input, next_input_id, context_state)
Developer ID: futurulus, Project: rl-cards, Lines: 30, Source: tfutils.py

Example 15: _testParamShapes

  def _testParamShapes(self, desired_shape):
    tn_param_shapes = tfd.TruncatedNormal.param_shapes(desired_shape)
    # Check the shapes by comparison with the untruncated Normal.
    n_param_shapes = tfd.Normal.param_shapes(desired_shape)
    self.assertAllEqual(
        self.evaluate(tn_param_shapes["loc"]),
        self.evaluate(n_param_shapes["loc"]))
    self.assertAllEqual(
        self.evaluate(tn_param_shapes["scale"]),
        self.evaluate(n_param_shapes["scale"]))
    self.assertAllEqual(
        self.evaluate(tn_param_shapes["low"]),
        self.evaluate(n_param_shapes["loc"]))
    self.assertAllEqual(
        self.evaluate(tn_param_shapes["high"]),
        self.evaluate(n_param_shapes["loc"]))

    loc = tf.zeros(tn_param_shapes["loc"])
    scale = tf.ones(tn_param_shapes["scale"])
    high = tf.ones(tn_param_shapes["high"])
    low = tf.zeros(tn_param_shapes["low"])
    sample_shape = self.evaluate(
        tf.shape(
            tfd.TruncatedNormal(loc=loc, scale=scale, low=low,
                                high=high).sample()))
    self.assertAllEqual(desired_shape, sample_shape)
Developer ID: asudomoeva, Project: probability, Lines: 26, Source: truncated_normal_test.py


Note: The tensorflow.ones examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Please do not repost without permission.