

Python numpy.array_repr Method Code Examples

This article collects typical usage examples of the numpy.array_repr method in Python. If you have been wondering what numpy.array_repr does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the numpy package.


The following presents 15 code examples of the numpy.array_repr method, sorted by popularity by default.
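
Before working through the examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.array_repr does: it returns the printable string representation of an array, which the tests below use to log actual tensor values. The precision and suppress_small arguments are standard numpy.array_repr parameters; the exact output formatting depends on your NumPy version.

import numpy as np

x = np.array([1e-6, 2.5, 3.0], dtype=np.float32)

# array_repr returns the repr string of an array (instead of printing it),
# including the dtype when it is not the default float64.
print(np.array_repr(x))
# e.g. array([1.e-06, 2.5e+00, 3.e+00], dtype=float32)

# precision and suppress_small control how floating point values are rendered;
# with suppress_small=True, very small values display as zero.
print(np.array_repr(x, precision=2, suppress_small=True))
# e.g. array([0. , 2.5, 3. ], dtype=float32)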

Example 1: testBiEncoderForwardPassWithDropout

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testBiEncoderForwardPassWithDropout(self):
    with self.session(use_gpu=False):
      tf.random.set_seed(8372749040)
      p = self._BiEncoderParams()
      p.dropout_prob = 0.5
      mt_enc = encoder.MTEncoderBiRNN(p)
      batch = py_utils.NestedMap()
      batch.ids = tf.transpose(tf.reshape(tf.range(0, 8, 1), [4, 2]))
      batch.paddings = tf.zeros([2, 4])
      enc_out = mt_enc.FPropDefaultTheta(batch).encoded

      self.evaluate(tf.global_variables_initializer())
      actual_enc_out = enc_out.eval()
      print('bi_enc_actual_enc_out_with_dropout', np.array_repr(actual_enc_out))
      expected_enc_out = [[[-1.8358192e-05, 1.2103478e-05],
                           [2.9347059e-06, -3.0652325e-06]],
                          [[-8.1282624e-06, 4.5443494e-06],
                           [3.0826509e-06, -5.2950490e-06]],
                          [[-4.6669629e-07, 2.4246765e-05],
                           [-1.5221613e-06, -1.9654153e-06]],
                          [[-1.1511075e-05, 1.9061190e-05],
                           [-5.7250163e-06, 9.2785704e-06]]]
      self.assertAllClose(expected_enc_out, actual_enc_out) 
Developer ID: tensorflow, Project: lingvo, Lines: 25, Source: encoder_test.py

Example 2: testFProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
    with self.session():
      tf.random.set_seed(_TF_RANDOM_SEED)
      p = self._testParams()
      p.dtype = dtype
      if fprop_dtype:
        p.fprop_dtype = fprop_dtype
        p.input.dtype = fprop_dtype
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      loss = mdl.loss
      logp = mdl.eval_metrics['log_pplx'][0]
      self.evaluate(tf.global_variables_initializer())
      vals = []
      for _ in range(5):
        vals += [self.evaluate((loss, logp))]

      print('actual vals = %s' % np.array_repr(np.array(vals)))
      self.assertAllClose(vals, [[233.57518, 10.381119], [236.10052, 10.378047],
                                 [217.99896, 10.380901], [217.94647, 10.378406],
                                 [159.5997, 10.380468]]) 
Developer ID: tensorflow, Project: lingvo, Lines: 23, Source: model_test.py

Example 3: testFProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testFProp(self, dtype=tf.float32):
    with self.session():
      tf.random.set_seed(_TF_RANDOM_SEED)
      p = self._testParams()
      p.dtype = dtype
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      loss = mdl.loss
      logp = mdl.eval_metrics['log_pplx'][0]
      self.evaluate(tf.global_variables_initializer())
      vals = []
      for _ in range(3):
        vals += [self.evaluate((loss, logp))]

      print('actual vals = %s' % np.array_repr(np.array(vals)))
      expected_vals = [
          [326.765106, 10.373495],
          [306.018066, 10.373494],
          [280.08429, 10.373492],
      ]
      self.assertAllClose(vals, expected_vals) 
Developer ID: tensorflow, Project: lingvo, Lines: 23, Source: model_test.py

Example 4: testBeamSearchHelperWithSeqLengths

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testBeamSearchHelperWithSeqLengths(self):
    with self.session(use_gpu=False) as sess:
      topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
          sess, num_hyps_per_beam=3, pass_seq_lengths=True)
      print(np.array_repr(topk_ids))
      print(np.array_repr(topk_lens))
      print(np.array_repr(topk_scores))
      expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
                           [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
                           [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
      expected_topk_lens = [5, 4, 4, 7, 6, 6]
      expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
                              [9.74691486, 8.46679497, 7.14809656]]
      self.assertEqual(expected_topk_ids, topk_ids.tolist())
      self.assertEqual(expected_topk_lens, topk_lens.tolist())
      self.assertAllClose(expected_topk_scores, topk_scores) 
Developer ID: tensorflow, Project: lingvo, Lines: 18, Source: beam_search_helper_test.py

Example 5: testTransformerAttentionLayerFPropMaskedSelfAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testTransformerAttentionLayerFPropMaskedSelfAttention(self):
    with self.session(use_gpu=True) as sess:
      query_vec, paddings, _, _ = self._TransformerAttentionLayerInputs()

      p = attention.TransformerAttentionLayer.Params().Set(
          name='transformer_masked_self_atten',
          input_dim=4,
          is_masked=True,
          num_heads=2)
      p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
      l = p.Instantiate()
      ctx_vec, _ = l.FProp(l.theta, query_vec, None, paddings)

      tf.global_variables_initializer().run()
      actual_ctx = sess.run(ctx_vec)
      actual_ctx = np.reshape(actual_ctx, (10, 4))
      tf.logging.info(np.array_repr(actual_ctx))
      expected_ctx = [7.777687, 5.219166, 6.305151, 4.817311]
      self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=0)) 
Developer ID: tensorflow, Project: lingvo, Lines: 21, Source: batch_major_attention_test.py

Example 6: testTransformerAttentionLayerFPropCrossAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testTransformerAttentionLayerFPropCrossAttention(self):
    with self.session(use_gpu=True) as sess:
      (query_vec, _, aux_vec,
       aux_paddings) = self._TransformerAttentionLayerInputs()
      p = attention.TransformerAttentionLayer.Params().Set(
          name='transformer_cross_atten',
          input_dim=4,
          is_masked=False,
          num_heads=2)
      p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
      l = p.Instantiate()
      ctx_vec, _ = l.FProp(l.theta, query_vec, aux_vec, aux_paddings)

      tf.global_variables_initializer().run()
      actual_ctx = sess.run(ctx_vec)
      actual_ctx = np.reshape(actual_ctx, (10, 4))
      tf.logging.info(np.array_repr(actual_ctx))
      expected_ctx = [19.345360, 15.057412, 13.744134, 13.387347]
      self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=0)) 
Developer ID: tensorflow, Project: lingvo, Lines: 21, Source: batch_major_attention_test.py

Example 7: testTransformerLayerFPropWithCrossAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testTransformerLayerFPropWithCrossAttention(self, multiplier):
    with self.session(use_gpu=True) as sess:
      (query_vec, _, aux_vec,
       aux_paddings) = self._TransformerAttentionLayerInputs()
      query_vec = tf.tile(query_vec, [multiplier, 1, 1])
      paddings = tf.zeros([2 * multiplier, 5])
      p = attention.TransformerLayer.Params()
      p.name = 'transformer_layer'
      p.input_dim = 4
      p.tr_fflayer_tpl.hidden_dim = 7
      p.tr_atten_tpl.num_heads = 2
      p.params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=0)
      l = p.Instantiate()
      ctx_vec, _ = l.FProp(l.theta, query_vec, paddings, aux_vec, aux_paddings)

      tf.global_variables_initializer().run()
      actual_ctx = sess.run(ctx_vec)
      actual_ctx = np.reshape(actual_ctx, (10 * multiplier, 4))
      tf.logging.info(np.array_repr(actual_ctx))
      expected_ctx = [
          4.7839108, 4.5303655, 5.5551023, 5.065767, 5.0493064, 3.2142467,
          2.8200178, 5.659971, 4.3814187, 2.60475
      ] * multiplier
      self.assertAllClose(expected_ctx, np.sum(actual_ctx, axis=1)) 
Developer ID: tensorflow, Project: lingvo, Lines: 26, Source: batch_major_attention_test.py

Example 8: testTransformerDecoderLayerFProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testTransformerDecoderLayerFProp(self):
    with self.session(use_gpu=True) as sess:
      (query_vec, paddings, aux_vec,
       aux_paddings) = self._TransformerAttentionLayerInputs()
      l = self._ConstructTransformerDecoderLayer()

      layer_output, _ = l.FProp(l.theta, query_vec, paddings, aux_vec,
                                aux_paddings)

      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(layer_output)
      actual_layer_output = np.reshape(actual_layer_output, (10, 4))
      tf.logging.info(np.array_repr(actual_layer_output))
      expected_layer_output = [16.939590, 24.121685, 19.975197, 15.924350]
      self.assertAllClose(expected_layer_output,
                          np.sum(actual_layer_output, axis=0)) 
Developer ID: tensorflow, Project: lingvo, Lines: 18, Source: batch_major_attention_test.py

Example 9: testTransformerDecoderLayerStackFProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testTransformerDecoderLayerStackFProp(self):
    with self.session(use_gpu=True) as sess:
      (query_vec, paddings, aux_vec,
       aux_paddings) = self._TransformerAttentionLayerInputs()
      l = self._ConstructTransformerDecoderLayerStack()
      layer_output, _ = l.FProp(
          l.theta,
          query_vec=query_vec,
          paddings=paddings,
          aux_vec=aux_vec,
          aux_paddings=aux_paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(layer_output)
      actual_layer_output = np.reshape(actual_layer_output, (10, 4))
      tf.logging.info(np.array_repr(actual_layer_output))
      expected_layer_output = [9.926413, -4.491376, 27.051598, 2.112684]
      self.assertAllClose(expected_layer_output,
                          np.sum(actual_layer_output, axis=0)) 
Developer ID: tensorflow, Project: lingvo, Lines: 20, Source: batch_major_attention_test.py

Example 10: testPerStepSourcePaddingMultiHeadedAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testPerStepSourcePaddingMultiHeadedAttention(self):
    params = attention.MultiHeadedAttention.Params()
    params.name = 'atten'
    params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
    depth = 6
    params.source_dim = depth
    params.query_dim = depth
    params.hidden_dim = depth
    params.vn.global_vn = False
    params.vn.per_step_vn = False
    atten = params.Instantiate()
    prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)
    print('vec_out', np.array_repr(np.sum(vec_out, 1)))
    self.assertAllClose([-0.006338, -0.025153, 0.041647, -0.025153],
                        np.sum(vec_out, 1))
    self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1)) 
Developer ID: tensorflow, Project: lingvo, Lines: 18, Source: attention_test.py

Example 11: testPerStepSourcePaddingLocationSensitiveAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testPerStepSourcePaddingLocationSensitiveAttention(self):
    params = attention.LocationSensitiveAttention.Params()
    params.name = 'atten'
    params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
    depth = 6
    params.source_dim = depth
    params.query_dim = depth
    params.hidden_dim = depth
    params.location_filter_size = 3
    params.location_num_filters = 4
    params.vn.global_vn = False
    params.vn.per_step_vn = False
    atten_state = tf.concat(
        [tf.ones([4, 1], tf.float32),
         tf.zeros([4, 5], tf.float32)], 1)
    atten_state = tf.expand_dims(atten_state, 1)
    atten = params.Instantiate()
    prob_out, vec_out = self._testPerStepSourcePaddingHelper(
        atten, depth, atten_state=atten_state)
    print('vec_out', np.array_repr(np.sum(vec_out, 1)))
    self.assertAllClose([2.001103, 3.293414, 2.306448, 3.293414],
                        np.sum(vec_out, 1))
    self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1)) 
Developer ID: tensorflow, Project: lingvo, Lines: 25, Source: attention_test.py

Example 12: testPerStepSourcePaddingMonotonicAttention

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testPerStepSourcePaddingMonotonicAttention(self):
    params = attention.MonotonicAttention.Params()
    params.name = 'atten'
    params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)
    depth = 6
    params.source_dim = depth
    params.query_dim = depth
    params.hidden_dim = depth
    params.vn.global_vn = False
    params.vn.per_step_vn = False
    atten = params.Instantiate()
    atten_state = atten.ZeroAttentionState(6, 4)
    atten_state.emit_probs = tf.concat(
        [tf.ones([4, 1], tf.float32),
         tf.zeros([4, 5], tf.float32)], 1)
    prob_out, vec_out = self._testPerStepSourcePaddingHelper(
        atten, depth, atten_state=atten_state)
    print('prob_out', np.array_repr(np.sum(prob_out, 1)))
    print('vec_out', np.array_repr(np.sum(vec_out, 1))) 
Developer ID: tensorflow, Project: lingvo, Lines: 21, Source: attention_test.py

Example 13: testLSTMSimpleWithForgetGateInitBias

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testLSTMSimpleWithForgetGateInitBias(self, couple_input_forget_gates,
                                           b_expected):
    params = rnn_cell.LSTMCellSimple.Params().Set(
        name='lstm',
        params_init=py_utils.WeightInit.Constant(0.1),
        couple_input_forget_gates=couple_input_forget_gates,
        num_input_nodes=2,
        num_output_nodes=3,
        forget_gate_bias=2.0,
        bias_init=py_utils.WeightInit.Constant(0.1),
        dtype=tf.float64)

    lstm = rnn_cell.LSTMCellSimple(params)

    np.random.seed(_NUMPY_RANDOM_SEED)
    with self.session(use_gpu=False):
      self.evaluate(tf.global_variables_initializer())
      b_value = lstm._GetBias(lstm.theta).eval()
      tf.logging.info('testLSTMSimpleWithForgetGateInitBias b = %s',
                      np.array_repr(b_value))
      self.assertAllClose(b_value, b_expected)

  # pyformat: disable 
Developer ID: tensorflow, Project: lingvo, Lines: 25, Source: rnn_cell_test.py

Example 14: _testLNLSTMCellFPropBProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def _testLNLSTMCellFPropBProp(self, params, num_hidden_nodes=None):
    tf.reset_default_graph()
    lstm, _, state1 = self._testLNLSTMCellHelper(params, num_hidden_nodes)
    loss = -tf.math.log(
        tf.sigmoid(
            tf.reduce_sum(tf.square(state1.m)) +
            tf.reduce_sum(state1.m * state1.c * state1.c)))
    grads = tf.gradients(loss, lstm.vars.Flatten())

    with self.session(use_gpu=False):
      self.evaluate(tf.global_variables_initializer())
      m_v, c_v, grads_v = self.evaluate([state1.m, state1.c, grads])

    tf.logging.info('m_v = %s', np.array_repr(m_v))
    tf.logging.info('c_v = %s', np.array_repr(c_v))
    grads_val = py_utils.NestedMap()
    for (n, _), val in zip(lstm.vars.FlattenItems(), grads_v):
      tf.logging.info('%s : %s', n, np.array_repr(val))
      grads_val[n] = val
    return m_v, c_v, grads_val

  # pyformat: disable 
Developer ID: tensorflow, Project: lingvo, Lines: 24, Source: rnn_cell_test.py

Example 15: testConv2DLayerFProp

# Required module: import numpy [as alias]
# Or: from numpy import array_repr [as alias]
def testConv2DLayerFProp(self):
    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output1 = [
        [[[ 0.36669245,  0.91488785],
          [ 0.07532132,  0.        ]],
         [[ 0.34952009,  0.        ],
          [ 1.91783941,  0.        ]]],
        [[[ 0.28304493,  0.        ],
          [ 0.        ,  0.        ]],
         [[ 0.        ,  0.86575812],
          [ 0.        ,  1.60203481]]]]
    # pyformat: enable
    # pylint: enable=bad-whitespace
    actual = self._evalConvLayerFProp()
    print('actual = ', np.array_repr(actual))
    self.assertAllClose(expected_output1, actual) 
Developer ID: tensorflow, Project: lingvo, Lines: 19, Source: layers_test.py


Note: The numpy.array_repr examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; refer to each project's License before distributing or reusing the code. Do not reproduce this article without permission.