This article collects typical usage examples of the Python function tensorflow.python.framework.test_util.use_gpu. If you have been wondering what exactly use_gpu does and how to use it, the curated code examples below should help.
The article presents 15 code examples of the use_gpu function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
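Before the examples, here is a minimal self-contained sketch of the basic pattern (the test class and method names are hypothetical; the imports follow the TensorFlow-internal style used throughout the examples below). use_gpu() is a context manager that places the ops built inside it on a GPU when one is available and falls back to the CPU otherwise:

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class UseGpuExampleTest(test_util.TensorFlowTestCase):

  def testAddRunsOnGpuIfAvailable(self):
    # use_gpu() prefers GPU placement but degrades gracefully, so this
    # test passes on machines with and without a GPU.
    with test_util.use_gpu():
      x = constant_op.constant([1.0, 2.0])
      y = constant_op.constant([3.0, 4.0])
      self.assertAllEqual(self.evaluate(math_ops.add(x, y)), [4.0, 6.0])


if __name__ == "__main__":
  test.main()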
Example 1: testGain
def testGain(self):
  shape = (10, 10)
  for dtype in [dtypes.float32, dtypes.float64]:
    init_default = init_ops_v2.Identity()
    init_custom = init_ops_v2.Identity(gain=0.9)
    with test_util.use_gpu():
      self.assertAllClose(self.evaluate(init_default(shape, dtype=dtype)),
                          np.eye(*shape))
    with test_util.use_gpu():
      self.assertAllClose(self.evaluate(init_custom(shape, dtype=dtype)),
                          np.eye(*shape) * 0.9)
Example 2: _VerifyValues
def _VerifyValues(self, image, ksizes, strides, padding, patches):
  """Tests input-output pairs for the ExtractVolumePatches op.

  Args:
    image: Input tensor with shape:
      [batch, in_planes, in_rows, in_cols, depth].
    ksizes: Patch size specified as: [ksize_planes, ksize_rows, ksize_cols].
    strides: Output strides, specified as:
      [stride_planes, stride_rows, stride_cols].
    padding: Padding type.
    patches: Expected output.

  Note:
    rates are not supported as of now.
  """
  ksizes = [1] + ksizes + [1]
  strides = [1] + strides + [1]
  with test_util.use_gpu():
    out_tensor = array_ops.extract_volume_patches(
        constant_op.constant(image),
        ksizes=ksizes,
        strides=strides,
        padding=padding,
        name="im2col_3d")
    self.assertAllClose(patches, self.evaluate(out_tensor))
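A hypothetical call to this helper (testIdentityPatches is illustrative, not from the original file): with a 1x1x1 patch and unit strides, ExtractVolumePatches is the identity map, so the expected patches equal the input image itself.

def testIdentityPatches(self):
  # 1x1x1 patches at stride 1 reproduce the input exactly.
  image = np.arange(8, dtype=np.float32).reshape(1, 2, 2, 2, 1)
  self._VerifyValues(
      image,
      ksizes=[1, 1, 1],
      strides=[1, 1, 1],
      padding="VALID",
      patches=image)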
Example 3: _testGradientsSimple
def _testGradientsSimple(self, dtype):
  # Test both positive and negative concat axis.
  # -2 and 1 correspond to the same axis for 3-dimensional tensors.
  for axis in [-2, 1]:
    with test_util.use_gpu():
      inp = []
      inp_tensors = []
      for x in [1, 2, 6]:
        shape = [10, x, 2]
        t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
        if dtype.is_complex:
          t += -1j * t
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                t.flatten(),
                shape=shape,
                dtype=dtype))
      c = array_ops.concat(inp_tensors, axis)
      output_shape = [10, 9, 2]
      grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
      if dtype.is_complex:
        grad_inp += -1j * grad_inp
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, axis)
      result = self.evaluate(concated_grad)
      self.assertAllEqual(result, grad_inp)
Example 4: testNCHWToNHWC2D
def testNCHWToNHWC2D(self):
  x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
  x = constant_op.constant(x_val)
  y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
  with test_util.use_gpu():
    y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]])
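For intuition, a standalone NumPy sketch of what data_format_vec_permute computes (permute_vec is a hypothetical helper, not TensorFlow API): each entry is moved to the index that its dimension letter occupies in the destination format.

import numpy as np

def permute_vec(x, src="NHWC", dst="NCHW"):
  # Entry i of x describes dimension src[i]; place it at that letter's
  # position in dst. Handles both the 1-D and the 2-D (row-pair)
  # variants used in these examples.
  x = np.asarray(x)
  return x[[src.index(d) for d in dst]]

print(permute_vec([[7, 4], [9, 3], [4, 5], [5, 1]],
                  src="NCHW", dst="NHWC"))
# [[7 4] [4 5] [5 1] [9 3]] -- matches the expected value above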
Example 5: testTwoOpsIndependent
def testTwoOpsIndependent(self):
  with test_util.use_gpu():
    sample_op1, sample_op2 = self._make_ops(32)
    sample1, sample2 = self.evaluate([sample_op1, sample_op2])
    # We expect sample1 and sample2 to be independent.
    # 1 in 2^32 chance of this assertion failing.
    self.assertFalse(np.equal(sample1, sample2).all())
Example 6: testOneOpMultipleStepsIndependent
def testOneOpMultipleStepsIndependent(self):
  with test_util.use_gpu():
    sample_op1, _ = self._make_ops(10)
    # Consecutive runs shouldn't yield identical output.
    sample1a = self.evaluate(sample_op1)
    sample1b = self.evaluate(sample_op1)
    self.assertFalse(np.equal(sample1a, sample1b).all())
Example 7: testZeros
def testZeros(self):
  with test_util.use_gpu():
    for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
      zero = constant_op.constant(0, dtype=dtype)
      one = constant_op.constant(1, dtype=dtype)
      bads = [one // zero]
      if dtype in (dtypes.int32, dtypes.int64):
        bads.append(one % zero)
      for bad in bads:
        try:
          result = self.evaluate(bad)
        except errors_impl.OpError as e:
          # Ideally, we'd get a nice exception. In theory, this should only
          # happen on CPU, but 32 bit integer GPU division is actually on
          # CPU due to a placer bug.
          # TODO(irving): Make stricter once the placer bug is fixed.
          self.assertIn('Integer division by zero', str(e))
        else:
          # On the GPU, integer division by zero produces all bits set.
          # But apparently on some GPUs "all bits set" for 64 bit division
          # means 32 bits set, so we allow 0xffffffff as well. This isn't
          # very portable, so we may need to expand this list if other GPUs
          # do different things.
          self.assertTrue(test.is_gpu_available())
          self.assertIn(result, (-1, 0xff, 0xffffffff))
Example 8: testNegativeMinLogits
def testNegativeMinLogits(self):
  random_seed.set_random_seed(78844)
  with test_util.use_gpu():
    logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
    num_samples = 1000
    samples = self.evaluate(random_ops.multinomial(logits, num_samples))
    self.assertAllEqual([[1023] * num_samples], samples)
Example 9: testNHWCToNCHW
def testNHWCToNCHW(self):
  x_val = [7, 4, 9, 3]
  x = constant_op.constant(x_val)
  y = nn_ops.data_format_vec_permute(x)
  with test_util.use_gpu():
    y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [7, 3, 4, 9])
Example 10: testHWNCToNHWC
def testHWNCToNHWC(self):
  x_val = [7, 4, 9, 3]
  x = constant_op.constant(x_val)
  y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
  with test_util.use_gpu():
    y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 7, 4, 3])
Example 11: _compareScalar
def _compareScalar(self, func, x, y, dtype):
  with test_util.use_gpu():
    out = func(
        ops.convert_to_tensor(np.array([x]).astype(dtype)),
        ops.convert_to_tensor(np.array([y]).astype(dtype)))
    ret = self.evaluate(out)
  return ret[0]
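A hypothetical use of this helper (testLessScalar is illustrative only), comparing scalars with math_ops.less under use_gpu():

def testLessScalar(self):
  self.assertTrue(self._compareScalar(math_ops.less, 1, 2, np.float32))
  self.assertFalse(self._compareScalar(math_ops.less, 2, 1, np.float32))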
Example 12: _do_sampling
def _do_sampling(self, logits, num_samples, sampler):
  """Samples using the supplied sampler and inputs.

  Args:
    logits: Numpy ndarray of shape [batch_size, num_classes].
    num_samples: Int; number of samples to draw.
    sampler: A sampler function that takes (1) a [batch_size, num_classes]
      Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.

  Returns:
    Frequencies from sampled classes; shape [batch_size, num_classes].
  """
  with test_util.use_gpu():
    random_seed.set_random_seed(1618)
    op = sampler(constant_op.constant(logits), num_samples)
    d = self.evaluate(op)

  batch_size, num_classes = logits.shape
  freqs_mat = []
  for i in range(batch_size):
    cnts = dict(collections.Counter(d[i, :]))

    # Requires drawn class labels be in range.
    self.assertLess(max(cnts.keys()), num_classes)
    self.assertGreaterEqual(min(cnts.keys()), 0)

    freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
             for k in range(num_classes)]
    freqs_mat.append(freqs)

  return freqs_mat
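A hypothetical caller (testUniformLogits is illustrative only): with all-zero logits every class is equally likely, so the empirical frequencies returned by _do_sampling should be roughly uniform.

def testUniformLogits(self):
  logits = np.zeros((1, 4), dtype=np.float32)
  freqs = self._do_sampling(
      logits, num_samples=10000, sampler=random_ops.multinomial)
  # Each of the 4 classes should be drawn about 25% of the time.
  self.assertAllClose(freqs[0], [0.25] * 4, atol=0.05)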
Example 13: Test
def Test(self):
  if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
    self.skipTest("Skipping infeasible gradient test.")

  # Transpose and possibly conjugate a_np_ and b_np_ according to the
  # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
  # results in a valid matrix multiplication and produces the same result as
  # np.matrix(a_np_) * np.matrix(b_np_)
  effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
  effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

  epsilon = np.finfo(a_np_.dtype).eps
  delta = epsilon**(1.0 / 3.0)
  tol = 20 * delta
  with self.session(), test_util.use_gpu():
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
        [effective_a_np],
        delta=delta)
    self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)

    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
        [effective_b_np],
        delta=delta)
    self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
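As a worked instance of the tolerance rule above (my arithmetic, not part of the original test), for float32 inputs:

epsilon = np.finfo(np.float32).eps   # ~1.19e-07
delta = epsilon**(1.0 / 3.0)         # ~4.9e-03, the finite-difference step
tol = 20 * delta                     # ~9.8e-02, the accepted rtol/atol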
Example 14: _RunAndVerifyGradientsRandom
def _RunAndVerifyGradientsRandom(self):
  # Random dims of rank 5
  input_shape = np.random.randint(1, 5, size=5)
  # Random number of tensors
  num_tensors = np.random.randint(12, 20)
  # Random dim to concat on
  concat_dim = np.random.randint(5)
  concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
  with test_util.use_gpu():
    inp = []
    inp_tensors = []
    for x in concat_dim_sizes:
      shape = input_shape
      shape[concat_dim] = x
      t = np.random.rand(*shape).astype("f")
      inp.append(t)
      inp_tensors.append(
          constant_op.constant(t.flatten(), shape=shape,
                               dtype=dtypes.float32))
    c = array_ops.concat(inp_tensors, concat_dim)
    output_shape = input_shape
    output_shape[concat_dim] = concat_dim_sizes.sum()
    grad_inp = np.random.rand(*output_shape).astype("f")
    grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
    grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
    concated_grad = array_ops.concat(grad, concat_dim)
    result = self.evaluate(concated_grad)
    self.assertAllEqual(result, grad_inp)
Example 15: testGradientsLastDim
def testGradientsLastDim(self):
  # Test both positive and negative concat axis.
  # -1 and 2 correspond to the same axis for 3-dimensional tensors.
  for axis in [-1, 2]:
    with test_util.use_gpu():
      inp = []
      inp_tensors = []
      for x in [1, 2, 6]:
        shape = [10, 2, x]
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                t.flatten(),
                shape=shape,
                dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, 2)
      output_shape = [10, 2, 9]
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, axis)
      result = self.evaluate(concated_grad)
      self.assertAllEqual(result, grad_inp)