This article collects typical usage examples of the tensorflow.py_func function in Python. If you have been struggling with questions like "What exactly does py_func do?", "How is py_func used?", or "Where can I find working py_func examples?", then the hand-picked code samples below should help.
The following presents 15 code examples of py_func, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
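As a quick orientation before the examples: tf.py_func wraps an ordinary Python function as a TensorFlow op. The wrapped function receives its inputs as NumPy arrays and must return NumPy values matching the declared output dtypes. A minimal sketch (TF 1.x graph mode; the names here are illustrative only):

import numpy as np
import tensorflow as tf

def double(x):
    # Runs as ordinary Python: x arrives as an np.ndarray.
    return (x * 2).astype(np.float32)

inp = tf.constant([1.0, 2.0, 3.0], tf.float32)
# tf.py_func returns a list of tensors, one per declared output dtype.
out, = tf.py_func(double, [inp], [tf.float32])
out.set_shape(inp.get_shape())  # py_func outputs have unknown static shape

with tf.Session() as sess:
    print(sess.run(out))  # [2. 4. 6.]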
Example 1: read_news
def read_news(filename_queue):
    """Reads and parses examples from news data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
        filename_queue: A queue of strings with the filenames to read from.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features={
        'hubs': tf.FixedLenFeature([3], dtype=tf.int64),
        'words': tf.FixedLenFeature([6250], dtype=tf.int64)
    })

    def unpackbits(arr):
        arr = arr.astype(np.ubyte)
        unpacked_arr = np.unpackbits(arr)
        if len(unpacked_arr) == 24:
            unpacked_arr = unpacked_arr[:BagOfWords.NUM_CLASSES]
        return unpacked_arr.astype(np.float32)

    labels = features['hubs']
    labels, = tf.py_func(unpackbits, [labels], [tf.float32])
    labels.set_shape((BagOfWords.NUM_CLASSES,))
    words = features['words']
    words, = tf.py_func(unpackbits, [words], [tf.float32])
    words.set_shape((BagOfWords.NUM_VOCABULARY_SIZE,))
    return labels, words
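The unpackbits helper above relies on NumPy bit unpacking: the three int64 'hubs' values are reinterpreted as bytes and expanded to 24 bits, which are then truncated to NUM_CLASSES. A quick NumPy-only illustration (the values are made up):

import numpy as np

hubs = np.array([5, 1, 255], dtype=np.int64)  # hypothetical packed label bytes
bits = np.unpackbits(hubs.astype(np.ubyte))   # 3 bytes -> 24 bits
print(bits.shape)  # (24,)
print(bits[:8])    # [0 0 0 0 0 1 0 1], the big-endian bits of 5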
Example 2: testLarge
def testLarge(self):
    with self.test_session() as sess:
        x = tf.zeros([1000000], dtype=np.float32)
        y = tf.py_func(lambda x: x + 1, [x], [tf.float32])
        z = tf.py_func(lambda x: x * 2, [x], [tf.float32])
        for _ in xrange(100):
            sess.run([y[0].op, z[0].op])
Example 3: get_dataset
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
                use_random_transpose=False, num_threads=1):
    """Create and return a TensorFlow dataset from an array."""
    if labels is None:
        dataset = tf.data.Dataset.from_generator(
            lambda: _gen_data(data), tf.float32)
        if use_random_transpose:
            dataset = dataset.map(
                lambda pianoroll: tf.py_func(
                    random_transpose, [pianoroll], tf.float32),
                num_parallel_calls=num_threads)
        dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
            pianoroll, data_shape), num_parallel_calls=num_threads)
    else:
        assert len(data) == len(labels), (
            "Lengths of `data` and `labels` do not match.")
        dataset = tf.data.Dataset.from_generator(
            lambda: _gen_data(data, labels), [tf.float32, tf.int32])
        if use_random_transpose:
            dataset = dataset.map(
                lambda pianoroll, label: (
                    tf.py_func(random_transpose, [pianoroll], tf.float32),
                    label),
                num_parallel_calls=num_threads)
        dataset = dataset.map(
            lambda pianoroll, label: (set_pianoroll_shape(
                pianoroll, data_shape), set_label_shape(label)),
            num_parallel_calls=num_threads)
    dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
    return dataset.prefetch(PREFETCH_SIZE)
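Note that set_pianoroll_shape and set_label_shape are not shown in this snippet; they are needed because tensors returned by tf.py_func come back with unknown static shape. A plausible minimal version of these helpers (an assumption, not the project's actual code):

def set_pianoroll_shape(pianoroll, data_shape):
    """Restore the static shape that tf.py_func erased (hypothetical helper)."""
    pianoroll.set_shape(data_shape)
    return pianoroll

def set_label_shape(label):
    """Pin the label to a scalar static shape (hypothetical helper)."""
    label.set_shape([])
    return label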
Example 4: g_rinv
def g_rinv(layer, x1_target, x0_activation):
    with tf.variable_scope(vscope[layer], reuse=True):
        V_ = tf.get_variable('V')
        c_ = tf.get_variable('c')
        relu_inv = tf.py_func(ops.relu().f_inv, [x1_target, x0_activation],
                              [tf.float32], name='x3_')[0]
        add_inv = tf.sub(relu_inv, b[layer], name='x2_')
        return tf.py_func(ops.linear().f_inv,
                          [add_inv, x0_activation, W[layer]],
                          [tf.float32], name='x1_')[0]
Example 5: testCaching
def testCaching(self):
    """Confirm that the cached control outputs are recalculated between calls."""
    a = tf.constant(1)
    b = tf.constant(2)
    with tf.control_dependencies([a]):
        c = tf.constant(42)

    shared = {}

    def sub(t):
        shared[t] = shared.get(t, 0) + 1
        return t

    a = subscribe.subscribe(a, lambda t: tf.py_func(sub, [t], [t.dtype]))

    with tf.control_dependencies([b]):
        d = tf.constant(11)

    # If it were using outdated cached control_outputs, then
    # evaluating would not trigger the new subscription.
    b = subscribe.subscribe(b, lambda t: tf.py_func(sub, [t], [t.dtype]))

    with self.test_session() as sess:
        c_out = sess.run([c])
        d_out = sess.run([d])

    self.assertEquals(c_out, [42])
    self.assertEquals(d_out, [11])
    self.assertEquals(shared, {2: 1, 1: 1})
Example 6: testBasic
def testBasic(self):

    def my_func(x, y):
        return np.sinh(x) + np.cosh(y)

    # scalar
    with self.test_session():
        x = tf.constant(1.0, tf.float32)
        y = tf.constant(2.0, tf.float32)
        z = tf.py_func(my_func, [x, y], [tf.float32])
        self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))

    # array
    with self.test_session():
        x = tf.constant([1.0, 2.0], tf.float64)
        y = tf.constant([2.0, 3.0], tf.float64)
        z = tf.py_func(my_func, [x, y], [tf.float64])
        self.assertAllEqual(z[0].eval(),
                            my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))

    # a bit exotic type (complex64)
    with self.test_session():
        x = tf.constant(1 + 2j, tf.complex64)
        y = tf.constant(3 + 4j, tf.complex64)
        z, = tf.py_func(my_func, [x, y], [tf.complex64])
        self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))

    # a bit exotic function (rfft)
    with self.test_session():
        x = tf.constant([1.0, 2.0, 3.0, 4.0], tf.float32)

        def rfft(x):
            return np.fft.rfft(x).astype(np.complex64)

        y, = tf.py_func(rfft, [x], [tf.complex64])
        self.assertAllClose(y.eval(), np.fft.rfft([1.0, 2.0, 3.0, 4.0]))
Example 7: evaluate
def evaluate(dataset_path):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        train_dir = Path(FLAGS.checkpoint_dir)
        reference_shape = mio.import_pickle(train_dir / 'reference_shape.pkl')

        images, gt_truth, inits, _ = data_provider.batch_inputs(
            [dataset_path], reference_shape,
            batch_size=FLAGS.batch_size, is_training=False)
        mirrored_images, _, mirrored_inits, shapes = data_provider.batch_inputs(
            [dataset_path], reference_shape,
            batch_size=FLAGS.batch_size, is_training=False, mirror_image=True)

        print('Loading model...')
        # Build a Graph that computes the logits predictions from the
        # inference model.
        with tf.device(FLAGS.device):
            patch_shape = (FLAGS.patch_size, FLAGS.patch_size)
            pred, _, _ = mdm_model.model(images, inits, patch_shape=patch_shape)
            tf.get_variable_scope().reuse_variables()
            pred_mirrored, _, _ = mdm_model.model(
                mirrored_images, mirrored_inits, patch_shape=patch_shape)

        pred_images, = tf.py_func(utils.batch_draw_landmarks,
                                  [images, pred], [tf.float32])
        gt_images, = tf.py_func(utils.batch_draw_landmarks,
                                [images, gt_truth], [tf.float32])

        summaries = []
        summaries.append(tf.image_summary(
            'images', tf.concat(2, [gt_images, pred_images]), max_images=5))

        avg_pred = pred + tf.py_func(flip_predictions,
                                     (pred_mirrored, shapes), (tf.float32,))[0]
        avg_pred /= 2.

        # Calculate predictions.
        norm_error = mdm_model.normalized_rmse(avg_pred, gt_truth)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            mdm_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_summary(summaries)
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                                graph_def=graph_def)

        while True:
            _eval_once(saver, summary_writer, norm_error, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example 8: testGradientFunction
def testGradientFunction(self):
    # An input to tf.py_func is necessary; otherwise get_gradient_function()
    # returns None by default.
    a = tf.constant(0)
    x, = tf.py_func(lambda a: 0, [a], [tf.int64])
    y, = tf.py_func(lambda a: 0, [a], [tf.int64], stateful=False)
    self.assertEqual(None, ops.get_gradient_function(x.op))
    self.assertEqual(None, ops.get_gradient_function(y.op))
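The same fact can be observed through tf.gradients: because no gradient function is registered for py_func by default, differentiating through it yields None. A minimal sketch (Example 11 below shows the standard workaround):

import numpy as np
import tensorflow as tf

x = tf.constant(3.0)
y, = tf.py_func(lambda v: np.float32(v * 2.0), [x], [tf.float32])
print(tf.gradients(y, x))  # [None]: py_func is opaque to autodiff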
Example 9: augment
def augment(image, cfg):
    options = cfg.IMAGE_AUGMENTATIONS

    if options.FLIP_LEFT_RIGHT:
        image = tf.image.random_flip_left_right(image)

    if options.CROP:
        # We want the size to be larger, and then we will crop a region out of it.
        target_size = tf.to_int32(cfg.INPUT_SIZE * options.CROP_UPSCALE_FACTOR)
        if cfg.MAINTAIN_ASPECT_RATIO:
            # Resize the image up, then pad with 0s.
            params = [image, target_size, target_size]
            output = tf.py_func(resize_image_maintain_aspect_ratio, params,
                                [tf.float32],
                                name="resize_maintain_aspect_ratio")
            image = output[0]
        else:
            # Just resize it.
            image = tf.image.resize_images(image, (target_size, target_size))
        image = tf.random_crop(image, [cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
    else:
        if cfg.MAINTAIN_ASPECT_RATIO:
            # Resize the image up, then pad with 0s.
            params = [image, tf.constant(cfg.INPUT_SIZE),
                      tf.constant(cfg.INPUT_SIZE)]
            output = tf.py_func(resize_image_maintain_aspect_ratio, params,
                                [tf.float32],
                                name="resize_maintain_aspect_ratio")
            image = output[0]
        else:
            # Just resize it.
            image = tf.image.resize_images(image,
                                           (cfg.INPUT_SIZE, cfg.INPUT_SIZE))

    if options.BRIGHTNESS:
        image = tf.image.random_brightness(image, max_delta=63)

    if options.CONTRAST:
        image = tf.image.random_contrast(image, lower=0.2, upper=1.8)

    return image
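resize_image_maintain_aspect_ratio is referenced above but not defined in this snippet. For orientation only, here is a hedged sketch of what such a py_func target could look like, assuming an OpenCV-based resize (the project's real helper may differ):

import cv2
import numpy as np

def resize_image_maintain_aspect_ratio(image, target_h, target_w):
    """Scale so the image fits inside (target_h, target_w), then zero-pad."""
    h, w = image.shape[:2]
    scale = min(float(target_h) / h, float(target_w) / w)
    new_h, new_w = int(h * scale), int(w * scale)
    resized = cv2.resize(image, (new_w, new_h))  # dsize is (width, height)
    padded = np.zeros((target_h, target_w, image.shape[2]), dtype=np.float32)
    padded[:new_h, :new_w] = resized
    return padded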
Example 10: build_graph
def build_graph(self):
    """Builds the data processing graph using the ``tf.data`` API."""
    self._dataset = tf.data.Dataset.from_tensor_slices(self._files)
    if self.params['shuffle']:
        self._dataset = self._dataset.shuffle(self._size)
    self._dataset = self._dataset.repeat()

    if self.params['mode'] != 'infer':
        self._dataset = self._dataset.map(
            lambda line: tf.py_func(
                self._parse_audio_transcript_element,
                [line],
                [self.params['dtype'], tf.int32, tf.int32, tf.int32],
                stateful=False,
            ),
            num_parallel_calls=8,
        )
        self._dataset = self._dataset.padded_batch(
            self.params['batch_size'],
            padded_shapes=([None, self.params['num_audio_features']],
                           1, [None], 1)
        )
    else:
        self._dataset = self._dataset.map(
            lambda line: tf.py_func(
                self._parse_audio_element,
                [line],
                [self.params['dtype'], tf.int32],
                stateful=False,
            ),
            num_parallel_calls=8,
        )
        self._dataset = self._dataset.padded_batch(
            self.params['batch_size'],
            padded_shapes=([None, self.params['num_audio_features']], 1)
        )

    self._iterator = self._dataset.prefetch(8).make_initializable_iterator()

    if self.params['mode'] != 'infer':
        x, x_length, y, y_length = self._iterator.get_next()
        # Need to explicitly set the batch size dimension
        # (it is employed in the model).
        y.set_shape([self.params['batch_size'], None])
        y_length = tf.reshape(y_length, [self.params['batch_size']])
    else:
        x, x_length = self._iterator.get_next()
    x.set_shape([self.params['batch_size'], None,
                 self.params['num_audio_features']])
    x_length = tf.reshape(x_length, [self.params['batch_size']])

    self._input_tensors = {}
    self._input_tensors["source_tensors"] = [x, x_length]
    if self.params['mode'] != 'infer':
        self._input_tensors['target_tensors'] = [y, y_length]
Example 11: SigJoin
def SigJoin(x, y, m, fixedLast=None):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    if fixedLast is None:
        tf.RegisterGradient(rnd_name)(_sigJoinGrad)
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(_sigJoinImp, [x, y, m], tf.float32,
                              name="SigJoin")
    else:
        tf.RegisterGradient(rnd_name)(_sigJoinGradFixed)
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(_sigJoinFixedImp, [x, y, m, fixedLast],
                              tf.float32, name="SigJoin")
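_sigJoinImp, _sigJoinGrad and their "fixed" variants are defined elsewhere. The pattern itself is the classic py_func custom-gradient trick: pair a NumPy forward pass with a gradient function of the standard (op, grad) signature and attach it via gradient_override_map. A hypothetical minimal pair, purely for illustration (a sigmoid-gated blend, not the author's actual operation):

import numpy as np
import tensorflow as tf

def _sigJoinImp(x, y, m):
    # Hypothetical forward pass, executed as plain NumPy:
    # blend x and y with a sigmoid gate s = sigmoid(m).
    s = 1.0 / (1.0 + np.exp(-m))
    return (s * x + (1.0 - s) * y).astype(np.float32)

def _sigJoinGrad(op, grad):
    # Gradient functions receive the op and the upstream gradient, and must
    # return one gradient per op input: here d/dx, d/dy and d/dm.
    x, y, m = op.inputs[0], op.inputs[1], op.inputs[2]
    s = tf.sigmoid(m)
    return grad * s, grad * (1.0 - s), grad * (x - y) * s * (1.0 - s)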
Example 12: testStrings
def testStrings(self):

    def read_fixed_length_numpy_strings():
        return np.array([" there"])

    def read_and_return_strings(x, y):
        return x + y

    with self.test_session():
        x = tf.constant(["hello", "hi"], tf.string)
        y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string])
        z, = tf.py_func(read_and_return_strings, [x, y], [tf.string])
        self.assertListEqual(list(z.eval()), ["hello there", "hi there"])
Example 13: build_model
def build_model(self):
    if self.y_dim:
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')

    self.ir_images = tf.placeholder(
        tf.float32, [self.batch_size] + self.ir_image_shape, name='ir_images')
    self.normal_images = tf.placeholder(
        tf.float32, [self.batch_size] + self.normal_image_shape,
        name='normal_images')
    self.ir_sample_images = tf.placeholder(
        tf.float32, [self.sample_size] + self.ir_image_shape,
        name='ir_sample_images')
    self.ei_images = tf.placeholder(
        tf.float32, [self.batch_size] + self.ir_image_shape, name='ei_images')

    self.G = self.generator(self.ir_images)
    self.D = self.discriminator(self.normal_images)   # real image output
    self.sampler = self.sampler(self.ir_images)
    self.D_ = self.discriminator(self.G, reuse=True)  # fake image output

    self.d_sum = tf.histogram_summary("d", self.D)
    self.d__sum = tf.histogram_summary("d_", self.D_)

    self.d_loss_real = binary_cross_entropy_with_logits(
        tf.ones_like(self.D), self.D)
    self.d_loss_fake = binary_cross_entropy_with_logits(
        tf.zeros_like(self.D_), self.D_)

    self.ang_loss = tf.py_func(norm_, [self.G, self.normal_images],
                               [tf.float64])
    self.ang_loss = tf.to_float(self.ang_loss[0], name='ToFloat')
    self.L2_loss = tf.reduce_sum(
        tf.pow(tf.sub(self.G, self.normal_images), 2)) / (2 * self.batch_size)
    self.EI_loss = tf.py_func(compute_ei, [self.G], [tf.float64])
    self.EI_loss = tf.to_float(self.EI_loss[0], name='ToFloat')

    self.g_loss = binary_cross_entropy_with_logits(
        tf.ones_like(self.D_), self.D_)
    self.gen_loss = (self.g_loss * self.lambda_g +
                     self.L2_loss * self.lambda_l2 +
                     self.EI_loss * self.lambda_ei)

    self.g_loss_sum = tf.scalar_summary("g_loss", self.g_loss)
    self.ang_loss_sum = tf.scalar_summary("ang_loss", self.ang_loss)
    self.l2_loss_sum = tf.scalar_summary("l2_loss", self.L2_loss)
    self.ei_loss_sum = tf.scalar_summary("ei_loss", self.EI_loss)
    self.gen_loss_sum = tf.scalar_summary("gen_loss", self.gen_loss)
    self.d_loss_real_sum = tf.scalar_summary("d_loss_real", self.d_loss_real)
    self.d_loss_fake_sum = tf.scalar_summary("d_loss_fake", self.d_loss_fake)

    self.d_loss = self.d_loss_real + self.d_loss_fake
    self.d_loss_sum = tf.scalar_summary("d_loss", self.d_loss)

    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver()
Example 14: return_fn
def return_fn(trX, trY, teX):
    with tf.device(device):
        with tf.device("/cpu:0"):
            if probs:
                return tf.py_func(
                    _py_fit_predict,
                    [tf.identity(trX), tf.identity(trY), tf.identity(teX)],
                    [tf.int64, tf.int64, tf.float32])
            else:
                return tf.py_func(
                    _py_fit_predict,
                    [tf.identity(trX), tf.identity(trY), tf.identity(teX)],
                    [tf.int64, tf.int64])
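_py_fit_predict is not part of this snippet; the call structure suggests it fits a classical estimator on (trX, trY) and predicts on teX inside the graph. A hedged sketch using scikit-learn's k-NN for the two-output branch (the estimator choice and the meaning of the second output are assumptions):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def _py_fit_predict(trX, trY, teX):
    """Fit on the training split, predict on the test split (hypothetical)."""
    clf = KNeighborsClassifier(n_neighbors=5)
    clf.fit(trX, trY)
    pred = clf.predict(teX).astype(np.int64)
    # Return two int64 arrays to match the declared [tf.int64, tf.int64]
    # signature; the second output here is only a placeholder.
    return pred, pred.copy()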
Example 15: log_prob
def log_prob(self, xs, zs):
    """
    Parameters
    ----------
    xs : any
        A batch of data points, as any data type the user interfaces with
        when defining this method.
    zs : list or tf.Tensor
        A list of tf.Tensors if there are multiple variational families,
        otherwise a single tf.Tensor for a single variational family.

    Returns
    -------
    tf.Tensor
        S-vector of type tf.float32,
        [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

    Notes
    -----
    It wraps around a Python function. The Python function takes
    as input zs of type np.ndarray, and outputs an np.ndarray.
    """
    # Store data in order to later pass it to the Python function.
    self.xs = xs
    return tf.py_func(self._py_log_prob_z, [zs], [tf.float32])[0]
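The wrapped _py_log_prob_z receives zs as an np.ndarray and must return a float32 vector with one log joint density per sample, using the xs stashed above. A minimal hypothetical shape of such a method (the standard-normal prior and the _log_lik helper are assumptions for illustration):

import numpy as np

def _py_log_prob_z(self, zs):
    # zs: np.ndarray of shape [S, d]; self.xs was stored by log_prob().
    log_prior = -0.5 * np.sum(zs * zs, axis=1)
    log_lik = np.array([self._log_lik(self.xs, z) for z in zs])
    return (log_prior + log_lik).astype(np.float32)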