This article collects typical usage examples of the Python method tensorflow.python.framework.graph_util.tensor_shape_from_node_def_name. If you have been wondering what exactly graph_util.tensor_shape_from_node_def_name does, how it is used, or what real calls look like, the curated code samples below may help. You can also explore further usage examples from its containing module, tensorflow.python.framework.graph_util.
The following 13 code examples of graph_util.tensor_shape_from_node_def_name are listed below, sorted roughly by popularity.
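Before diving into the examples, a quick orientation: tensor_shape_from_node_def_name looks up a node in a graph by its name and returns the statically inferred TensorShape of its output. A minimal sketch of a direct call (the graph and node names below are made up for illustration, and a TF 1.x-style static graph is assumed):

import tensorflow as tf
from tensorflow.python.framework import graph_util

graph = tf.Graph()
with graph.as_default():
    # A tiny graph: a placeholder feeding a max-pool node named "pool".
    images = tf.placeholder(tf.float32, shape=[1, 224, 224, 3], name="images")
    tf.nn.max_pool(images, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                   padding="SAME", name="pool")

# Resolve the node's statically inferred output shape by name.
shape = graph_util.tensor_shape_from_node_def_name(graph, "pool")
print(shape)  # expected: (1, 112, 112, 3)

All of the examples below follow this pattern: fetch a fully defined shape for a node's input or output, then turn element counts into a flops estimate.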
Example 1: _pool_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _pool_flops(graph, node):
  """Common code which computes flops for pooling operations."""
  # compute flops for average and max pooling
  _verify_conv_data_format(node)
  #
  # Pooling declaration:
  #   Inputs:
  #     - value
  #   Outputs:
  #     - output
  #   Attributes:
  #     - ksize
  #     - strides
  #     - padding
  #     - data_format
  #
  # Pooling implementation:
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_shape = list(node.attr["ksize"].list.i)
  kernel_area = _list_product(kernel_shape)
  return ops.OpStats("flops", kernel_area * out_shape.num_elements())
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 24, Source file: flops_registry.py
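To make the formula concrete (shapes are hypothetical): a 3x3 max pool with ksize = [1, 3, 3, 1] and a fully defined output of shape [1, 112, 112, 64] gives kernel_area = 9 and out_shape.num_elements() = 802,816, so the reported cost is 9 * 802,816 = 7,225,344 flops, i.e. one comparison or addition per window element per output.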
Example 2: _conv_2d_backprop_filter_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _conv_2d_backprop_filter_flops(graph, node):
  """Compute flops for Conv2DBackpropFilter operation."""
  # Formula same as for Conv2DBackpropInput:
  #   batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #   * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
  #
  _verify_conv_data_format(node)
  # image_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
  image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  image_shape.assert_is_fully_defined()
  # kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
  kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  kernel_shape.assert_is_fully_defined()
  # strides
  strides_shape = list(node.attr["strides"].list.i)
  strides_product = strides_shape[1] * strides_shape[2]
  return ops.OpStats("flops",
                     (2 * image_shape.num_elements()
                      * kernel_shape.num_elements()
                      / (image_shape[-1].value * strides_product)))
################################################################################
# Other ops
################################################################################
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 26, Source file: flops_registry.py
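Plugging hypothetical shapes into this estimate: image_shape = [32, 56, 56, 64] (6,422,528 elements), kernel_shape = [3, 3, 64, 128] (73,728 elements) and strides = [1, 1, 1, 1] give 2 * 6,422,528 * 73,728 / (64 * 1) = 14,797,504,512 flops, roughly 14.8 GFLOPs.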
Example 3: _calc_conv_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  filter_in_depth = int(filter_shape[2])
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (output_count * filter_in_depth * filter_height *
                               filter_width * 2))
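For example, with hypothetical shapes filter_shape = [3, 3, 64, 128] and output_shape = [1, 112, 112, 128]: output_count = 1,605,632, so the estimate is 1,605,632 * 64 * 3 * 3 * 2 = 1,849,688,064 flops, one multiply and one add per filter tap per output element.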
Example 4: _calc_depthwise_conv_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
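Note the difference from _calc_conv_flops in Example 3: a depthwise convolution applies each filter slice to a single input channel, so the filter_in_depth factor drops out. For the same hypothetical 3x3 filter and [1, 112, 112, 128] output, the estimate is 1,605,632 * 3 * 3 * 2 = 28,901,376 flops.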
Example 5: _calc_bias_add_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _calc_bias_add_flops(graph, node):
  """Calculates the compute resources needed for BiasAdd."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  input_count = np.prod(input_shape.as_list())
  return ops.OpStats("flops", input_count)
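BiasAdd is counted as one addition per input element, so a hypothetical input of shape [32, 112, 112, 64] would report 32 * 112 * 112 * 64 = 25,690,112 flops.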
Example 6: _calc_dilation2d_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _calc_dilation2d_flops(graph, node):
  """Calculates the compute resources needed for Dilation2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
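The factor of 2 here reads like the depthwise case: morphological dilation computes, for each output and each filter tap, an addition (input plus filter) followed by a running-maximum comparison, so roughly two ops per filter element per output.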
Example 7: _calc_mat_mul_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  if transpose_a:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (k * output_count * 2))
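The reasoning: for a MatMul producing an [m, n] output from a [m, k] by [k, n] product, each of the m * n output elements needs k multiplies and k - 1 adds, which this estimate rounds to 2 * k per element. E.g. m = 128, k = 256, n = 512 gives 2 * 256 * 65,536 = 33,554,432 flops.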
Example 8: _l2_loss_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _l2_loss_flops(graph, node):
  """Compute flops for L2Loss operation."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  # TensorFlow uses an inefficient implementation with (3*N - 1) flops:
  # N multiplies, N - 1 additions and (presumably) N per-element halvings.
  # An optimal implementation (square, sum, halve once) needs ~2*N flops.
  return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 9, Source file: flops_registry.py
Example 9: _binary_per_element_op_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _binary_per_element_op_flops(graph, node, ops_per_element=1):
  """Common code which computes flops for element-wise binary operations."""
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  return ops.OpStats("flops", out_shape.num_elements() * ops_per_element)
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 7, Source file: flops_registry.py
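The ops_per_element parameter lets one helper cover several element-wise ops: plain ops such as Add or Mul keep the default of 1, while a fused op like SquaredDifference (one subtract and one multiply per element) can be registered through the same helper with ops_per_element=2, which appears to be how flops_registry.py uses it.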
Example 10: _reduction_op_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):
  """Common code which computes flops for reduction operations."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  num_flops = (in_shape.num_elements() * reduce_flops
               + out_shape.num_elements() * (finalize_flops - reduce_flops))
  return ops.OpStats("flops", num_flops)
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 11, Source file: flops_registry.py
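Sanity-checking the formula with hypothetical registrations: reducing N input elements to M outputs with Sum (reduce_flops=1, finalize_flops=0) yields N - M flops, exactly the additions required; Mean (reduce_flops=1, finalize_flops=1) yields N flops, i.e. N - M additions plus M divisions.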
Example 11: _avg_pool_grad_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _avg_pool_grad_flops(graph, node):
  """Compute flops for AvgPoolGrad operation."""
  _verify_conv_data_format(node)
  # Pooling gradient implementation:
  out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                                  node.input[1])
  out_backprop_shape.assert_is_fully_defined()
  kernel_shape = list(node.attr["ksize"].list.i)
  kernel_area = _list_product(kernel_shape)
  # TensorFlow multiplies each element of the pooling window by a coefficient,
  # then sums them up, so we count 2 flops per element; a more optimal
  # implementation would do the division once, after the summation.
  return ops.OpStats("flops",
                     kernel_area * out_backprop_shape.num_elements() * 2)
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 16, Source file: flops_registry.py
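With a hypothetical out_backprop shape of [1, 56, 56, 64] (200,704 elements) and ksize = [1, 2, 2, 1] (kernel_area = 4), the estimate is 4 * 200,704 * 2 = 1,605,632 flops.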
Example 12: _conv_2d_backprop_input_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _conv_2d_backprop_input_flops(graph, node):
  """Compute flops for Conv2DBackpropInput operation."""
  # Formula:
  #  batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #  * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
  #
  # Where:
  #  image_x_dim, image_y_dim and input_depth --- size of input to source (no
  #    backprop) convolution, in other words they are sizes of backprop output.
  #  output_depth --- number of filters in the original convolution, thus
  #    depth of backprop input.
  #  kernel_x_dim and kernel_y_dim --- sizes of filter in spatial dimension
  #  image_x_stride and image_y_stride --- strides of the convolution
  #
  _verify_conv_data_format(node)
  # out_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  # kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
  kernel_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  kernel_shape.assert_is_fully_defined()
  # strides
  strides_shape = list(node.attr["strides"].list.i)
  strides_product = strides_shape[1] * strides_shape[2]
  return ops.OpStats("flops",
                     (2 * out_shape.num_elements()
                      * kernel_shape.num_elements()
                      / (out_shape[-1].value * strides_product)))
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 31, Source file: flops_registry.py
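Note the symmetry with _conv_2d_backprop_filter_flops in Example 2: the two functions share one formula, with the roles swapped. Here the node's own output is the recovered input image and the kernel comes from node.input[1], whereas there the node's output was the kernel gradient.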
Example 13: _add_n_flops
# Required import: from tensorflow.python.framework import graph_util [as alias]
# Or: from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name [as alias]
def _add_n_flops(graph, node):
  """Compute flops for AddN operation."""
  if not node.input:
    return _zero_flops(graph, node)
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  return ops.OpStats("flops", in_shape.num_elements() * (len(node.input) - 1))
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 9, Source file: flops_registry.py
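Since AddN sums n input tensors element-wise, it needs (n - 1) additions per element: e.g. AddN over 4 tensors of 1,000 elements each would report 3 * 1,000 = 3,000 flops.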