

Python backend.function Method Code Examples

This article collects typical usage examples of the tensorflow.keras.backend.function method in Python. If you are unsure what backend.function does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore other usage examples from the tensorflow.keras.backend module.


The following presents 15 code examples of the backend.function method, sorted by popularity by default.
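
Before the individual examples, here is a minimal sketch of the common pattern they all share: create a symbolic placeholder, build an expression on it, wrap inputs and outputs into a callable with K.function, then feed NumPy arrays. The expression K.square(x) + 1.0 is purely illustrative and not taken from any of the projects below; the call to tf.compat.v1.disable_eager_execution() follows the note in Example 15 and is needed under TensorFlow 2.x.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

tf.compat.v1.disable_eager_execution()  # K.placeholder/K.function need graph mode in TF 2.x

x = K.placeholder(ndim=2)        # symbolic 2-D input
y = K.square(x) + 1.0            # any symbolic expression built from x (illustrative)
f = K.function([x], [y])         # compile inputs -> outputs into a callable

result = f([np.array([[1.0, 2.0, 3.0]])])[0]
print(result)                    # [[ 2.  5. 10.]]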

Example 1: test_smooth_sigmoid

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_smooth_sigmoid():
  """Test smooth_sigmoid function."""
  test_values = np.array(
      [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
      dtype=K.floatx())

  def ref_smooth_sigmoid(y):
    x = 0.1875 * y + 0.5
    z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
    return z

  sigmoid = np.vectorize(ref_smooth_sigmoid)
  x = K.placeholder(ndim=2)
  f = K.function([x], [smooth_sigmoid(x)])
  result = f([test_values])[0]
  expected = sigmoid(test_values)
  assert_allclose(result, expected, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 19, Source: qactivation_test.py

Example 2: test_hard_sigmoid

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_hard_sigmoid():
  """Test hard_sigmoid function."""
  test_values = np.array(
      [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
      dtype=K.floatx())

  def ref_hard_sigmoid(y):
    x = 0.5 * y + 0.5
    z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
    return z

  sigmoid = np.vectorize(ref_hard_sigmoid)

  x = K.placeholder(ndim=2)
  f = K.function([x], [hard_sigmoid(x)])
  result = f([test_values])[0]
  expected = sigmoid(test_values)
  assert_allclose(result, expected, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 20, Source: qactivation_test.py

Example 3: saliency

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def saliency(input, output):
    with graph.as_default():
        with sess.as_default():
            processed_input = preprocessing(input)
            processed_output = output

            output = 0 if float(output["Positive review"]) > 0.5 else 1
            input_tensors = [model.layers[0].input, K.learning_phase()]
            saliency_input = model.layers[1].input
            saliency_output = model.layers[-1].output[:, output]
            gradients = model.optimizer.get_gradients(saliency_output, saliency_input)
            compute_gradients = K.function(inputs=input_tensors, outputs=gradients)
            saliency_graph = compute_gradients(processed_input.reshape(1, 500))[0]

            saliency_graph = saliency_graph.reshape(500, 32)

            saliency_graph = np.abs(saliency_graph).sum(axis=1)
            normalized_saliency = (saliency_graph - saliency_graph.min()) / \
                                  (saliency_graph.max() - saliency_graph.min())

            start_idx = np.where(processed_input[0] == START_TOKEN)[0][0]
            heat_map = []
            counter = 0
            words = input.split(" ")
            for i in range(start_idx + 1, 500):
                heat_map.extend([normalized_saliency[i]] * len(words[counter]))
                heat_map.append(0)  # zero saliency value assigned to the spaces between words
                counter += 1
            return np.array(heat_map)


Author: gradio-app, Project: gradio-UI, Lines: 33, Source: sentiment-analysis.py

Example 4: test_quantized_relu

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_quantized_relu(bits, integer, use_sigmoid, negative_slope, test_values,
                        expected_values):
  """Test quantized_relu function."""
  x = K.placeholder(ndim=2)
  f = K.function([x], [quantized_relu(bits, integer, use_sigmoid,
                                      negative_slope)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 10, Source: leakyrelu_test.py

Example 5: test_quantized_po2

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_quantized_po2(bits,
                       max_value,
                       use_stochastic_rounding,
                       quadratic_approximation,
                       test_values,
                       expected_values):
  """Test quantized_po2 function."""
  x = K.placeholder(ndim=2)
  f = K.function([x], [quantized_po2(bits, max_value, use_stochastic_rounding,
                                     quadratic_approximation)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05, atol=1e-05) 
Author: google, Project: qkeras, Lines: 14, Source: qactivation_test.py

Example 6: test_quantized_relu_po2

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_quantized_relu_po2(bits,
                            max_value,
                            use_stochastic_rounding,
                            quadratic_approximation,
                            test_values,
                            expected_values):
  """Test quantized_po2 function."""
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [quantized_relu_po2(bits, max_value, use_stochastic_rounding,
                                     quadratic_approximation)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05, atol=1e-05) 
Author: google, Project: qkeras, Lines: 15, Source: qactivation_test.py

Example 7: test_quantized_bits

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_quantized_bits(bits, integer, symmetric, keep_negative, test_values,
                        expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [quantized_bits(bits, integer, symmetric, keep_negative)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 9, Source: qactivation_test.py

Example 8: test_ternary

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_ternary(alpha, threshold, test_values, expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [ternary(alpha, threshold)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 8, Source: qactivation_test.py

Example 9: test_binary

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_binary(use_01, alpha, test_values, expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x], [binary(use_01, alpha)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 7, Source: qactivation_test.py

Example 10: test_stochastic_round_quantized_po2

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_stochastic_round_quantized_po2(test_values, expected_values):
  K.set_learning_phase(1)
  np.random.seed(666)
  x = K.placeholder(ndim=2) 
  q = quantized_po2(use_stochastic_rounding=True)
  f = K.function([x], [q(x)])
  res = f([test_values])[0]
  res = np.average(res)
  assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6) 
Author: google, Project: qkeras, Lines: 11, Source: qactivation_test.py

Example 11: test_stochastic_round_quantized_relu_po2

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_stochastic_round_quantized_relu_po2(test_values, expected_values):
  K.set_learning_phase(1)
  np.random.seed(666)
  x = K.placeholder(ndim=2)
  q = quantized_relu_po2(use_stochastic_rounding=True)
  f = K.function([x], [q(x)])
  res = f([test_values])[0]
  res = np.average(res)
  assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6) 
Author: google, Project: qkeras, Lines: 11, Source: qactivation_test.py

Example 12: test_stochastic_ternary_inference_mode

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def test_stochastic_ternary_inference_mode(alpha, threshold, test_values, expected_values):
  K.set_learning_phase(0)
  x = K.placeholder(ndim=2)
  q = stochastic_ternary(alpha, threshold)
  f = K.function([x],
                 [q(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Author: google, Project: qkeras, Lines: 10, Source: qactivation_test.py

Example 13: _evaluate

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def _evaluate(model: Model, nodes_to_evaluate, x, y=None, auto_compile=False):
    if not model._is_compiled:
        if model.name in ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2', 'mobilenet_v2', 'mobilenetv2']:
            print('Transfer learning detected. Model will be compiled with ("categorical_crossentropy", "adam").')
            print('If you want to change the default behaviour, then do in python:')
            print('model.name = ""')
            print('Then compile your model with whatever loss you want: https://keras.io/models/model/#compile.')
            print('If you want to get rid of this message, add this line before calling keract:')
            print('model.compile(loss="categorical_crossentropy", optimizer="adam")')
            model.compile(loss='categorical_crossentropy', optimizer='adam')
        else:
            if auto_compile:
                model.compile(loss='mse', optimizer='adam')
            else:
                print('Please compile your model first! https://keras.io/models/model/#compile.')
                print('If you only care about the activations (outputs of the layers), '
                      'then just compile your model like that:')
                print('model.compile(loss="mse", optimizer="adam")')
                raise Exception('Compilation of the model required.')

    def eval_fn(k_inputs):
        try:
            return K.function(k_inputs, nodes_to_evaluate)(model._standardize_user_data(x, y))
        except AttributeError:  # one way to avoid forcing non eager mode.
            return K.function(k_inputs, nodes_to_evaluate)((x, y))  # although works.

    try:
        return eval_fn(model._feed_inputs + model._feed_targets + model._feed_sample_weights)
    except Exception:
        return eval_fn(model._feed_inputs) 
Author: philipperemy, Project: keract, Lines: 32, Source: keract.py

Example 14: get_gradients_of_activations

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False):
    """
    Get gradients of the outputs of the activation functions, regarding the loss.
    Intuitively, it shows how your activation maps change over a tiny modification of the loss.
    :param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
    'mobilenet_v2', 'mobilenetv2'].
    :param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
    :param y: Model target (Numpy array). In the case of multi-inputs, y should be of type List.
    :param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
    returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
    :param output_format: Change the output dictionary key of the function.
    - 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
    return {'d1': ...}.
    - 'full': output key will match the full name of the output layer name. In the example above, it will
    return {'d1/BiasAdd:0': ...}.
    - 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
    - 'nested': If specified, will move recursively through the model definition to retrieve nested layers.
                Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names.

                E.g., a model with the following structure

                -layer1
                    -conv1
                    ...
                    -fc1
                -layer2
                    -fc2

                ... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
                If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.

                The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
    :return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}.
    """
    nodes = _get_nodes(model, output_format, nested=nested, layer_names=layer_names)
    return _get_gradients(model, x, y, nodes) 
Author: philipperemy, Project: keract, Lines: 38, Source: keract.py
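
As a hedged usage sketch of the function above (not taken from the keract repository): assuming keract is installed, the model is compiled, and eager execution is disabled as in Example 15, a call might look like the following. The toy model, layer names, and data are illustrative only.

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from keract import get_gradients_of_activations

tf.compat.v1.disable_eager_execution()  # gradients need graph mode, see Example 15

# Illustrative toy model and data (hypothetical names, not from the article).
model = models.Sequential([
    layers.Dense(4, activation='relu', name='d1', input_shape=(8,)),
    layers.Dense(1, activation='sigmoid', name='out'),
])
model.compile(loss='binary_crossentropy', optimizer='adam')

x = np.random.rand(2, 8).astype('float32')
y = np.random.randint(0, 2, size=(2, 1)).astype('float32')

grads = get_gradients_of_activations(model, x, y, output_format='simple')
print(list(grads.keys()))  # e.g. ['d1', 'out'] with the 'simple' output_format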

Example 15: _get_gradients

# Required module: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import function [as alias]
def _get_gradients(model, x, y, nodes):
    if model.optimizer is None:
        raise Exception('Please compile the model first. The loss function is required to compute the gradients.')

    nodes_names = nodes.keys()
    nodes_values = nodes.values()

    try:
        if not hasattr(model, 'total_loss'):
            raise Exception('Disable TF eager mode to use get_gradients.\n'
                            'Add this command at the beginning of your script:\n'
                            'tf.compat.v1.disable_eager_execution()')
        grads = model.optimizer.get_gradients(model.total_loss, nodes_values)
    except ValueError as e:
        if 'differentiable' in str(e):
            # Probably one of the gradients operations is not differentiable...
            grads = []
            differentiable_nodes = []
            for n in nodes_values:
                try:
                    grads.extend(model.optimizer.get_gradients(model.total_loss, n))
                    differentiable_nodes.append(n)
                except ValueError:
                    pass
            nodes_values = differentiable_nodes
        else:
            raise e

    gradients_values = _evaluate(model, grads, x, y)

    return OrderedDict(zip(nodes_names, gradients_values)) 
Author: philipperemy, Project: keract, Lines: 33, Source: keract.py


Note: The tensorflow.keras.backend.function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.