

Python v1.variables_initializer Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.variables_initializer method in Python. If you are wondering how to call v1.variables_initializer and what it looks like in real code, the curated examples below should help. You can also explore other usage examples from the tensorflow.compat.v1 module.


The sections below present 11 code examples of the v1.variables_initializer method, ordered by popularity by default.
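All of the examples share one pattern: gather the variables you care about, build an initializer op for just that subset with tf.compat.v1.variables_initializer, and run it in a session. The minimal sketch below shows the pattern in isolation; the variable names and values are purely illustrative and do not come from any of the projects listed afterwards.

# Minimal, self-contained sketch of the common pattern (hypothetical names).
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # run in TF1 graph mode under TF2

a = tf.get_variable("a", shape=[2], initializer=tf.zeros_initializer())
b = tf.get_variable("b", shape=[], initializer=tf.constant_initializer(3.0))

# Initializer op that covers only the listed variables, not all globals.
init_ab = tf.variables_initializer([a, b])

with tf.Session() as sess:
    sess.run(init_ab)
    print(sess.run([a, b]))  # -> [array([0., 0.], dtype=float32), 3.0]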

Example 1: build

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def build(self, input_shape):
        with self._sess.graph.as_default():
            self._placeholders["tokens"] = tf.placeholder(
                dtype=tf.int32, shape=[None, None], name="tokens"
            )

            self._ops["output_logits"] = self.compute_logits(
                self._placeholders["tokens"]
            )
            self._ops["output_probs"] = tf.nn.softmax(self._ops["output_logits"], -1)
            result = self.compute_loss_and_acc(
                rnn_output_logits=self._ops["output_logits"],
                target_token_seq=self._placeholders["tokens"],
            )
            self._ops["loss"] = result.token_ce_loss
            self._ops["num_tokens"] = result.num_predictions
            self._ops["num_correct_tokens"] = result.num_correct_token_predictions
            self._ops["train_step"] = self._make_training_step(self._ops["loss"])

            init_op = tf.variables_initializer(
                self._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            )
            self._sess.run(init_op) 
Contributor: microsoft, Project: machine-learning-for-programming-samples, Lines: 25, Source: model_tf1.py

Example 2: testCreateRegularizer_Sliced

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def testCreateRegularizer_Sliced(self):
    # Call handler to create regularizer.
    handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
        _GAMMA_THRESHOLD)
    batch_norm_op_slice = orm.OpSlice(self.batch_norm_op, orm.Slice(0, 3))
    regularizer = handler.create_regularizer(batch_norm_op_slice)

    # Verify regularizer is the gamma tensor.
    with self.cached_session():
      # Initialize the gamma tensor to check value equality.
      with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        gamma_tensor = tf.get_variable('conv1/BatchNorm/gamma')
      init = tf.variables_initializer([gamma_tensor])
      init.run()

      # Verify regularizer is the sliced gamma tensor.
      self.assertAllEqual(gamma_tensor.eval()[0:3],
                          regularizer._gamma.eval()) 
Contributor: google-research, Project: morph-net, Lines: 20, Source: batch_norm_source_op_handler_test.py

Example 3: test_expected_calibration_error_all_bins_filled

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def test_expected_calibration_error_all_bins_filled(self):
    """Test expected calibration error when all bins contain predictions."""
    y_true, y_pred = self._get_calibration_placeholders()
    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
        y_true, y_pred, nbins=2)
    with self.test_session() as sess:
      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
      sess.run(tf.variables_initializer(var_list=metrics_vars))
      # Bin calibration errors (|confidence - accuracy| * bin_weight):
      # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08
      # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1
      sess.run(
          update_op,
          feed_dict={
              y_pred: np.array([0., 0.2, 0.4, 0.5, 1.0]),
              y_true: np.array([0, 0, 1, 0, 1])
          })
    actual_ece = 0.08 + 0.1
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece) 
Contributor: tensorflow, Project: models, Lines: 22, Source: calibration_metrics_tf1_test.py

Example 4: test_expected_calibration_error_all_bins_not_filled

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def test_expected_calibration_error_all_bins_not_filled(self):
    """Test expected calibration error when no predictions for one bin."""
    y_true, y_pred = self._get_calibration_placeholders()
    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
        y_true, y_pred, nbins=2)
    with self.test_session() as sess:
      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
      sess.run(tf.variables_initializer(var_list=metrics_vars))
      # Only the [0, 0.5) bin receives predictions here:
      # |mean confidence - accuracy| * bin_weight = |0.2 - 0.333| * (3/3) = 0.133.
      # The [0.5, 1] bin is empty and contributes nothing.
      sess.run(
          update_op,
          feed_dict={
              y_pred: np.array([0., 0.2, 0.4]),
              y_true: np.array([0, 0, 1])
          })
    actual_ece = np.abs(0.2 - (1 / 3.))
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece) 
Contributor: tensorflow, Project: models, Lines: 22, Source: calibration_metrics_tf1_test.py

Example 5: _test_image_producer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def _test_image_producer(self, batch_group_size, put_slower_than_get):
    # We use the variable x to simulate a staging area of images. x represents
    # the number of batches in the staging area.
    x = tf.Variable(0, dtype=tf.int32)
    if put_slower_than_get:
      put_dep = self._slow_tensorflow_op()
      get_dep = tf.no_op()
    else:
      put_dep = tf.no_op()
      get_dep = self._slow_tensorflow_op()
    with tf.control_dependencies([put_dep]):
      put_op = x.assign_add(batch_group_size, use_locking=True)
    with tf.control_dependencies([get_dep]):
      get_op = x.assign_sub(1, use_locking=True)
    with self.test_session() as sess:
      sess.run(tf.variables_initializer([x]))
      image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size,
                                              use_python32_barrier=False)
      image_producer.start()
      for _ in range(5 * batch_group_size):
        sess.run(get_op)
        # We assert x is nonnegative, to ensure image_producer never causes
        # an unstage op to block. We assert x is at most 2 * batch_group_size,
        # to ensure it doesn't use too much memory by storing too many batches
        # in the staging area.
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)
        image_producer.notify_image_consumption()
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)

      image_producer.done()
      time.sleep(0.1)
      self.assertGreaterEqual(sess.run(x), 0)
      self.assertLessEqual(sess.run(x), 2 * batch_group_size) 
Contributor: tensorflow, Project: benchmarks, Lines: 37, Source: cnn_util_test.py

Example 6: restore

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def restore(cls, saved_model_path: str) -> "LanguageModelTF1":
        with gzip.open(saved_model_path) as f:
            saved_data = pickle.load(f)
        model = cls(saved_data["hyperparameters"], saved_data["vocab"])
        model.build((None, None))

        variables_to_initialize = []
        with model._sess.graph.as_default():
            with tf.name_scope("restore"):
                restore_ops = []
                used_vars = set()
                for variable in sorted(
                    model._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),
                    key=lambda v: v.name,
                ):
                    used_vars.add(variable.name)
                    if variable.name in saved_data["weights"]:
                        # print('Initializing %s from saved value.' % variable.name)
                        restore_ops.append(
                            variable.assign(saved_data["weights"][variable.name])
                        )
                    else:
                        print(
                            "Freshly initializing %s since no saved value was found."
                            % variable.name
                        )
                        variables_to_initialize.append(variable)
                for var_name in sorted(saved_data["weights"]):
                    if var_name not in used_vars:
                        if (
                            var_name.endswith("Adam:0")
                            or var_name.endswith("Adam_1:0")
                            or var_name in ["beta1_power:0", "beta2_power:0"]
                        ):
                            continue
                        print("Saved weights for %s not used by model." % var_name)
                restore_ops.append(tf.variables_initializer(variables_to_initialize))
                model._sess.run(restore_ops)
        return model 
Contributor: microsoft, Project: machine-learning-for-programming-samples, Lines: 41, Source: model_tf1.py

Example 7: testDepthwiseChannelMapping

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def testDepthwiseChannelMapping(self):
    """Verify depth multiplier maps input to output as expected."""
    tf.reset_default_graph()

    # Construct input tensor with shape [1, 4, 4, 5].  There are 5 channels
    # where each channel has values corresponding to the channel index.
    channel0 = tf.ones([1, 4, 4, 1]) * 0
    channel1 = tf.ones([1, 4, 4, 1]) * 1
    channel2 = tf.ones([1, 4, 4, 1]) * 2
    channel3 = tf.ones([1, 4, 4, 1]) * 3
    channel4 = tf.ones([1, 4, 4, 1]) * 4
    inputs = tf.concat(
        [channel0, channel1, channel2, channel3, channel4], axis=3)
    # Sanity check that input tensor is the right shape.
    self.assertAllEqual([1, 4, 4, 5], inputs.shape.as_list())

    conv = layers.separable_conv2d(
        inputs, num_outputs=None, kernel_size=3, depth_multiplier=2,
        weights_initializer=identity_initializer, scope='depthwise_conv')

    with self.cached_session():
      with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        weights = tf.get_variable('depthwise_conv/depthwise_weights')
        biases = tf.get_variable('depthwise_conv/biases', [10],
                                 initializer=tf.zeros_initializer)
      init = tf.variables_initializer([weights, biases])
      init.run()

      # The depth_multiplier replicates channels with [a, a, b, b, c, c, ...]
      # pattern.  Expected output has shape [1, 4, 4, 10].
      expected_output = tf.concat(
          [channel0, channel0,
           channel1, channel1,
           channel2, channel2,
           channel3, channel3,
           channel4, channel4],
          axis=3)
      # Sanity check that output tensor is the right shape.
      self.assertAllEqual([1, 4, 4, 10], expected_output.shape.as_list())

      self.assertAllEqual(expected_output.eval(), conv.eval()) 
Contributor: google-research, Project: morph-net, Lines: 43, Source: depthwise_convolution_op_handler_test.py

Example 8: _initialize_uninitialized

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def _initialize_uninitialized(self, sess):
    global_vars = tf.global_variables()
    is_not_initialized = sess.run(
        [tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [v for (v, f) in zip(global_vars,
                                                is_not_initialized) if not f]

    if not_initialized_vars:
      sess.run(tf.variables_initializer(not_initialized_vars)) 
Contributor: tensorflow, Project: graphics, Lines: 11, Source: reconstruction.py

Example 9: initialize

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def initialize():
    """Initialize all the uninitialized variables in the global scope."""
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))

    ALREADY_INITIALIZED.update(new_variables) 
Contributor: microsoft, Project: nni, Lines: 8, Source: util.py

Example 10: _get_ece

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def _get_ece(self, ece_op, update_op):
    """Return scalar expected calibration error."""
    with self.test_session() as sess:
      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
      sess.run(tf.variables_initializer(var_list=metrics_vars))
      _ = sess.run(update_op)
    return sess.run(ece_op) 
Contributor: tensorflow, Project: models, Lines: 9, Source: calibration_evaluation_tf1_test.py

Example 11: test_expected_calibration_error_with_multiple_data_streams

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import variables_initializer [as alias]
def test_expected_calibration_error_with_multiple_data_streams(self):
    """Test expected calibration error when multiple data batches provided."""
    y_true, y_pred = self._get_calibration_placeholders()
    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
        y_true, y_pred, nbins=2)
    with self.test_session() as sess:
      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
      sess.run(tf.variables_initializer(var_list=metrics_vars))
      # Identical data to test_expected_calibration_error_all_bins_filled,
      # except split over three batches.
      sess.run(
          update_op,
          feed_dict={
              y_pred: np.array([0., 0.2]),
              y_true: np.array([0, 0])
          })
      sess.run(
          update_op,
          feed_dict={
              y_pred: np.array([0.4, 0.5]),
              y_true: np.array([1, 0])
          })
      sess.run(
          update_op, feed_dict={
              y_pred: np.array([1.0]),
              y_true: np.array([1])
          })
    actual_ece = 0.08 + 0.1
    expected_ece = sess.run(expected_ece_op)
    self.assertAlmostEqual(actual_ece, expected_ece) 
Contributor: tensorflow, Project: models, Lines: 32, Source: calibration_metrics_tf1_test.py


Note: the tensorflow.compat.v1.variables_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.