Python tensorflow.saturate_cast Method Code Examples

This article collects typical usage examples of the Python method tensorflow.saturate_cast. If you are wondering what tensorflow.saturate_cast does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module.


The following 13 code examples of tensorflow.saturate_cast are shown below, sorted by popularity.
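
Before the examples, a quick orientation: tf.saturate_cast first clamps values to the representable range of the target dtype and then casts, whereas a plain tf.cast lets out-of-range values overflow or truncate. A minimal sketch of the difference, assuming TF1-style graph execution as used in the examples below:

import tensorflow as tf

# saturate_cast clamps to [0, 255] before casting to uint8;
# the fractional part is then truncated by the cast (42.7 -> 42).
x = tf.constant([-500.0, 0.0, 42.7, 500.0], dtype=tf.float32)
y = tf.saturate_cast(x, tf.uint8)

with tf.Session() as sess:
    print(sess.run(y))  # [  0   0  42 255]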

Example 1: _legacy_output_transform_func

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
    if out_mul != 1.0:
        expr = [x * out_mul for x in expr]

    if out_add != 0.0:
        expr = [x + out_add for x in expr]

    if out_shrink > 1:
        ksize = [1, 1, out_shrink, out_shrink]
        expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]

    if out_dtype is not None:
        if tf.as_dtype(out_dtype).is_integer:
            expr = [tf.round(x) for x in expr]
        expr = [tf.saturate_cast(x, out_dtype) for x in expr]
    return expr 
Author: produvia | Project: ai-platform | Lines: 18 | File: network.py

Example 2: version_10

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def version_10(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    y_scale = tensor_dict[node.inputs[1]]

    x = tf.cast(x, tf.float32)
    y = tf.divide(x, y_scale)
    y = tf.round(y)
    if len(node.inputs) == 3:
      y_zero_point = tensor_dict[node.inputs[2]]
      y_dtype = y_zero_point.dtype
      y_zero_point = tf.cast(y_zero_point, tf.float32)
      y = tf.add(y, y_zero_point)
    else:  # y_zero_point default dtype = uint8
      y_dtype = tf.uint8

    y = tf.saturate_cast(y, y_dtype)

    return [y] 
Author: onnx | Project: onnx-tensorflow | Lines: 21 | File: quantize_linear.py
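
Example 2 implements the ONNX QuantizeLinear formula y = saturate(round(x / y_scale) + y_zero_point). A small hand-check of that arithmetic, with illustrative values chosen here (not taken from the ONNX test suite), assuming TF1 graph mode:

import tensorflow as tf

# round(x / scale) + zero_point, then clamp into the uint8 range.
# Note that tf.round rounds halves to even, so 0.5 rounds to 0.
x = tf.constant([0.0, 1.0, 300.0, -2.0], dtype=tf.float32)
y = tf.saturate_cast(tf.round(x / 2.0) + 128.0, tf.uint8)

with tf.Session() as sess:
    print(sess.run(y))  # [128 128 255 127]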

Example 3: apply_gradients

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    def apply_ops_wrapper():
      update_op = self._optimizer.apply_gradients(grads_and_vars,
                                                  global_step, name)
      apply_ops = []
      with tf.control_dependencies([update_op]):
        for grad, var in grads_and_vars:
          if var.name in self._fp32_to_fp16:
            dst_var = self._fp32_to_fp16[var.name]
            apply_ops.append(
                tf.assign(dst_var, tf.saturate_cast(var, tf.float16))
            )
      if apply_ops:
        return tf.group(apply_ops)
      return update_op

    if self._loss_scaler:
      grad_has_nans, grad_amax = AutomaticLossScaler.check_grads(grads_and_vars)
      should_skip_update = tf.logical_or(tf.is_inf(grad_amax), grad_has_nans)
      loss_scale_update_op = self._loss_scaler.update_op(grad_has_nans,
                                                         grad_amax)
      with tf.control_dependencies([loss_scale_update_op]):
        return tf.cond(should_skip_update, tf.no_op, apply_ops_wrapper)
    else:
      return apply_ops_wrapper() 
Author: NVIDIA | Project: OpenSeq2Seq | Lines: 27 | File: mp_wrapper.py

Example 4: get_decoder_self_attention_bias

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def get_decoder_self_attention_bias(length, dtype=tf.float32):
  """Calculate bias for decoder that maintains model's autoregressive property.

  Creates a tensor that masks out locations that correspond to illegal
  connections, so prediction at position i cannot draw information from future
  positions.

  Args:
    length: int length of sequences in batch.
    dtype: requested dtype of the bias tensor (currently unused; the bias
      is computed in float32 and the final cast is commented out below).

  Returns:
    float32 tensor of shape [1, 1, length, length]
  """
  #print("get_decoder_self_attention_bias", dtype)

  with tf.name_scope("decoder_self_attention_bias"):
    #valid_locs = tf.matrix_band_part(tf.ones([length, length], dtype=dtype), -1, 0)
    valid_locs = tf.matrix_band_part(tf.ones([length, length], dtype=tf.float32), -1, 0)
    valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
    neg_inf=_NEG_INF #if (dtype==tf.float32) else _NEG_INF_FP16
    bias = neg_inf * (1.0 - valid_locs)
    #bias=tf.saturate_cast(bias, dtype=dtype)
  return bias 
Author: NVIDIA | Project: OpenSeq2Seq | Lines: 25 | File: utils.py

Example 5: call

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def call(self, x):
    if self.norm_type=="layernorm_L2":
      epsilon = self.epsilon
      dtype = x.dtype
      x = tf.cast(x=x, dtype=tf.float32)
      mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
      variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
      norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
      result = norm_x * self.scale + self.bias
      return tf.cast(x=result, dtype=dtype)

    else:
      dtype = x.dtype
      if dtype==tf.float16:
        x = tf.cast(x, dtype=tf.float32)
      mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
      x = x - mean
      variance = tf.reduce_mean(tf.abs(x), axis=[-1], keepdims=True)
      norm_x = tf.div(x, variance + self.epsilon)
      y = norm_x * self.scale + self.bias
      if dtype == tf.float16:
        y = tf.saturate_cast(y, dtype)
      return y 
Author: NVIDIA | Project: OpenSeq2Seq | Lines: 25 | File: common.py

Example 6: convert_images_to_uint8

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
    """Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
    Can be used as an output transformation for Network.run().
    """
    images = tf.cast(images, tf.float32)
    if shrink > 1:
        ksize = [1, 1, shrink, shrink]
        images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
    if nchw_to_nhwc:
        images = tf.transpose(images, [0, 2, 3, 1])
    scale = 255 / (drange[1] - drange[0])
    images = images * scale + (0.5 - drange[0] * scale)
    return tf.saturate_cast(images, tf.uint8) 
Author: produvia | Project: ai-platform | Lines: 15 | File: tfutil.py
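
After the scale-and-offset step in Example 6, pixel values can land slightly outside [0, 255]; the final saturate_cast pins them to the uint8 range instead of letting them wrap around. A minimal check of that clamping, assuming TF1 graph mode:

import tensorflow as tf

# Values just outside [0, 255] are clamped, not wrapped.
scaled = tf.constant([-3.2, 0.0, 127.5, 255.7], dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(tf.saturate_cast(scaled, tf.uint8)))  # [  0   0 127 255]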

Example 7: testSaturate

# Required imports: import tensorflow [as alias], import numpy as np
# Alternatively: from tensorflow import saturate_cast [as alias]
def testSaturate(self):
    in_types = tf.float32,
    out_types = tf.int8, tf.uint8, tf.int16, tf.float32
    with self.test_session() as sess:
      for in_type in in_types:
        for out_type in out_types:
          lo, hi = in_type.min, in_type.max
          x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
                          dtype=in_type)
          y = tf.saturate_cast(x, dtype=out_type)
          self.assertEqual(y.dtype, out_type)
          x, y = sess.run([x, y])
          correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
          self.assertAllEqual(correct, y) 
Author: tobegit3hub | Project: deep_image_model | Lines: 16 | File: cast_op_test.py

Example 8: _mu_law_encode

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def _mu_law_encode(signal, channels, dtype):
  mu = tf.saturate_cast(channels - 1, dtype)
  safe_audio_abs = tf.minimum(tf.abs(signal), 1.0)
  magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
  signal = tf.sign(signal) * magnitude
  return tf.cast((signal + 1) / 2 * mu + 0.5, tf.int32) 
Author: NVIDIA | Project: OpenSeq2Seq | Lines: 8 | File: wavenet_encoder.py
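
With channels=256, the mu-law companding in Example 8 maps amplitudes in [-1, 1] onto the integers 0..255, allocating more levels to small magnitudes than to large ones. A hypothetical call of the function above, assuming TF1 graph mode:

import tensorflow as tf

signal = tf.constant([-1.0, 0.0, 1.0], dtype=tf.float32)
encoded = _mu_law_encode(signal, channels=256, dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(encoded))  # [  0 128 255]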

Example 9: run

# Required imports: import tensorflow [as alias], import numpy as np
# Alternatively: from tensorflow import saturate_cast [as alias]
# (also uses the helpers absolute_name_scope and shape_to_list defined elsewhere in tfutil.py)
def run(self, *in_arrays,
        return_as_list  = False,    # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress  = False,    # Print progress to the console? Useful for very large input arrays.
        minibatch_size  = None,     # Maximum minibatch size to use, None = disable batching.
        num_gpus        = 1,        # Number of GPUs to use.
        out_mul         = 1.0,      # Multiplicative constant to apply to the output(s).
        out_add         = 0.0,      # Additive constant to apply to the output(s).
        out_shrink      = 1,        # Shrink the spatial dimensions of the output(s) by the given factor.
        out_dtype       = None,     # Convert the output to the specified data type.
        **dynamic_kwargs):          # Additional keyword arguments to pass into the network construction function.

        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])

        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
                in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                        out_split.append(out_expr)
                self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]

        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin : mb_end] for src in in_arrays]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin : mb_end] = src

        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays

Author: zalandoresearch | Project: disentangling_conditional_gans | Lines: 62 | File: tfutil.py

Example 10: version_10

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import saturate_cast [as alias]
def version_10(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    a = tensor_dict[node.inputs[0]]
    a_scale = tensor_dict[node.inputs[1]]
    a_zero_point = tensor_dict[node.inputs[2]]
    b = tensor_dict[node.inputs[3]]
    b_scale = tensor_dict[node.inputs[4]]
    b_zero_point = tensor_dict[node.inputs[5]]
    y_scale = tensor_dict[node.inputs[6]]
    y_zero_point = tensor_dict[node.inputs[7]]
    y_dtype = y_zero_point.dtype

    # reshape 1-D a_scale, a_zero_point, y_scale and
    # y_zero_point so it can broadcast in arithmetic
    # operations later
    a_scale_shape = a_scale.get_shape().as_list()
    if a_scale_shape and a_scale_shape[0] > 1:
      a_scale = tf.reshape(a_scale, [a_scale_shape[0], 1])
      a_zero_point = tf.reshape(a_zero_point, [a_scale_shape[0], 1])
    y_scale_shape = y_scale.get_shape().as_list()
    if y_scale_shape and y_scale_shape[0] > 1:
      y_scale = tf.reshape(y_scale, [y_scale_shape[0], 1])
      y_zero_point = tf.reshape(y_zero_point, [y_scale_shape[0], 1])

    # cast all inputs to float32
    a = tf.cast(a, tf.float32)
    a_zero_point = tf.cast(a_zero_point, tf.float32)
    b = tf.cast(b, tf.float32)
    b_zero_point = tf.cast(b_zero_point, tf.float32)
    y_zero_point = tf.cast(y_zero_point, tf.float32)

    # dequantize a and b
    dequantized_a = tf.subtract(a, a_zero_point)
    dequantized_a = tf.multiply(dequantized_a, a_scale)
    dequantized_b = tf.subtract(b, b_zero_point)
    dequantized_b = tf.multiply(dequantized_b, b_scale)

    # matmul
    x = tf.matmul(dequantized_a, dequantized_b)

    # quantize x
    y = tf.divide(x, y_scale)
    y = tf.round(y)
    y = tf.add(y, y_zero_point)
    y = tf.saturate_cast(y, y_dtype)

    return [y] 
Author: onnx | Project: onnx-tensorflow | Lines: 49 | File: q_linear_mat_mul.py

Example 11: run_with_session

# Required imports: import tensorflow [as alias], import numpy as np
# Alternatively: from tensorflow import saturate_cast [as alias]
# (also uses the helpers absolute_name_scope and shape_to_list defined elsewhere in tfutil.py)
def run_with_session(self, session, *in_arrays, return_as_list=False,
            # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
            print_progress=False,  # Print progress to the console? Useful for very large input arrays.
            minibatch_size=None,  # Maximum minibatch size to use, None = disable batching.
            num_gpus=1,  # Number of GPUs to use.
            out_mul=1.0,  # Multiplicative constant to apply to the output(s).
            out_add=0.0,  # Additive constant to apply to the output(s).
            out_shrink=1,  # Shrink the spatial dimensions of the output(s) by the given factor.
            out_dtype=None,  # Convert the output to the specified data type.
            **dynamic_kwargs):  # Additional keyword arguments to pass into the network construction function.

        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])

        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
                in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [
                                tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW')
                                for x in out_expr]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                        out_split.append(out_expr)
                self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]

        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin: mb_end] for src in in_arrays]
            mb_out = session.run(out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin: mb_end] = src

        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays

Author: afruehstueck | Project: tileGAN | Lines: 63 | File: tfutil.py

Example 12: run

# Required imports: import tensorflow [as alias], import numpy as np
# Alternatively: from tensorflow import saturate_cast [as alias]
# (also uses the helpers absolute_name_scope and shape_to_list defined elsewhere in tfutil.py)
def run(self, *in_arrays,
        return_as_list  = False,    # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress  = False,    # Print progress to the console? Useful for very large input arrays.
        minibatch_size  = None,     # Maximum minibatch size to use, None = disable batching.
        num_gpus        = 1,        # Number of GPUs to use.
        out_mul         = 1.0,      # Multiplicative constant to apply to the output(s).
        out_add         = 0.0,      # Additive constant to apply to the output(s).
        out_shrink      = 1,        # Shrink the spatial dimensions of the output(s) by the given factor.
        out_dtype       = None,     # Convert the output to the specified data type.
        **dynamic_kwargs):          # Additional keyword arguments to pass into the network construction function.

        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])

        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
                in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                        out_split.append(out_expr)
                self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]

        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin : mb_end] for src in in_arrays]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin : mb_end] = src

        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays


Author: afruehstueck | Project: tileGAN | Lines: 63 | File: tfutil.py

Example 13: optimize_latent_codes

# Required imports: import tensorflow [as alias], import numpy as np
# Alternatively: from tensorflow import saturate_cast [as alias]
# (also uses os, pickle, imageio, tqdm, and dnnlib, tflib, config from the StyleGAN codebase)
def optimize_latent_codes(args):
	tflib.init_tf()

	with dnnlib.util.open_url(STYLEGAN_MODEL_URL, cache_dir=config.cache_dir) as f:
		_G, _D, Gs = pickle.load(f)

	latent_code = tf.get_variable(
		name='latent_code', shape=(1, 18, 512), dtype='float32', initializer=tf.initializers.zeros()
	)

	generated_img = Gs.components.synthesis.get_output_for(latent_code, randomize_noise=False)
	generated_img = tf.transpose(generated_img, [0, 2, 3, 1])
	generated_img = ((generated_img + 1) / 2) * 255
	generated_img = tf.image.resize_images(generated_img, tuple(args.hr_img_size), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
	generated_lr_img = tf.image.resize_images(generated_img, tuple(args.lr_img_size), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
	generated_img_for_display = tf.saturate_cast(generated_img, tf.uint8)

	lr_img = tf.placeholder(tf.float32, [None, args.lr_img_size[0], args.lr_img_size[1], 3])

	perceptual_model = PerceptualModel(img_size=args.lr_img_size)
	generated_img_features = perceptual_model(generated_lr_img)
	target_img_features = perceptual_model(lr_img)

	loss_op = tf.reduce_mean(tf.abs(generated_img_features - target_img_features))

	optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
	train_op = optimizer.minimize(loss_op, var_list=[latent_code])

	sess = tf.get_default_session()

	img_names = sorted(os.listdir(args.lr_imgs_dir))
	for img_name in img_names:
		img = imageio.imread(os.path.join(args.lr_imgs_dir, img_name))

		sess.run(tf.variables_initializer([latent_code] + optimizer.variables()))

		progress_bar_iterator = tqdm(
			iterable=range(args.total_iterations),
			bar_format='{desc}: {percentage:3.0f}% |{bar}| {n_fmt}/{total_fmt}{postfix}',
			desc=img_name
		)

		for i in progress_bar_iterator:
			loss, _ = sess.run(
				fetches=[loss_op, train_op],
				feed_dict={
					lr_img: img[np.newaxis, ...]
				}
			)

			progress_bar_iterator.set_postfix_str('loss=%.2f' % loss)

		hr_imgs, latent_codes = sess.run(
			fetches=[generated_img_for_display, latent_code],
			feed_dict={
				lr_img: img[np.newaxis, ...]
			}
		)

		imageio.imwrite(os.path.join(args.hr_imgs_dir, img_name), hr_imgs[0])
		np.savez(file=os.path.join(args.latents_dir, img_name + '.npz'), latent_code=latent_codes[0]) 
Author: avivga | Project: style-image-prior | Lines: 63 | File: super_resolution.py


Note: The tensorflow.saturate_cast examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before using or redistributing the code, and do not repost without permission.