

Python tensorflow.GraphKeys Code Examples

This article compiles typical usage examples of tensorflow.GraphKeys in Python (strictly a class of collection-key constants rather than a method, despite how this index labels it). If you are asking how exactly to use tensorflow.GraphKeys, or looking for concrete examples of it in practice, the curated code examples below may help. You can also explore further TensorFlow usage examples through the projects these snippets come from.


The following presents 10 code examples of tensorflow.GraphKeys, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
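Before the examples, a minimal sketch of what tf.GraphKeys actually is (assuming TensorFlow 1.x, or tf.compat.v1 under 2.x): a namespace of standard string constants naming graph collections, not a callable method.

import tensorflow as tf

# GraphKeys holds standard collection-key constants (plain strings).
v = tf.Variable(0.0, name="v")                    # auto-registered in GLOBAL_VARIABLES
tf.add_to_collection(tf.GraphKeys.LOSSES, v * v)  # register a tensor under LOSSES

print(tf.GraphKeys.GLOBAL_VARIABLES)                     # "variables"
print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))  # [<tf.Variable 'v:0' ...>]
print(tf.get_collection(tf.GraphKeys.LOSSES))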

Example 1: get_GraphKeys

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def get_GraphKeys():
    # Lexicographic string comparison; it happens to order released
    # TensorFlow versions ("0.x", "1.x" < "2.0.0") correctly.
    if tf.__version__ < "2.0.0":
        return tf.GraphKeys
    else:
        return tf.compat.v1.GraphKeys
Developer: shenweichen, Project: DeepCTR, Lines: 7, Source: utils.py
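A hedged usage sketch of the helper above (the call site is hypothetical, not from DeepCTR): the point of the indirection is that the returned namespace exposes the same constants whether the process runs TF 1.x or 2.x.

GraphKeys = get_GraphKeys()
# Pair it with the matching collection API for the running version.
tf_v1 = tf if tf.__version__ < "2.0.0" else tf.compat.v1
update_ops = tf_v1.get_collection(GraphKeys.UPDATE_OPS)  # e.g. batch-norm updates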

Example 2: _get_ops_to_replicate

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def _get_ops_to_replicate(gradient_info_list):
    grads = [gradient_info._grad for gradient_info in gradient_info_list]

    grad_related = set()
    for grad in grads:
        if isinstance(grad, tf.IndexedSlices):
            grad_related.add(grad.indices)
            grad_related.add(grad.values)
            grad_related.add(grad.dense_shape)
        elif isinstance(grad, tf.Tensor):
            grad_related.add(grad)
        else:
            raise RuntimeError("Incorrect grad.")

    grads_ancestor_ops = get_ancestors([grad.op for grad in grad_related],
                                       include_control_inputs=True)
    pipeline_ops = get_pipeline_ops(grads_ancestor_ops)

    global_var_related_ops = set()
    for global_var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        global_var_related_ops.add(global_var.op)
        global_var_related_ops.add(global_var.initializer)
        global_var_related_ops.add(global_var._snapshot.op)

    table_related_ops = set()
    for table_init in tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS):
        table_related_ops.add(table_init)
        table_related_ops.add(table_init.inputs[0].op)

    # Assume that all variables are member of either GLOBAL_VARIABLES
    # or LOCAL_VARIABLES.
    local_var_op_to_var = \
        dict([(var.op, var)
              for var in tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)])
    local_var_ops = set(local_var_op_to_var.keys())
    local_var_ops.intersection_update(grads_ancestor_ops)

    ops_to_replicate = set()
    ops_to_replicate.update(grads_ancestor_ops)
    ops_to_replicate.update(pipeline_ops)
    ops_to_replicate.difference_update(global_var_related_ops)
    ops_to_replicate.difference_update(table_related_ops)
    ops_to_replicate.update(
        [local_var_op_to_var[var_op].initializer for var_op in local_var_ops])

    return ops_to_replicate 
Developer: snuspl, Project: parallax, Lines: 48, Source: in_graph_parallel.py

Example 3: _handle_collection_def

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def _handle_collection_def(multi_gpu_meta_graph_def, op_names_to_replicate,
                           num_replicas):
    allow_bytes_list_keys = [tf.GraphKeys.QUEUE_RUNNERS,
                             tf.GraphKeys.GLOBAL_VARIABLES,
                             tf.GraphKeys.TRAINABLE_VARIABLES,
                             tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                             tf.GraphKeys.LOCAL_VARIABLES,
                             tf.GraphKeys.MODEL_VARIABLES,
                             tf.GraphKeys.GRADIENTS_INFO,
                             tf.GraphKeys.GLOBAL_STEP]
    keys_to_remove = []
    for key, col_def in multi_gpu_meta_graph_def.collection_def.items():
        kind = col_def.WhichOneof("kind")
        # Update node_list collections (e.g., GLOBAL_STEP, TRAIN_OP, UPDATE_OP,
        # LOSSES, ...)
        if kind == 'node_list':
            new_col_def = get_new_col_def_of_node_list(
                            col_def, op_names_to_replicate, num_replicas)
            multi_gpu_meta_graph_def.collection_def[key].Clear()
            multi_gpu_meta_graph_def.collection_def[key].CopyFrom(new_col_def)
        elif kind == 'bytes_list':
            if ops.get_from_proto_function(key):
                # Collections in allow_bytes_list_keys will be handled
                # explicitly below
                # (e.g., QUEUE_RUNNERS, LOCAL_VARIABLES, ...)
                if key in allow_bytes_list_keys:
                    continue
                # Remove unhandled collections (e.g., COND_CONTEXT)
                # TODO: Handle all protos in tf.GraphKeys
                else:
                    keys_to_remove.append(key)
            # Keep collections without proto function
            # (e.g., user defined string)
            else:
                continue
        else:
            raise RuntimeError("Should not reach here")
    for key in keys_to_remove:
        del multi_gpu_meta_graph_def.collection_def[key]

    # Update QUEUE_RUNNERS and LOCAL_VARIABLES collection
    update_queue_runners(multi_gpu_meta_graph_def, op_names_to_replicate,
                         num_replicas)
    update_local_variables(multi_gpu_meta_graph_def, op_names_to_replicate,
                           num_replicas)
    update_shard_info_for_in_graph(multi_gpu_meta_graph_def, num_replicas) 
Developer: snuspl, Project: parallax, Lines: 48, Source: in_graph_parallel.py
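For context on the collection_def handling above: a MetaGraphDef stores each collection either as a node_list (op/tensor names) or a bytes_list (serialized protos), which is exactly the distinction the code branches on. A minimal inspection sketch, assuming TF 1.x and a default graph that already contains some variables:

meta_graph_def = tf.train.export_meta_graph()
for key, col_def in meta_graph_def.collection_def.items():
    # e.g. "variables" -> bytes_list, "train_op" -> node_list
    print(key, col_def.WhichOneof("kind"))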

Example 4: add_tensorboard

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def add_tensorboard(self, session, tensorboard_dir, tb_run_name=None, timeline_enabled=False):
        """
        Add the tensorboard operations to the RNN
        This method will add ops to feed tensorboard
          self.train_summaries_op : will produce the summary for a training step
          self.test_summaries_op : will produce the summary for a test step
          self.summary_writer_op : will write the summary to disk

        Parameters
        ----------
        :param session: the tensorflow session
        :param tensorboard_dir: path to tensorboard directory
        :param tb_run_name: directory name for the tensorboard files inside tensorboard_dir, if None a default dir
                            will be created
        :param timeline_enabled: enable the output of a trace file for timeline visualization
        """
        self.tensorboard_dir = tensorboard_dir
        self.timeline_enabled = timeline_enabled

        # Define GraphKeys for TensorBoard
        graphkey_training = tf.GraphKeys()
        graphkey_test = tf.GraphKeys()

        # Learning rate
        tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])

        # Loss
        with tf.name_scope('Mean_loss'):
            mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)
            tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])

        # Accuracy
        with tf.name_scope('Accuracy_-_Error_Rate'):
            mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)
            tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])

        # Hidden state
        with tf.name_scope('RNN_internal_state'):
            for idx, state_variable in enumerate(self.rnn_tuple_state):
                tf.summary.histogram('Training_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_test])
                tf.summary.histogram('Training_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_test])

        self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)
        self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)
        if tb_run_name is None:
            run_name = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
        else:
            run_name = tb_run_name
        self.summary_writer_op = tf.summary.FileWriter(tensorboard_dir + '/' + run_name + '/', graph=session.graph) 
Developer: inikdom, Project: rnn-speech, Lines: 59, Source: LanguageModel.py
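A side note on the tf.GraphKeys() instantiation above: it simply creates a unique hashable object, which TensorFlow accepts as a custom collection key; a plain string would serve equally well. A minimal sketch with hypothetical names (loss_tensor is assumed to exist):

train_key = tf.GraphKeys()  # any unique hashable object works as a collection key
tf.summary.scalar('loss', loss_tensor, collections=[train_key])
train_summaries_op = tf.summary.merge_all(key=train_key)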

Example 5: add_tensorboard

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def add_tensorboard(self, session, tensorboard_dir, tb_run_name=None, timeline_enabled=False):
        """
        Add the tensorboard operations to the acoustic RNN
        This method will add ops to feed tensorboard
          self.train_summaries_op : will produce the summary for a training step
          self.test_summaries_op : will produce the summary for a test step
          self.summary_writer_op : will write the summary to disk

        Parameters
        ----------
        :param session: the tensorflow session
        :param tensorboard_dir: path to tensorboard directory
        :param tb_run_name: directory name for the tensorboard files inside tensorboard_dir, if None a default dir
                            will be created
        :param timeline_enabled: enable the output of a trace file for timeline visualization
        """
        self.tensorboard_dir = tensorboard_dir
        self.timeline_enabled = timeline_enabled

        # Define GraphKeys for TensorBoard
        graphkey_training = tf.GraphKeys()
        graphkey_test = tf.GraphKeys()

        # Learning rate
        tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])

        # Loss
        with tf.name_scope('Mean_loss'):
            mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)
            tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])

        # Accuracy
        with tf.name_scope('Accuracy_-_Error_Rate'):
            mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)
            tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])

        # Hidden state
        with tf.name_scope('RNN_internal_state'):
            for idx, state_variable in enumerate(self.rnn_tuple_state):
                tf.summary.histogram('Training_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_test])
                tf.summary.histogram('Training_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_test])

        self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)
        self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)
        if tb_run_name is None:
            run_name = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
        else:
            run_name = tb_run_name
        self.summary_writer_op = tf.summary.FileWriter(tensorboard_dir + '/' + run_name + '/', graph=session.graph) 
Developer: inikdom, Project: rnn-speech, Lines: 59, Source: AcousticModel.py

Example 6: build_model

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
        '''Builds the model. Assumes that the input is from range [0, 1].
        Args:
            input_imgs: list of input images (scaled between -1 and 1) with the
                       dimensions specified in the cfg
            is_training: flag for whether the model is in training mode or not
            mask: mask used for computing sum of squares loss. If None, we assume
                  it is np.ones.
        '''
        print('building model')
        cfg = self.cfg
        self.is_training = is_training

        if self.decoder_only:
            encoder_output = input_imgs # Assume that the input is the representation
        else:
            encoder_output = self.build_encoder(input_imgs, is_training)
        # encoder_output = self.build_encoder(input_imgs, is_training)

        final_output = self.build_siamese_output_postprocess(encoder_output, is_training)

        losses = self.get_losses(final_output, targets, is_softmax='l2_loss' not in cfg)
        # use weight regularization
        if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
            add_reg = False
        else:
            add_reg = True
        
        # get losses
        regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
        total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
                                                 name='losses/total_loss')

        self.input_images = input_imgs
        self.targets = targets
        self.masks = masks
        self.encoder_output = encoder_output
        self.losses = losses
        self.task_loss = losses[0]
        self.total_loss = total_loss
        self.decoder_output = final_output
        # add summaries
        slim.summarize_variables()
        slim.summarize_weights()
        slim.summarize_biases()
        slim.summarize_activations()
        slim.summarize_collection(tf.GraphKeys.LOSSES)
        tf.summary.scalar('accuracy', self.accuracy)
        slim.summarize_tensor( regularization_loss )
        slim.summarize_tensor( total_loss )
        self.model_built = True 
Developer: StanfordVL, Project: taskonomy, Lines: 53, Source: siamese_nets.py

Example 7: get_losses

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def get_losses(self, final_output, target, is_softmax=True):
        '''Returns the loss for a Siamese Network.
        Args:
            final_output: tensor that represent the final output of the image bundle.
            target: Tensor of target to be output by the siamese network.
            
        Returns:
            losses: list of tensors representing each loss component
        '''
        print('setting up losses...')
        self.target = target
        self.final_output = final_output
        with tf.variable_scope('losses'):
            if is_softmax:
                correct_prediction = tf.equal(tf.argmax(final_output,1), target)
                self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

                siamese_loss = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                                logits=final_output, 
                                labels=target,
                                name='softmax_loss'))
                self.siamese_loss = siamese_loss
            else:
                # If it's not softmax, it's l2 norm loss.
                self.accuracy = 0
                # self.l2_loss = tf.losses.mean_squared_error(
                #     final_output,
                #     target,
                #     scope='d1',
                #     loss_collection=tf.GraphKeys,
                #     reduction="none")
                target = tf.to_float(target)
                final_output = tf.to_float(final_output)
                self.l2_loss = tf.norm(target - final_output, axis=1)
                # self.l2_loss_sum = tf.reduce_sum(self.l2_loss, 1)

                
                siamese_loss = self.l2_loss
                #if self.threshold is not None:
                if False:
                    ind = tf.unstack(siamese_loss)
                    siamese_loss = [ tf.cond(tf.greater(x, self.threshold), 
                                            lambda: self.threshold + self.threshold * tf.log(x / self.threshold),
                                            lambda: x) for x in ind ]
                    self.robust_l2_loss = siamese_loss
                    siamese_loss = tf.stack(siamese_loss)
                          
                self.siamese_loss = tf.reduce_sum(siamese_loss) / self.cfg['batch_size'] 
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.siamese_loss)

        losses = [self.siamese_loss]
        return losses 
Developer: StanfordVL, Project: taskonomy, Lines: 55, Source: siamese_nets.py

Example 8: build_model

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
        '''Builds the model. Assumes that the input is from range [0, 1].
        Args:
            input_imgs: batch of input images (scaled between -1 and 1) with the
                       dimensions specified in the cfg
            is_training: flag for whether the model is in training mode or not
            mask: mask used for computing sum of squares loss. If None, we assume
                  it is np.ones.
        '''
        print('building model')
        cfg = self.cfg
        self.is_training = is_training
        self.masks = masks
        if self.decoder_only:
            encoder_output = input_imgs
        else:
            encoder_output = self.build_encoder(input_imgs, is_training)

        final_output = self.build_postprocess(encoder_output, is_training)

        losses = self.get_losses(final_output, targets, is_softmax='l2_loss' not in cfg)
        # use weight regularization
        if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
            add_reg = False
        else:
            add_reg = True
        
        # get losses
        regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
        total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
                                                 name='losses/total_loss')

        self.input_images = input_imgs
        self.targets = targets
        self.masks = masks
        self.encoder_output = encoder_output
        self.decoder_output = final_output
        self.losses = losses
        self.total_loss = total_loss

        # add summaries
        if self.extended_summaries:
            slim.summarize_variables()
            slim.summarize_weights()
            slim.summarize_biases()
            slim.summarize_activations()
        slim.summarize_collection(tf.GraphKeys.LOSSES)
        slim.summarize_tensor( regularization_loss )
        slim.summarize_tensor( total_loss )
        self.model_built = True 
Developer: StanfordVL, Project: taskonomy, Lines: 52, Source: basic_feedforward.py

Example 9: build_model

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
        '''Builds the model. Assumes that the input is from range [0, 1].
        Args:
            input_imgs: batch of input images (scaled between -1 and 1) with the
                       dimensions specified in the cfg
            is_training: flag for whether the model is in training mode or not
            mask: mask used for computing sum of squares loss. If None, we assume
                  it is np.ones.
        '''
        print('building model')
        cfg = self.cfg
        self.is_training = is_training

        if self.decoder_only:
            encoder_output = input_imgs
        else:
            encoder_output = self.build_encoder(input_imgs, is_training)

        final_output = encoder_output
        self.input_images = input_imgs
        self.targets = targets
        self.encoder_output = encoder_output
        self.decoder_output = final_output

        if not is_training:
            losses = self.get_losses(final_output, targets, is_softmax='l2_loss' not in cfg)
            # use weight regularization
            if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
                add_reg = False
            else:
                add_reg = True
            
            # get losses
            regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
            total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
                                                     name='losses/total_loss')

            self.losses = losses
            self.total_loss = total_loss
            slim.summarize_tensor( regularization_loss )
            slim.summarize_tensor( total_loss )

        # add summaries
        if self.extended_summaries:
            slim.summarize_variables()
            slim.summarize_weights()
            slim.summarize_biases()
            slim.summarize_activations()
        slim.summarize_collection(tf.GraphKeys.LOSSES)
        self.model_built = True 
Developer: StanfordVL, Project: taskonomy, Lines: 52, Source: resnet_ff.py

Example 10: get_losses

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import GraphKeys [as alias]
def get_losses(self, output_vectors, idx_segments, masks):
        '''Returns the metric loss for 'num_pixels' embedding vectors.

        Args:
            output_vectors: Tensor of per-pixel embedding vectors produced by
                    the network.
            idx_segments: Tensor of (row, col, segment_id) triples indexing the
                    sampled pixels and their segment labels.
            masks: Tensor of masks; stored on the instance for later use.

        Returns:
            losses: list of tensors representing each loss component
        '''
        print('setting up losses...')
        self.output_images = output_vectors
        self.target_images = idx_segments
        self.masks = masks

        with tf.variable_scope('losses'):
            last_axis = 2
            fir, sec, seg_id = tf.unstack(idx_segments, axis=last_axis)

            idxes = tf.stack([self.batch_index_slice, fir, sec], axis=last_axis)
            self.embed = tf.gather_nd( output_vectors, idxes )
            embed = self.embed
            square = tf.reduce_sum( embed*embed, axis=-1 )
            square_t = tf.expand_dims(square, axis=-1)
            square = tf.expand_dims(square, axis=1)

            pairwise_dist = square - 2 * tf.matmul(embed, tf.transpose(embed, perm=[0, 2, 1])) + square_t
            pairwise_dist = tf.clip_by_value(pairwise_dist, 0, 80)
            # pairwise_dist = 0 - pairwise_dist
            self.pairwise_dist = pairwise_dist
            pairwise_exp = tf.exp(pairwise_dist) + 1
            sigma = tf.divide(2, pairwise_exp)
            sigma = tf.clip_by_value(sigma, 1e-7, 1.0 - 1e-7)
            self.sigma = sigma
            same = tf.log(sigma)
            diff = tf.log(1 - sigma)

            self.same = same
            self.diff = diff
            
            seg_id_i = tf.tile(tf.expand_dims(seg_id, -1), [1, 1, self.num_pixels])
            seg_id_j = tf.transpose(seg_id_i, perm=[0,2,1])

            seg_comp = tf.equal(seg_id_i, seg_id_j)
            seg_same = tf.cast(seg_comp, self.input_type) 
            seg_diff = 1 - seg_same

            loss_matrix = seg_same * same + seg_diff * diff
            reduced_loss = 0 - tf.reduce_mean(loss_matrix) # / self.num_pixels
         
        tf.add_to_collection(tf.GraphKeys.LOSSES, reduced_loss)
        self.metric_loss = reduced_loss
        losses = [reduced_loss]
        return losses 
Developer: StanfordVL, Project: taskonomy, Lines: 58, Source: constant_predictor.py
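The pairwise-distance computation in this example relies on the identity ||e_i - e_j||^2 = ||e_i||^2 - 2 e_i . e_j + ||e_j||^2. A small NumPy sketch of the same batched trick, written here only to verify the identity (not taken from the project):

import numpy as np

def pairwise_sq_dist(embed):
    # embed: [batch, num_pixels, dim]
    sq = np.sum(embed * embed, axis=-1)                 # ||e_i||^2, shape [batch, n]
    dot = np.matmul(embed, embed.transpose(0, 2, 1))    # e_i . e_j, shape [batch, n, n]
    return sq[:, None, :] - 2.0 * dot + sq[:, :, None]  # [batch, n, n]

e = np.random.randn(2, 5, 3)
d = pairwise_sq_dist(e)
# Check against the direct definition for one pair:
assert np.allclose(d[0, 1, 2], np.sum((e[0, 1] - e[0, 2]) ** 2))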


Note: The tensorflow.GraphKeys examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.