

Python config.num_classes Code Examples

This article collects typical usage examples of config.num_classes in Python, a module-level attribute of a project's config module. If you are unsure what config.num_classes does, how it is used, or want to see it in context, the hand-picked code examples below may help. You can also explore further usage examples from the config module itself.


The following presents 15 code examples of config.num_classes, sorted by popularity by default.
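For context, config in these examples is an ordinary project-level Python module. A minimal hypothetical sketch of such a module (names and values here are placeholders for illustration, not taken from any of the projects below) might look like:

# config.py -- hypothetical minimal configuration module; values are placeholders
num_classes = 10   # number of target classes used by models, losses and data generators

# Consumers can then use either import style shown in the examples:
#   import config                 ->  config.num_classes
#   from config import num_classes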

Example 1: _score_layer

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def _score_layer(self, input_layer, num_classes, scope):
        import config
        with slim.arg_scope(self.arg_scope):
            logits = slim.conv2d(input_layer, num_classes, [1, 1], 
                 stride=1,
                 activation_fn=None, 
                 scope='score_from_%s'%scope,
                 normalizer_fn=None)
            try:
                use_dropout = config.dropout_ratio > 0
            except AttributeError:  # config may not define dropout_ratio
                use_dropout = False
                
            if use_dropout:
                if self.is_training:
                    dropout_ratio = config.dropout_ratio
                else:
                    dropout_ratio = 0
                keep_prob = 1.0 - dropout_ratio
                tf.logging.info('Using Dropout, with keep_prob = %f'%(keep_prob))
                logits = tf.nn.dropout(logits, keep_prob)
            return logits 
Developer: ZJULearning, Project: pixel_link, Lines of code: 24, Source file: pixel_link_symbol.py

Example 2: _fuse_by_cascade_conv1x1_128_upsamle_concat_conv1x1_2

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def _fuse_by_cascade_conv1x1_128_upsamle_concat_conv1x1_2(self, scope, num_classes = 32):
        import config
        num_layers = len(config.feat_layers)
        
        with tf.variable_scope(scope):
            smaller_score_map = None
            for idx in range(0, len(config.feat_layers))[::-1]: #[4, 3, 2, 1, 0]
                current_layer_name = config.feat_layers[idx]
                current_layer = self.end_points[current_layer_name]
                current_score_map = self._score_layer(current_layer, 
                                      num_classes, current_layer_name)
                if smaller_score_map is None:
                    smaller_score_map = current_score_map
                else:
                    upscore_map = self._upscore_layer(smaller_score_map, current_score_map)
                    smaller_score_map = tf.concat([current_score_map, upscore_map], axis = 0)
            
        return smaller_score_map 
Developer: ZJULearning, Project: pixel_link, Lines of code: 20, Source file: pixel_link_symbol.py

Example 3: _fuse_feat_layers

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def _fuse_feat_layers(self):
        import config
        if config.feat_fuse_type == FUSE_TYPE_cascade_conv1x1_upsample_sum:
            self.pixel_cls_logits = self._fuse_by_cascade_conv1x1_upsample_sum(
                config.num_classes, scope = 'pixel_cls')
            
            self.pixel_link_logits = self._fuse_by_cascade_conv1x1_upsample_sum(
                config.num_neighbours * 2, scope = 'pixel_link')
            
        elif config.feat_fuse_type == FUSE_TYPE_cascade_conv1x1_128_upsamle_sum_conv1x1_2:
            base_map = self._fuse_by_cascade_conv1x1_128_upsamle_sum_conv1x1_2(
                                    scope = 'fuse_feature')
            
            self.pixel_cls_logits = self._score_layer(base_map,
                  config.num_classes, scope = 'pixel_cls')
            
            self.pixel_link_logits = self._score_layer(base_map,
                   config.num_neighbours  * 2, scope = 'pixel_link')
        elif config.feat_fuse_type == FUSE_TYPE_cascade_conv1x1_128_upsamle_concat_conv1x1_2:
            base_map = self._fuse_by_cascade_conv1x1_128_upsamle_concat_conv1x1_2(
                                    scope = 'fuse_feature')
        else:
            raise ValueError('feat_fuse_type not supported:%s'%(config.feat_fuse_type)) 
Developer: ZJULearning, Project: pixel_link, Lines of code: 25, Source file: pixel_link_symbol.py
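Examples 1-3 above (and Examples 9-10 below) read several other attributes from the same config module. A hedged sketch of what that module might define follows; the attribute values are illustrative assumptions, not the actual pixel_link settings.

# Hypothetical config attributes referenced by the pixel_link excerpts; all values are assumptions.
num_classes = 2            # e.g. text vs. non-text pixel classes
num_neighbours = 8         # neighbours per pixel for link prediction
feat_layers = ['conv3_3', 'conv4_3', 'conv5_3', 'fc7', 'conv6_2']  # feature maps to fuse
feat_fuse_type = 'cascade_conv1x1_upsample_sum'  # compared against FUSE_TYPE_* constants in _fuse_feat_layers
dropout_ratio = 0          # > 0 enables dropout in _score_layer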

Example 4: compute_class_prior

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def compute_class_prior(do_plot=False):
    categories_folder = 'data/instance-level_human_parsing/Training/Category_ids'
    names = [f for f in os.listdir(categories_folder) if f.lower().endswith('.png')]
    num_samples = len(names)
    prior_prob = np.zeros(num_classes)
    pb = ProgressBar(total=num_samples, prefix='Compute class prior', suffix='', decimals=3, length=50, fill='=')
    for i in range(num_samples):
        name = names[i]
        filename = os.path.join(categories_folder, name)
        category = np.ravel(cv.imread(filename, 0))
        counts = np.bincount(category)
        idxs = np.nonzero(counts)[0]
        prior_prob[idxs] += counts[idxs]
        pb.print_progress_bar(i + 1)

    prior_prob = prior_prob / (1.0 * np.sum(prior_prob))

    # Save
    np.save(os.path.join(data_dir, "prior_prob.npy"), prior_prob)

    if do_plot:
        plt.hist(prior_prob, bins=100)
        plt.yscale("log")
        plt.show() 
Developer: foamliu, Project: Look-Into-Person, Lines of code: 26, Source file: class_rebal.py

Example 5: __init__

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def __init__(self, args):
        super(ArcMarginModel, self).__init__()

        self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))
        nn.init.xavier_uniform_(self.weight)

        self.easy_margin = args.easy_margin
        self.m = args.margin_m
        self.s = args.margin_s

        self.cos_m = math.cos(self.m)
        self.sin_m = math.sin(self.m)
        self.th = math.cos(math.pi - self.m)
        self.mm = math.sin(math.pi - self.m) * self.m 
Developer: foamliu, Project: InsightFace-PyTorch, Lines of code: 16, Source file: models.py
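The constructor above only precomputes the ArcFace margin constants; the excerpt does not include a forward pass. As a hedged sketch (assuming the standard ArcFace formulation these constants imply, not necessarily the exact code in InsightFace-PyTorch), the forward might look like:

import torch
import torch.nn.functional as F

def forward(self, input, label):
    # Hedged sketch of a typical ArcFace forward; the repository's actual implementation may differ.
    cosine = F.linear(F.normalize(input), F.normalize(self.weight))   # (batch, num_classes)
    sine = torch.sqrt(torch.clamp(1.0 - cosine ** 2, 0, 1))
    phi = cosine * self.cos_m - sine * self.sin_m                     # cos(theta + m)
    if self.easy_margin:
        phi = torch.where(cosine > 0, phi, cosine)
    else:
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
    # Apply the margin only to the target class, then rescale by s
    one_hot = torch.zeros_like(cosine)
    one_hot.scatter_(1, label.view(-1, 1).long(), 1)
    output = one_hot * phi + (1.0 - one_hot) * cosine
    return output * self.s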

Example 6: update_confusion_matrix

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def update_confusion_matrix(all_labels, all_predicts, batch_labels, batch_predicts, num_classes):

    if all_labels is not None:
        assert all_labels.shape[0] == all_predicts.shape[0]
        if all_labels.shape[0] > 10000:
            all_labels = all_labels[-10000:]
            all_predicts = all_predicts[-10000:]

    if all_labels is None and all_predicts is None:
        all_labels = batch_labels
        all_predicts = batch_predicts
    elif all_labels is not None and all_predicts is not None:
        all_labels = torch.cat((all_labels, batch_labels))
        all_predicts = torch.cat((all_predicts, batch_predicts))

    conf_matrix = confusion_matrix(all_labels, all_predicts, labels=list(range(num_classes)))

    probs_matrix = np.zeros(conf_matrix.shape)

    for i in range(probs_matrix.shape[0]):
        row = conf_matrix[i]
        if np.sum(row) == 0:
            probs_row = 0
        else:
            probs_row = row/np.sum(row)
        probs_matrix[i] = probs_row

    probs_matrix = np.around(probs_matrix, decimals=5)
    return probs_matrix, all_labels, all_predicts

#printing the confusion matrix during training 
Developer: BMIRDS, Project: HistoGAN, Lines of code: 33, Source file: utils_model.py

Example 7: create_model

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def create_model(num_layers, pretrain):

    assert num_layers in [18, 34, 50, 101, 152]  # resnet34 (not 24) matches the branches below
    architecture = 'resnet' + str(num_layers)
    model = None

    #for pretrained on imagenet
    if pretrain == True:
        if architecture == 'resnet18':
            model = torchvision.models.resnet18(pretrained=True)
        elif architecture == 'resnet34':
            model = torchvision.models.resnet34(pretrained=True)
        elif architecture == 'resnet50':
            model = torchvision.models.resnet50(pretrained=True)
        elif architecture == 'resnet101':
            model = torchvision.models.resnet101(pretrained=True)
        elif architecture == 'resnet152':
            model = torchvision.models.resnet152(pretrained=True)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, config.num_classes)

    #default he initialization
    else:
        if architecture == 'resnet18':
            model = torchvision.models.resnet18(pretrained=False, num_classes=config.num_classes)
        elif architecture == 'resnet34':
            model = torchvision.models.resnet34(pretrained=False, num_classes=config.num_classes)
        elif architecture == 'resnet50':
            model = torchvision.models.resnet50(pretrained=False, num_classes=config.num_classes)
        elif architecture == 'resnet101':
            model = torchvision.models.resnet101(pretrained=False, num_classes=config.num_classes)
        elif architecture == 'resnet152':
            model = torchvision.models.resnet152(pretrained=False, num_classes=config.num_classes)
        
    return model

#get the data transforms: 
Developer: BMIRDS, Project: HistoGAN, Lines of code: 39, Source file: utils_model.py

Example 8: visualize

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def visualize(wsi_folder, preds_folder, vis_folder, colors):

	#get list of whole slides
	whole_slides = get_all_image_paths(wsi_folder)
	print(len(whole_slides), "whole slides found from", wsi_folder)

	prediction_to_color = {config.classes[i]:color_to_np_color(config.colors[i]) for i in range(config.num_classes)}

	#for each wsi
	for whole_slide in whole_slides:

		#read in the image
		whole_slide_numpy = cv2.imread(whole_slide); print("visualizing", whole_slide, "of shape", whole_slide_numpy.shape); assert whole_slide_numpy.shape[2] == 3
		
		#get the predictions
		xy_to_pred_class = get_xy_to_pred_class(preds_folder, whole_slide.split('/')[-1])
		
		#add the predictions to image
		whole_slide_with_predictions = add_predictions_to_image(xy_to_pred_class, whole_slide_numpy, prediction_to_color)
		
		#save it
		output_path = join(vis_folder, whole_slide.split('/')[-1].split('.')[0]+'_predictions.jpg')
		confirm_output_folder(basefolder(output_path))
		imsave(output_path, whole_slide_with_predictions)

	print('find the visualizations in', vis_folder) 
Developer: BMIRDS, Project: HistoGAN, Lines of code: 28, Source file: utils_evaluation.py

Example 9: _fuse_by_cascade_conv1x1_128_upsamle_sum_conv1x1_2

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def _fuse_by_cascade_conv1x1_128_upsamle_sum_conv1x1_2(self, scope):
        """
        The feature fusion scheme of
            'Deep Direct Regression for Multi-Oriented Scene Text Detection'.

        Instead of fusing scores, the feature maps from 1x1, 128-channel convs are fused,
        and the scores are predicted on the fused features.
        """
        base_map = self._fuse_by_cascade_conv1x1_upsample_sum(num_classes = 128, 
                                                              scope = 'feature_fuse')
        return base_map 
Developer: ZJULearning, Project: pixel_link, Lines of code: 13, Source file: pixel_link_symbol.py

Example 10: _fuse_by_cascade_conv1x1_upsample_sum

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def _fuse_by_cascade_conv1x1_upsample_sum(self, num_classes, scope):
        """
        The feature fusion scheme of FCN for semantic segmentation:
        suppose there are several feature maps with decreasing sizes,
        and we are going to produce a single score map from them.

        Every feature map contributes to the final score map:
            a score is predicted on each feature map using a 1x1 conv,
            with depth equal to num_classes.

        The score maps are upsampled and added in a cascade:
            start from the smallest score map, upsample it to the size
            of the next larger score map, and add them to get a fused
            score map. Upsample this fused map and add it to the next
            larger score map. The final score map is obtained once all
            score maps have been fused.
        """
        import config
        num_layers = len(config.feat_layers)
        
        with tf.variable_scope(scope):
            smaller_score_map = None
            for idx in range(0, len(config.feat_layers))[::-1]: #[4, 3, 2, 1, 0]
                current_layer_name = config.feat_layers[idx]
                current_layer = self.end_points[current_layer_name]
                current_score_map = self._score_layer(current_layer, 
                                      num_classes, current_layer_name)
                if smaller_score_map is None:
                    smaller_score_map = current_score_map
                else:
                    upscore_map = self._upscore_layer(smaller_score_map, current_score_map)
                    smaller_score_map = current_score_map + upscore_map
            
        return smaller_score_map 
Developer: ZJULearning, Project: pixel_link, Lines of code: 36, Source file: pixel_link_symbol.py

Example 11: classifier

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def classifier(config, image_path):
	# prepare
	use_cuda = torch.cuda.is_available()
	FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
	classes = loadClasses(config.clsnamespath)
	# model
	model = NetsTorch(net_name=config.net_name, pretrained=False, num_classes=config.num_classes)
	model.load_state_dict(torch.load(config.weightspath))
	if use_cuda:
		model = model.cuda()
	model.eval()
	# transform
	transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
	# run
	img = Image.open(image_path)
	img_input = transform(img)
	img_input = img_input.type(FloatTensor).unsqueeze(0)
	with torch.no_grad():
		preds = model(img_input)
	preds = nn.Softmax(-1)(preds).cpu()
	max_prob, max_prob_id = preds.view(-1).max(0)
	max_prob = max_prob.item()
	max_prob_id = max_prob_id.item()
	clsname = classes[max_prob_id]
	if max_prob > config.conf_thresh:
		print('[Garbage]: %s, [Conf]: %s.' % (clsname, max_prob))
	else:
		print('No Garbage!!!') 
Developer: CharlesPikachu, Project: garbageClassifier, Lines of code: 30, Source file: demo.py

Example 12: build_model

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def build_model():
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    return model 
Developer: foamliu, Project: Scene-Classification, Lines of code: 10, Source file: model.py
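A hedged usage sketch for the model above (the optimizer and loss below are illustrative choices, not taken from the Scene-Classification repository):

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()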

Example 13: cross_entropy

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def cross_entropy(y_true, y_pred):
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent 
Developer: foamliu, Project: Look-Into-Person, Lines of code: 17, Source file: utils.py

Example 14: __getitem__

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def __getitem__(self, idx):
        i = idx * batch_size

        length = min(batch_size, (len(self.names) - i))
        batch_x = np.empty((length, img_rows, img_cols, 3), dtype=np.float32)
        batch_y = np.empty((length, img_rows, img_cols, num_classes), dtype=np.float32)

        for i_batch in range(length):
            name = self.names[i]
            filename = os.path.join(self.images_folder, name + '.jpg')
            image = cv.imread(filename)
            image_size = image.shape[:2]
            category = get_category(self.categories_folder, name)

            x, y = random_choice(image_size)
            image = safe_crop(image, x, y)
            category = safe_crop(category, x, y)

            if np.random.random_sample() > 0.5:
                image = np.fliplr(image)
                category = np.fliplr(category)

            x = image / 255.
            y = category

            batch_x[i_batch, :, :, 0:3] = x
            batch_y[i_batch, :, :] = to_categorical(y, num_classes)

            i += 1

        return batch_x, batch_y 
Developer: foamliu, Project: Look-Into-Person, Lines of code: 33, Source file: data_generator.py

Example 15: __init__

# Required import: import config [as alias]
# Or: from config import num_classes [as alias]
def __init__(self, input_size, hidden_size, n_layers=1, dropout=0):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)

        # Initialize GRU; the input_size and hidden_size params are both set to 'hidden_size'
        #   because our input size is a word embedding with number of features == hidden_size
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True)
        self.fc = nn.Linear(hidden_size, num_labels * num_classes) 
Developer: foamliu, Project: Sentiment-Analysis, Lines of code: 13, Source file: models.py


Note: The config.num_classes examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, and the copyright of the source code remains with those authors; please consult the corresponding project's License before distributing or using it. Do not reproduce this article without permission.