

Python DB.getTrainingImages Method Code Examples

This article collects typical usage examples of the Python method db.DB.getTrainingImages. If you are unsure what DB.getTrainingImages does or how to use it, the curated examples below may help. You can also explore other usage examples of the db.DB class that this method belongs to.


The following presents 7 code examples of the DB.getTrainingImages method, ordered by popularity by default.
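Before the examples, here is a minimal calling sketch. The project lookup shown (`DB.getProject`) is a hypothetical stand-in not confirmed by this article; the image attributes printed are the ones the examples below rely on.

# Minimal usage sketch (hypothetical setup; `DB.getProject` is an assumption)
from db import DB

project = DB.getProject('myproject')                    # hypothetical project lookup
images = DB.getTrainingImages(project.id, new=False)    # annotated training images
for image in images:
    print(image.id, image.annotationFile, image.annotationStatus)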

Example 1: gen_training_data

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: numpy as np, os, time, json, and the project's Paths helper)
def gen_training_data(project, purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
    def relabel(image):
        id_list = np.unique(image)
        for index, id in enumerate(id_list):
            image[image==id] = index
        return image

    print('gen_data')
    if project is None:
        return

    start_time = time.time()

    files_gray = []
    files_membranes = []
 
    if purpose == 'train':
        images = DB.getTrainingImages( project.id, new=False )
        path = Paths.TrainGrayscale

        for image in images:
            d_path = '%s/%s.tif'%(path, image.id)
            m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)

            if os.path.exists( d_path ) and os.path.exists( m_path ):
                '''
                # load the annotations
                with open( m_path ) as labels_f:
                    annotations = json.load( labels_f )

                # skip if not enough samples in the annotations
                sample_sizes = get_sample_sizes( annotations )
                if np.sum( sample_sizes ) == 0:
                    continue

                label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
                files_gray.append( d_path )
                data_labels.append( annotations )
                '''
                files_gray.append( d_path )
                files_membranes.append( m_path )

    else:
        images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
        path = Paths.ValidLabels
        # (the loop that collects validation files is omitted in this excerpt)

    data_labels = []
    label_sample_sizes = np.array([ 0, 0 ])

    # note: in this excerpt the code that fills data_labels and
    # label_sample_sizes is commented out above, so this check bails out
    if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
        return None

    whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=float)
    whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
    whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)

    # ... (the sampling that assembles `data` from the arrays above is
    # elided in this excerpt)
    return data
Author: Rhoana, Project: icon, Lines: 60, Source: g.py

Example 2: saveannotations

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: Paths, H5Data, and the DATA_* constants from the enclosing module)
    def saveannotations(self, imageId, projectId, data):
        # Always save the annotations to the labels folder.
        path = '%s/%s.%s.json'%(Paths.Labels, imageId,projectId)
        with open(path, 'w') as outfile:
            outfile.write(data)

        # Add a training and prediction task to the database
        DB.saveAnnotations( projectId, imageId, path )

        H5Data.generate_preview( DATA_PATH, DATA_NAME, DATA_PATH_LABELS, DATA_PATH_SEGMENTATION, DATA_PATH_IMAGES, imageId, projectId )

        images = DB.getTrainingImages( projectId )
        for img in images:
            print(img.id, img.annotationFile, img.annotationTime, img.annotationStatus)
Author: Rhoana, Project: icon, Lines: 16, Source: annotationhandler.py

Example 3: saveannotations

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes the project's Paths helper)
    def saveannotations(self, imageId, projectId, data):
        print('saveannotations....%s' % imageId)
        # Always save the annotations to the labels folder.
        path = '%s/%s.%s.json'%(Paths.Labels, imageId,projectId)
        with open(path, 'w') as outfile:
            outfile.write(data)

        # Add a training and prediction task to the database
        DB.saveAnnotations( projectId, imageId, path )

        print('---->>>>>training images for:', projectId)
        images = DB.getTrainingImages( projectId )
        for img in images:
            print(img.id, img.annotationFile, img.annotationTime, img.annotationStatus)
Author: thouis, Project: icon, Lines: 16, Source: annotationhandler.py

Example 4: get_pixel_count

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: json and the project's Paths helper)
    def get_pixel_count(self, project):

        counts = [ 0 for _ in project.labels ]
        images = DB.getTrainingImages( project.id, new=False)

        # Load training samples for each image.
        for image in images:

            annPath = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
            with open(annPath) as json_file:
                print(annPath)
                annotations = json.load( json_file )

            if len(annotations) == 0:
                continue

            for i, coordinates in enumerate(annotations):
                # each annotation is a flat [x, y, x, y, ...] list, so two
                # entries per labeled pixel
                counts[i] += len(coordinates) // 2
            
        return counts
Author: thouis, Project: icon, Lines: 22, Source: data.py
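Example 4 implies the layout of the annotation JSON: one flat coordinate list per project label, two entries per labeled pixel, which is why the pixel count divides by two. A small sketch of that inferred layout (the literal values are made up):

# Inferred annotation layout: one flat [col, row, col, row, ...] list per label
annotations = [
    [10, 12, 11, 12, 11, 13],   # label 0: three annotated pixels
    [40, 52, 41, 52],           # label 1: two annotated pixels
]
counts = [len(coords) // 2 for coords in annotations]
print(counts)  # [3, 2]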

Example 5: load_training

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: numpy as np, plus the project's Data, Paths, Utility, and Entry helpers)
    def load_training(self):

        # retrieve the list of training images
        # (annotated images)
        first_time = (len(self.entries) == 0)
        images     = DB.getTrainingImages( self.project.id, new=(not first_time) )
        imgs = DB.getImages( self.project.id )  # (unused in this excerpt)

        # bail out if there are no images to train on
        if len(images) == 0:
            return

        # determine the maximum number of samples to draw
        # from each image
        n_samples_per_image = Data.MaxSamples // len(images)

        print('#n_samples_per_image:', n_samples_per_image)
        print('#images:', len(images))

        entries = []

        # Load training samples for each image.
        for image in images:

            Utility.report_status( 'loading', image.id)
            print('ttime:', image.trainingTime)
            print('atime:', image.annotationTime)
            print('tstat:', image.trainingStatus)

            offset = len( entries )

            # generate samples for the image
            #data   = self.gen_samples( project, image.id, n_samples_per_image )
            data   = self.gen_samples( Paths.TrainGrayscale, self.project, image.id, n_samples_per_image )
            x_data = data[0]
            y_data = data[1]
            n_data = len( y_data )

            print('wmean:', data[2], 'wstd:', data[3], 'mean:', self.project.mean, 'std:', self.project.std)

            # skip if no annotations found
            if n_data == 0:
                continue

            # add sample to the training set
            if offset == 0:
                x = x_data
                y = y_data
                p = np.ones( n_data, dtype=int )
            else:
                x = np.vstack( (x, x_data) )
                y = np.hstack( (y, y_data) )
                p = np.hstack( (p, np.ones( n_data, dtype=int )) )

            # keep track of each image's data in an entry for 
            # easier replacement.
            entries.append( Entry( image.id, offset, n_data ) )

            #Utility.report_memused()
            Utility.report_status('x', '(%d bytes)'%(x.nbytes))
            Utility.report_status('y', '(%d bytes)'%(y.nbytes))
            Utility.report_status('.','.')


        # bail out if no entries were found
        if len(entries) == 0:
            Utility.report_status('Fetching new data', 'None Found')
            return

        Utility.report_status( 'Loading new data', 'please wait')

        # merge with any existing entries: old samples are appended after the new ones
        if len(self.entries) > 0:
            offset = len(y)

            print(entries[-1].name, entries[-1].offset, entries[-1].length)
            mask = np.ones( len(self.y), dtype=bool)
            names = [ e.name for e in entries ]

            for entry in self.entries:
                if entry.name in names:
                    mask[ entry.offset : entry.offset+entry.length ] = False
                else:
                    entry.offset = offset
                    offset += entry.length
                    entries.append( entry )
                    print(entry.name, entry.offset, entry.length)

            x_keep = self.x[ mask ]
            y_keep = self.y[ mask ]
            p_keep = self.p[ mask ]
            x = np.vstack( (x, x_keep) )
            y = np.hstack( (y, y_keep) )
            p = np.hstack( (p, p_keep) )

        
        if len( np.unique( y ) ) <= 1:
#......... (rest of the method omitted in this excerpt) .........
Author: thouis, Project: icon, Lines: 103, Source: data.py
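The bookkeeping in example 5 is worth isolating: each Entry records (name, offset, length) into the sample arrays, and when an image is re-annotated its old rows are dropped through a boolean mask while everything else is kept. A standalone numpy sketch of the same idea (all names and values here are illustrative, not from the source):

import numpy as np

# Old pool: 5 samples; the first 3 came from image 'a', the last 2 from 'b'.
y_old = np.array([0, 1, 0, 1, 1])
old_entries = [('a', 0, 3), ('b', 3, 2)]   # (name, offset, length)

# Image 'a' was re-annotated, so its old samples are masked out.
refreshed = {'a'}
mask = np.ones(len(y_old), dtype=bool)
for name, offset, length in old_entries:
    if name in refreshed:
        mask[offset:offset + length] = False

y_new = np.array([1, 0])                   # fresh samples for image 'a'
y = np.hstack((y_new, y_old[mask]))        # new samples first, surviving old ones after
print(y)                                   # [1 0 1 1]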

Example 6: agenerate_experiment_data_patch_prediction

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: numpy as np, os, json, time, mahotas, plus the project's Paths, normalizeImage, get_sample_sizes, and gen_membrane_image helpers)
def agenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None):

    def relabel(image):
        id_list = np.unique(image)
        for index, id in enumerate(id_list):
            image[image==id] = index
        return image

    start_time = time.time()

    if purpose == 'train':
        images = DB.getTrainingImages( project.id, new=False )
        path = Paths.TrainGrayscale
    else:
        images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
        path = Paths.ValidGrayscale
   
    files_gray = []
    data_labels = []
    label_sample_sizes = np.array([ 0, 0])
 
    #imgs = DB.getImages( project.id )
    for image in images:
        d_path = '%s/%s.tif'%(path, image.id)
        l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)

        if os.path.exists( d_path ) and os.path.exists( l_path ):

            # load the annotations
            with open( l_path ) as labels_f:
                annotations = json.load( labels_f )

            # skip if not enough samples in the annotations
            sample_sizes = get_sample_sizes( annotations )
            if np.sum( sample_sizes ) == 0:
                continue

            label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
            files_gray.append( d_path )
            data_labels.append( annotations )

    print(len(files_gray))
    print(len(data_labels))
    print(label_sample_sizes)

    if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
        return None

    whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=float)
    whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)

    # how many samples per image?
    nsamples_perImage = int(np.ceil( nsamples / float(len(files_gray)) ))
    print('using ' + str(nsamples_perImage) + ' samples per image.')
    counter = 0

    border_patch = np.ceil(patchSize/2.0)

    pad = patchSize

    read_order = np.random.permutation(np.shape(files_gray)[0])
    for index in read_order:
        file_image = files_gray[index]
        labels = data_labels[index]
        sample_sizes = get_sample_sizes( labels )

        img = mahotas.imread(files_gray[index])
        img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
        # normalizes [0,1]
        img = normalizeImage(img, doClahe=True)

        membrane_img = gen_membrane_image( labels, img.shape )
        print(membrane_img.shape)
        print(np.unique(membrane_img))

        for label, coordinates in enumerate( labels ):
            if counter >= nsamples:
                break
       
            ncoordinates = len(coordinates)

            if ncoordinates == 0:
                continue

            # randomly sample from the label
            indices = np.random.choice( ncoordinates, sample_sizes[label], replace=False)
          
            for i in indices:
                # coordinates are flat [col, row, col, row, ...] pairs;
                # snap odd indices back to the start of the pair
                if i%2 == 1:
                    i = i-1

                if counter >= nsamples:
                    break

                col = coordinates[i]
                row = coordinates[i+1]
                r1  = int(row+patchSize-border_patch)
                r2  = int(row+patchSize+border_patch)
                c1  = int(col+patchSize-border_patch)
                c2  = int(col+patchSize+border_patch)
#......... (rest of the function omitted in this excerpt) .........
Author: Rhoana, Project: icon, Lines: 103, Source: g.py
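Example 6 pads the image by patchSize on every side before sampling, so a window centered on any annotated (row, col) stays in bounds even at the image border. A minimal sketch of that index arithmetic (values are illustrative; note that the excerpt's window is 2*border_patch pixels wide):

import numpy as np

patchSize = 29
pad = patchSize
border_patch = int(np.ceil(patchSize / 2.0))   # 15

img = np.zeros((100, 100))
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')

row, col = 0, 0                                # even a corner pixel is safe after padding
r1, r2 = row + pad - border_patch, row + pad + border_patch
c1, c2 = col + pad - border_patch, col + pad + border_patch
patch = img[r1:r2, c1:c2]
print(patch.shape)                             # (30, 30), i.e. 2*border_patch per side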

Example 7: gen_training_data

# Required import: from db import DB [as alias]
# Or: from db.DB import getTrainingImages [as alias]
# (this snippet also assumes: numpy as np, os, time, mahotas, plus the project's Paths, normalizeImage, and gen_annotated_image helpers)
def gen_training_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
    def relabel(image):
        id_list = np.unique(image)
        for index, id in enumerate(id_list):
            image[image==id] = index
        return image

    print('gen_data')
    if project is None:
        return

    n_labels = len( project.labels )

    start_time = time.time()

    files_gray = []
    files_annotations = []

    images = DB.getTrainingImages( project.id, new=False )
    path = Paths.TrainGrayscale

    # build the list of images to sample from while discarding those
    # without annotations.
    for image in images:
        d_path = '%s/%s.tif'%(path, image.id)
        m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)

        if os.path.exists( d_path ) and os.path.exists( m_path ):
            files_gray.append( d_path )
            files_annotations.append( m_path )


    # return nothing if images or annotations not found
    if len( files_gray ) == 0 or len( files_annotations ) == 0:
        return None

    print(files_gray)
    print(files_annotations)

    whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=float)
    whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)

    # how many samples per image?
    nsamples_perImage = int(np.ceil( nsamples / float(len(files_gray)) ))
    print('using ' + str(nsamples_perImage) + ' samples per image.')
    counter = 0


    # pad image borders
    border = int(np.ceil(patchSize/2.0))
    pad = patchSize
    
    n_samples_remaining = nsamples
    n_images = len(files_gray)

    n_samples_per_image = nsamples // n_images
    n_samples_per_label = [ nsamples // n_labels for _ in project.labels ]

    print('n_samples_per_image:', n_samples_per_image)
    print('n_samples_per_label:', n_samples_per_label)

    for i_image in range( n_images ):

        img = mahotas.imread(files_gray[ i_image ])
        ann, annotations = gen_annotated_image( files_annotations[ i_image ], img.shape )

        img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
        img = normalizeImage(img, doClahe=True)
        
        # get the label indices
        #indices = np.nonzero( ann )


        ann = np.pad(ann, ((pad, pad), (pad, pad)), 'symmetric')

        # set pixel values to label
        ann = ann - 1
        ann[ ann < 0 ] = 0
        print(ann.shape)
        print(img.shape)

        print('min-max')
        print(np.min( ann ), np.max( ann ))
        print(np.min( img ), np.max( img ))


        #nsamples_perImage = int(n_samples_remaining/(n_images - img_index))
        #n_img_samples = min( len(indices[0]), nsamples_perImage)
        n_samples_per_image = int(n_samples_remaining/(n_images - i_image))
        #n_n_img_samples = nsamples_perImage

        

        print('--------')
        print('i_image:', i_image)
        print('image', files_gray[i_image])
        #print('n_samples_remaining:', n_samples_remaining)
#......... (rest of the function omitted in this excerpt) .........
Author: Rhoana, Project: icon, Lines: 103, Source: ff.py
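Example 7 spreads the sample budget progressively: each image receives an equal share of whatever remains, so shortfalls from sparsely annotated images flow to the images processed later. A tiny sketch of that scheme (numbers are illustrative):

nsamples = 1000
n_images = 3
n_samples_remaining = nsamples
for i_image in range(n_images):
    n_samples_per_image = n_samples_remaining // (n_images - i_image)
    drawn = min(n_samples_per_image, 250)   # pretend each image only yields 250 usable pixels
    n_samples_remaining -= drawn
    print(i_image, n_samples_per_image, drawn)
# budgets grow as earlier images under-deliver: 333, 375, 500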


Note: The db.DB.getTrainingImages examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from community open-source projects, and copyright remains with the original authors; follow each project's license when using or redistributing them, and do not reproduce this compilation without permission.