

Python DB.getImages Method Code Examples

This article collects typical usage examples of the Python method db.DB.getImages. If you are wondering what DB.getImages does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the containing class, db.DB.


Six code examples of DB.getImages are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
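
Before diving into the examples, here is a minimal usage sketch of the method. The keyword arguments `purpose`, `new`, and `annotated` are inferred from the call sites in the examples below rather than from a documented API, and `'my_project'` is a hypothetical identifier:

from db import DB

project_id = 'my_project'                 # hypothetical project identifier

# all images registered for a project
images = DB.getImages( project_id )

# validation images only (purpose=1), previously loaded, with annotations
valid = DB.getImages( project_id, purpose=1, new=False, annotated=True )

for image in valid:
    print image.id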

Example 1: remove_project

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
    def remove_project(self, projectId):
        print 'removing project....', projectId
        images = DB.getImages( projectId )
        for image in images:
            DB.removeImage(projectId, image.id)

        DB.removeProject( projectId )

        # remove learning model
        types = ['mlp', 'cnn']
        for t in types:
            # e.g. best_cnn_model.cnn.0.pkl
            path = '%s/best_%s.%s.*.pkl'%(Paths.Models,projectId,t)
            paths = glob.glob(path)
            for path in paths:
                print 'deleting model...', path
                os.remove( path )

        # remove labels
        path = '%s/*.%s.json'%(Paths.Labels, projectId)
        labels = glob.glob(path)
        for p in labels:
            print 'trying to delete:', p
            os.remove( p )

        # remove segmentations
        path = '%s/*.%s.seg'%(Paths.Segmentation, projectId)
        segs = glob.glob(path)
        for p in segs:
            print 'trying to delete:', p
            os.remove( p )
Developer ID: Rhoana, Project: icon, Lines: 33, Source: projecthandler.py
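
A hypothetical invocation of the method above; `ProjectHandler` is an assumed class name, since the excerpt from projecthandler.py shows only the method body:

handler = ProjectHandler()
# removes the project's DB rows, then its model .pkl, label .json, and .seg files
handler.remove_project( 'proj42' )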

Example 2: gen_training_data

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
def gen_training_data(project, purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
    def relabel(image):
        id_list = np.unique(image)
        for index, id in enumerate(id_list):
            image[image==id] = index
        return image

    print 'gen_data'
    if project is None:
        return

    start_time = time.time()

    files_gray = []
    files_membranes = []

    if purpose == 'train':
        images = DB.getTrainingImages( project.id, new=False )
        path = Paths.TrainGrayscale
    else:
        images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
        path = Paths.ValidGrayscale

    for image in images:
        d_path = '%s/%s.tif'%(path, image.id)
        m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)

        # keep only images that have both a grayscale file and a label file
        if os.path.exists( d_path ) and os.path.exists( m_path ):
            '''
            # load the annotations
            with open( m_path ) as labels_f:
                annotations = json.load( labels_f )

            # skip if not enough samples in the annotations
            sample_sizes = get_sample_sizes( annotations )
            if np.sum( sample_sizes ) == 0:
                continue

            label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
            data_labels.append( annotations )
            '''
            files_gray.append( d_path )
            files_membranes.append( m_path )

    if len( files_gray ) == 0 or len( files_membranes ) == 0:
        return None

    whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
    whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
    whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)

    # the sampling loop that fills the arrays above and assembles `data`
    # is omitted from this excerpt
    return data
Developer ID: Rhoana, Project: icon, Lines: 60, Source: g.py

Example 3: load_validation

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
    def load_validation(self):

        # retrieve the list of validation images
        # (annotated images)
        valid_new = len(self.entries_valid) > 0
        print 'valid_new: ', valid_new
        images = DB.getImages( self.project.id, purpose=1, new=valid_new, annotated=True )

        # bail out if there are no validation images.
        if len(images) == 0:
            return

        # determine the maximum number of samples to draw
        # from each image
        n_samples_per_image = Data.MaxSamples/len(images)

        print '#n_samples_per_image:', n_samples_per_image
        print '#images:', len(images)

        entries = []

        # Load training samples for each image.
        for image in images:

            Utility.report_status( 'loading validation image', image.id)
            print 'ttime:', image.trainingTime
            print 'atime:', image.annotationTime
            print 'tstat:', image.trainingStatus

            offset = len( entries )

            # generate samples for the image
            #data   = self.gen_samples( project, image.id, n_samples_per_image )
            data   = self.gen_samples( Paths.ValidGrayscale, self.project, image.id, n_samples_per_image )
            x_data = data[0]
            y_data = data[1]
            n_data = len( y_data )

            # skip if no annotations found
            if n_data == 0:
                continue

            # add sample to the training set
            if offset == 0:
                x = x_data
                y = y_data
                p = np.ones( n_data, dtype=np.int )
            else:
                x = np.vstack( (x, x_data) )
                y = np.hstack( (y, y_data) )
                p = np.hstack( (p, np.ones( n_data, dtype=np.int )) )

            # keep track of each image's data in an entry for 
            # easier replacement.
            entries.append( Entry( image.id, offset, n_data ) )

            #Utility.report_memused()
            Utility.report_status('x', '(%d bytes)'%(x.nbytes))
            Utility.report_status('y', '(%d bytes)'%(y.nbytes))
            Utility.report_status('.','.')



        # bailout if no entries found
        if len(entries) == 0:
            Utility.report_status('Fetching new data', 'None Found')
            return

        Utility.report_status( 'Loading new data', 'please wait')

        # merge any existing validation entries with the new ones
        if len(self.entries_valid) > 0:
            #append old entries after the new entries
            offset = len(y)

            print entries[-1].name, entries[-1].offset, entries[-1].length
            mask = np.ones( len(self.y_valid), dtype=bool)
            names = [ e.name for e in entries ]

            for entry in self.entries_valid:
                if entry.name in names:
                    mask[ entry.offset : entry.offset+entry.length ] = False
                else:
                    entry.offset = offset
                    offset += entry.length
                    entries.append( entry )
                    print entry.name, entry.offset, entry.length

            x_keep = self.x_valid[ mask ]
            y_keep = self.y_valid[ mask ]
            p_keep = self.p_valid[ mask ]
            x = np.vstack( (x, x_keep) )
            y = np.hstack( (y, y_keep) )
            p = np.hstack( (p, p_keep) )


        if len( np.unique( y ) ) <= 1:
            print 'not enough labels specified...'
            return

#......... remainder of code omitted .........
Developer ID: thouis, Project: icon, Lines: 103, Source: data.py
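
The trickiest part of load_validation is the merge step: rows belonging to images that were just reloaded are masked out of the old arrays, and the surviving entries are re-offset to sit after the new block. A standalone sketch of the same idea, with all names illustrative:

import numpy as np
from collections import namedtuple

Entry = namedtuple('Entry', ['name', 'offset', 'length'])

def merge(new_x, new_entries, old_x, old_entries):
    # keep old rows whose image was NOT reloaded, stacked after the new rows
    mask = np.ones(len(old_x), dtype=bool)
    new_names = set(e.name for e in new_entries)
    offset = len(new_x)
    merged = list(new_entries)
    for e in old_entries:
        if e.name in new_names:
            mask[e.offset:e.offset + e.length] = False        # stale rows: drop
        else:
            merged.append(Entry(e.name, offset, e.length))    # kept rows: re-offset
            offset += e.length
    return np.vstack((new_x, old_x[mask])), merged

old_x = np.arange(6).reshape(6, 1)
old_entries = [Entry('a', 0, 3), Entry('b', 3, 3)]
new_x = np.full((2, 1), 9)
x, entries = merge(new_x, [Entry('a', 0, 2)], old_x, old_entries)
# image 'a' is replaced by its 2 new rows; image 'b' keeps its 3 rows, now at offset 2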

Example 4: load_training

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
    def load_training(self):

        

        # retrieve the list of training images 
        # (annotated images)
        first_time = (len(self.entries) == 0)
        images     = DB.getTrainingImages( self.project.id, new=(not first_time) )
        imgs = DB.getImages( self.project.id )

        # bail out if there are no images to train on.
        if len(images) == 0:
            return

        # determine the maximum number of samples to draw
        # from each image
        n_samples_per_image = Data.MaxSamples/len(images)

        print '#n_samples_per_image:', n_samples_per_image
        print '#images:', len(images)

        entries = []

        # Load training samples for each image.
        for image in images:

            Utility.report_status( 'loading', image.id)
            print 'ttime:', image.trainingTime
            print 'atime:', image.annotationTime
            print 'tstat:', image.trainingStatus

            offset = len( entries )

            # generate samples for the image
            #data   = self.gen_samples( project, image.id, n_samples_per_image )
            data   = self.gen_samples( Paths.TrainGrayscale, self.project, image.id, n_samples_per_image )
            x_data = data[0]
            y_data = data[1]
            n_data = len( y_data )

            print 'wmean:', data[2], 'wstd:', data[3], 'mean:', self.project.mean, 'std:', self.project.std

            # skip if no annotations found
            if n_data == 0:
                continue

            # add sample to the training set
            if offset == 0:
                x = x_data
                y = y_data
                p = np.ones( n_data, dtype=np.int )
            else:
                x = np.vstack( (x, x_data) )
                y = np.hstack( (y, y_data) )
                p = np.hstack( (p, np.ones( n_data, dtype=np.int )) ) 

            # keep track of each image's data in an entry for 
            # easier replacement.
            entries.append( Entry( image.id, offset, n_data ) )

            #Utility.report_memused()
            Utility.report_status('x', '(%d bytes)'%(x.nbytes))
            Utility.report_status('y', '(%d bytes)'%(y.nbytes))
            Utility.report_status('.','.')


        # bailout if no entries found
        if len(entries) == 0:
            Utility.report_status('Fetching new data', 'None Found')
            return

        Utility.report_status( 'Loading new data', 'please wait')

        # merge any existing training entries with the new ones
        if len(self.entries) > 0:
            #append old entries after the new entries
            offset = len(y)

            print entries[-1].name, entries[-1].offset, entries[-1].length
            mask = np.ones( len(self.y), dtype=bool)
            names = [ e.name for e in entries ]

            for entry in self.entries:
                if entry.name in names:
                    mask[ entry.offset : entry.offset+entry.length ] = False
                else:
                    entry.offset = offset
                    offset += entry.length
                    entries.append( entry )
                    print entry.name, entry.offset, entry.length

            x_keep = self.x[ mask ]
            y_keep = self.y[ mask ]
            p_keep = self.p[ mask ]
            x = np.vstack( (x, x_keep) )
            y = np.hstack( (y, y_keep) )
            p = np.hstack( (p, p_keep) )

        
        if len( np.unique( y ) ) <= 1:
#......... remainder of code omitted .........
Developer ID: thouis, Project: icon, Lines: 103, Source: data.py

Example 5: aload

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
    def aload(self, project):

        self.project = project

        # LOAD TRAINING DATA 
        train_new = len(self.entries) > 0
        images = DB.getImages( project.id, purpose=0, new=train_new)
        out = self.load_data(
                Paths.TrainGrayscale, 
                images, 
                project, 
                self.x, 
                self.y,
                self.p, 
                self.entries)

        self.x = out[0]
        self.y = out[1]
        self.p = out[2]
        self.entries = out[3]

        n_samples = len(self.y)
        self.i = np.arange( n_samples )
        self.n_superbatch = int(n_samples/(Data.TrainSuperBatchSize + Data.ValidSuperBatchSize))
        self.i_randomize  = 0 
        self.data_changed = True
        self.i_train = []
        self.avg_losses = []
        self.last_avg_loss = 0 

        if n_samples >  0:  
            Utility.report_status('---------training---------', '') 
            Utility.report_status('#samples','(%d)'%len(self.y))
            Utility.report_status('x shape','(%d,%d)'%(self.x.shape[0], self.x.shape[1]))
            Utility.report_status('y shape','(%d)'%(self.y.shape[0]))
            Utility.report_status('x memory', '(%d bytes)'%(self.x.nbytes))
            Utility.report_status('y memory', '(%d bytes)'%(self.y.nbytes))

        print 'min:', np.min( self.x )
        print 'max:', np.max( self.x )
        print 'uy:', np.unique( self.y )
        print 'x:', self.x[:5]
        print 'y:', self.y[:5]


        # LOAD VALIDATION IMAGES
        valid_new = len(self.entries_valid) > 0
        images = DB.getImages( project.id, purpose=1, new=valid_new )
        out = self.load_data(   
                Paths.ValidGrayscale, 
                images, 
                project, 
                self.x_valid, 
                self.y_valid, 
                self.p_valid,
                self.entries_valid)

        self.x_valid = out[0]
        self.y_valid = out[1]
        self.p_valid = out[2]
        self.entries_valid = out[3]

        n_samples = len(self.y_valid)
        self.i_valid = np.arange( n_samples )

        if n_samples >  0:
            Utility.report_status('---------validation---------', '')
            Utility.report_status('#samples','(%d)'%len(self.y_valid))
            Utility.report_status('x shape','(%d,%d)'%(self.x_valid.shape[0], self.x_valid.shape[1]))
            Utility.report_status('y shape','(%d)'%(self.y_valid.shape[0]))
            Utility.report_status('x memory', '(%d bytes)'%(self.x_valid.nbytes))
            Utility.report_status('y memory', '(%d bytes)'%(self.y_valid.nbytes))

        print 'min:', np.min( self.x_valid )
        print 'max:', np.max( self.x_valid )
        print 'uy:', np.unique( self.y_valid )
        print 'x:', self.x_valid[:5]
        print 'y:', self.y_valid[:5]

        Utility.report_memused()
        DB.finishLoadingTrainingset( project.id )
Developer ID: thouis, Project: icon, Lines: 83, Source: data.py

Example 6: agenerate_experiment_data_patch_prediction

# Required imports: from db import DB [as alias]
# Or: from db.DB import getImages [as alias]
def agenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None):

    def relabel(image):
        id_list = np.unique(image)
        for index, id in enumerate(id_list):
            image[image==id] = index
        return image

    start_time = time.time()

    if purpose == 'train':
        images = DB.getTrainingImages( project.id, new=False )
        path = Paths.TrainGrayscale
    else:
        images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
        path = Paths.ValidGrayscale
   
    files_gray = []
    data_labels = []
    label_sample_sizes = np.array([ 0, 0])
 
    #imgs = DB.getImages( project.id )
    for image in images:
        d_path = '%s/%s.tif'%(path, image.id)
        l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)

        if os.path.exists( d_path ) and os.path.exists( l_path ):

            # load the annotations
            with open( l_path ) as labels_f:
                annotations = json.load( labels_f )

            # skip if not enough samples in the annotations
            sample_sizes = get_sample_sizes( annotations )
            if np.sum( sample_sizes ) == 0:
                continue

            label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
            files_gray.append( d_path )
            data_labels.append( annotations )

    print len(files_gray)
    print len(data_labels)
    print label_sample_sizes

    if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
        return None

    whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
    whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)

    #how many samples per image?
    nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
    print 'using ' + str(nsamples_perImage) + ' samples per image.'
    counter = 0

    border_patch = np.ceil(patchSize/2.0)

    pad = patchSize

    read_order = np.random.permutation(np.shape(files_gray)[0])
    for index in read_order:
        file_image = files_gray[index]
        labels = data_labels[index]
        sample_sizes = get_sample_sizes( labels )

        img = mahotas.imread( file_image )
        img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
        # normalizes [0,1]
        img = normalizeImage(img, doClahe=True)

        membrane_img = gen_membrane_image( labels, img.shape )
        print membrane_img.shape
        print np.unique(membrane_img)

        for label, coordinates in enumerate( labels ):
            if counter >= nsamples:
                break
       
            ncoordinates = len(coordinates)

            if ncoordinates == 0:
                continue

            # randomly sample from the label
            indices = np.random.choice( ncoordinates, sample_sizes[label], replace=False)
          
            for i in indices:
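                # coordinates is a flat [col, row, col, row, ...] list;
                # snap odd indices to the even (column) slot of the pair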
                if i%2 == 1:
                    i = i-1

                if counter >= nsamples:
                    break

                col = coordinates[i]
                row = coordinates[i+1]
                r1  = int(row+patchSize-border_patch)
                r2  = int(row+patchSize+border_patch)
                c1  = int(col+patchSize-border_patch)
                c2  = int(col+patchSize+border_patch)
#......... remainder of code omitted .........
Developer ID: Rhoana, Project: icon, Lines: 103, Source: g.py
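
One detail of the sampling loop above worth isolating: each label's coordinates arrive as a flat [col0, row0, col1, row1, ...] list, so a randomly drawn index is snapped to the even (column) slot before the (col, row) pair is read, and the patch window is computed in the coordinates of the padded image. A minimal sketch of just that step, with illustrative values:

import numpy as np

patchSize = 29
pad = patchSize                              # the image was padded by patchSize on each side
border = int(np.ceil(patchSize/2.0))

coordinates = [10, 12, 40, 8, 33, 57]        # flat [col, row, col, row, ...] list

i = np.random.choice(len(coordinates))
if i % 2 == 1:                               # snap to the 'col' slot of the pair
    i = i - 1

col, row = coordinates[i], coordinates[i+1]
r1, r2 = row+pad-border, row+pad+border      # row window in the padded image
c1, c2 = col+pad-border, col+pad+border      # column window
print 'rows:', (r1, r2), 'cols:', (c1, c2)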


Note: The db.DB.getImages examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.