当前位置: 首页>>代码示例>>Python>>正文


Python File.flush方法代码示例

本文整理汇总了Python中h5py.File.flush方法的典型用法代码示例。如果您正苦于以下问题:Python File.flush方法的具体用法?Python File.flush怎么用?Python File.flush使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在h5py.File的用法示例。


在下文中一共展示了File.flush方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_flush

# 需要导入模块: from h5py import File [as 别名]
# 或者: from h5py.File import flush [as 别名]
 def test_flush(self):
     """Flush an open file via the .flush method.

     FIX: close the file even when flush() raises, so a failing flush
     does not leak an open HDF5 handle into subsequent tests.
     """
     fid = File(self.mktemp(), 'w')
     try:
         fid.flush()
     finally:
         fid.close()
开发者ID:ajelenak-thg,项目名称:h5py,代码行数:7,代码来源:test_file.py

示例2: make_nuc

# 需要导入模块: from h5py import File [as 别名]
# 或者: from h5py.File import flush [as 别名]
def make_nuc(ncc_file_path, n3d_file_path, out_file_name):
  """Build a .nuc (HDF5) file from an NCC contact file and an N3D coordinate file.

  Parameters
  ----------
  ncc_file_path : str
      Path to the contacts (.ncc) file; parsed by import_contacts().
  n3d_file_path : str
      Path to the coordinates (.n3d) file; parsed by import_coords().
  out_file_name : str
      Output path; a '.nuc' suffix is appended if not already present.
  """
  if not out_file_name.lower().endswith('.nuc'):
    out_file_name = out_file_name + '.nuc'

  contact_dict = import_contacts(ncc_file_path)

  # The contact group is named after the input file's base name (no extension).
  contact_name = os.path.splitext(os.path.basename(ncc_file_path))[0]

  pos_dict, coords_dict = import_coords(n3d_file_path)

  root = File(out_file_name, mode='w')

  # Fixed group hierarchy of the .nuc format as (parent, children) pairs.
  # BUG FIX: ('0') is just the string '0', not a tuple; it only worked
  # because iterating a one-character string yields that character.
  hierarchy = (('contacts',    ('original', 'working')),
               ('display',     ()),
               ('chromosomes', ()),
               ('dataTracks',  ('derived', 'external', 'innate')),
               ('sample',      ('protocol', 'organism', 'tissue')),
               ('structures',  ('0',)),
               ('images',      ())
               )

  for parent, children in hierarchy:
    group = root.create_group(parent)

    for child in children:
      group.create_group(child)

  for child in ('particles', 'restraints', 'transforms', 'coords'):
    root['structures']['0'].create_group(child)

  # Seed the RNG with the creation time so the file id is tied to it.
  now = int(time.time())
  random.seed(now)

  root.attrs['id'] = np.array([random.random(), now, now], np.float32)

  # NOTE(review): np.string_ is removed in NumPy 2.0 (np.bytes_ is the
  # long-standing alias) — confirm the target NumPy version before changing.
  root['sample'].attrs['name'] = np.string_('Unknown')

  contact_group = root['contacts']['working'].create_group(contact_name)

  for chromoPair in contact_dict:
    chrA, chrB = chromoPair

    if chrA not in contact_group:
      contact_group.create_group(chrA)

    contact_group[chrA].create_dataset(chrB, dtype=np.uint32, data=contact_dict[chromoPair].T)

  coords_group   = root['structures']['0']['coords']
  particle_group = root['structures']['0']['particles']

  for chromo in coords_dict:
    coords_group.create_dataset(chromo, dtype=np.float64, data=coords_dict[chromo])

    pos = np.array(pos_dict[chromo], np.uint32)
    group = particle_group.create_group(chromo)
    group.create_dataset('positions', dtype=np.uint32, data=pos)

    # Chromosome limits are the min/max particle positions in the model.
    chromo_group = root['chromosomes'].create_group(chromo)
    chromo_group.attrs['limits'] = np.array([pos.min(), pos.max()])

  root.flush()
  # FIX: close the handle explicitly instead of relying on garbage collection.
  root.close()
开发者ID:TheLaueLab,项目名称:nuc_frames,代码行数:65,代码来源:make_nuc.py

示例3: close_file

# 需要导入模块: from h5py import File [as 别名]
# 或者: from h5py.File import flush [as 别名]
def close_file(file: h5py.File):
    """Flush any pending writes to disk, then close the given HDF5 file."""
    file.flush()
    file.close()
开发者ID:DomenicD,项目名称:domenic,代码行数:5,代码来源:experiments.py

示例4: labelManager

# 需要导入模块: from h5py import File [as 别名]
# 或者: from h5py.File import flush [as 别名]
class labelManager(object):
    """Manage label blocks stored in an ilastik project (HDF5) file.

    Labels live under 'PixelClassification/LabelSets/labels000' as
    'block%04d' datasets, each carrying a 'blockSlice' attribute that
    records the block's position in global coordinates.
    """

    def __init__(self, fileName, startBlockNum = 0):
        # Open read/write: the project file must already exist.
        self._f = File(fileName,'r+')
        self._blockNumber = startBlockNum
        self._maxLabelNum = 9999  # 'block%04d' admits at most 10000 names

    def addBlockLabel(self, data, start, stop=None, invert = False):
        """Store `data` as one label block positioned at `start`.

        If `stop` is omitted it is derived from `start` plus the data shape.
        """
        if not stop:
            stop = [length + offset for length, offset in zip(data.shape, start)]

        if self._blockNumber <= self._maxLabelNum:
            dataset = self._f['PixelClassification/LabelSets/labels000'].create_dataset('block%04d' % self._blockNumber, data=(data.astype(np.uint8)))
            dataset.attrs.create('blockSlice',pointsToPosition(start, stop, invert))
            self._blockNumber += 1
        else:
            # FIX: print-as-function with one argument works in Python 2 and 3.
            print('Warning: maximum label block number exceeded. Unable to add further labels.')


    def addMultipleSingleLabels(self, positions, labelValue):
        """Add one single-voxel label for every column of `positions`."""
        # BUG FIX: this called the nonexistent self.addLabels();
        # addSingleLabel() is the method with the matching signature.
        for point in positions.T:
            self.addSingleLabel(labelValue, pointsToPosition(point, point+1))

    def addSingleLabel(self, labelValue, position):
        """Add a 1x1x1x1 label block holding `labelValue` at `position`."""
        dataset = self._f['PixelClassification/LabelSets/labels000'].create_dataset('block%04d' % self._blockNumber, data=[[[[np.uint8(labelValue)]]]])
        dataset.attrs.create('blockSlice',position)
        self._blockNumber += 1

    def clear(self):
        """Delete every stored label block and reset the block counter."""
        dataset = self._f['PixelClassification/LabelSets/labels000']
        # FIX: materialize the key list before deleting — mutating the group
        # while iterating its key view raises at runtime on h5py 3 / Python 3.
        for key in list(dataset.keys()):
            del dataset[key]
        self._blockNumber = 0

    def getSubBlocks(self):
        """ returns subblocks containing the labels together with their corresponding offsets"""

        dataset = self._f['PixelClassification/LabelSets/labels000']
        labelBlocks = []
        for key in dataset:
            offset = strToPos(dataset[key].attrs.get('blockSlice'))
            # FIX: Dataset.value was deprecated and removed in h5py 3.0;
            # the empty-tuple index reads the whole dataset on all versions.
            values = dataset[key][()]
            labelBlocks.append([offset, values])
            print(key)
        return labelBlocks

    def getInSingleBlock(self, shape=None):
        """ returns a block containing all the labels. The return is guaranteed to start at (0,0,0) global coordinates,
        it may however not cover the whole block (max(shape[0]), max(shape[1]), max(shape[2])), since there is no good way
        of determining the shape of the raw data from ilastik"""

        # get the labels as they are saved in the project
        labeledBlocks = self.getSubBlocks()

        offsets = np.array([labeledBlock[0] for labeledBlock in labeledBlocks])
        shapes = np.array([labeledBlock[1].shape[:3] for labeledBlock in labeledBlocks])
        data = [labelsBlock[1][:,:,:,0] for labelsBlock in labeledBlocks]

        if shape is None:
            # find out the dimension of the block; there should be a better way of doing that.
            shape = np.max(offsets + shapes[:,:3], axis=0)

        # write all labels into one big array
        labelBlockTotal = np.zeros(shape, dtype=np.uint8)
        # FIX: loop variable used to shadow the `shape` parameter (renamed to
        # blockShape), and the index is now a tuple — indexing with a list of
        # slices is an error in modern NumPy.
        for offset, blockShape, dataBlock in zip(offsets, shapes, data):
            index = (slice(offset[0], offset[0] + blockShape[0]),
                     slice(offset[1], offset[1] + blockShape[1]),
                     slice(offset[2], offset[2] + blockShape[2]))
            labelBlockTotal[index] += dataBlock

        return labelBlockTotal


    def flush(self):
        """Flush pending writes of the underlying project file to disk."""
        self._f.flush()

    def changeRawDataPath(self, newPath):
        """ deletes all saved paths and replaces it with the path 'newPath' """
        dataset = self._f['Input Data/infos/lane0000/Raw Data/']
        dataset.pop('filePath')
        dataset.create_dataset('filePath', data=newPath)

示例5: convert_cifar10

# 需要导入模块: from h5py import File [as 别名]
# 或者: from h5py.File import flush [as 别名]
def convert_cifar10(directory, output_directory,
                    output_filename='cifar10.hdf5'):
    """Converts the CIFAR-10 dataset to HDF5.
    Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
    'cifar10.hdf5'. Training images are doubled by interleaving each
    image with its horizontal mirror.
    It assumes the existence of the following file:
    * `cifar-10-python.tar.gz`
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'cifar10.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = File(output_path, mode='w')
    input_file = os.path.join(directory, DISTRIBUTION_FILE)
    # FIX: the tar file was never closed; the context manager guarantees it.
    with tarfile.open(input_file, 'r:gz') as tar_file:
        train_batches = []
        for batch in range(1, 6):
            # FIX: renamed `file` -> `batch_file` (shadowed the builtin).
            batch_file = tar_file.extractfile(
                'cifar-10-batches-py/data_batch_%d' % batch)
            try:
                if six.PY3:
                    array = cPickle.load(batch_file, encoding='latin1')
                else:
                    array = cPickle.load(batch_file)
                train_batches.append(array)
            finally:
                batch_file.close()

        train_features = numpy.concatenate(
            [batch['data'].reshape(batch['data'].shape[0], 3, 32, 32)
                for batch in train_batches])
        train_labels = numpy.concatenate(
            [numpy.array(batch['labels'], dtype=numpy.uint8)
                for batch in train_batches])
        train_labels = numpy.expand_dims(train_labels, 1)

        # FIX: print-as-function with one argument works in Python 2 and 3.
        print(train_features.shape)
        print(train_labels.shape)

        # Augmentation: mirror each image along the width axis and interleave
        # original/flipped pairs; labels are repeated to stay aligned.
        flipped_train_features = train_features[:,:,:,::-1]

        train_features = numpy.array([val for pair in zip(train_features, flipped_train_features) for val in pair])
        train_labels = numpy.repeat(train_labels, 2, axis=0)

        print(train_features.shape)
        print(train_labels.shape)

        test_file = tar_file.extractfile('cifar-10-batches-py/test_batch')
        try:
            if six.PY3:
                test = cPickle.load(test_file, encoding='latin1')
            else:
                test = cPickle.load(test_file)
        finally:
            test_file.close()

    test_features = test['data'].reshape(test['data'].shape[0],
                                         3, 32, 32)
    test_labels = numpy.array(test['labels'], dtype=numpy.uint8)
    test_labels = numpy.expand_dims(test_labels, 1)

    data = (('train', 'features', train_features),
            ('train', 'targets', train_labels),
            ('test', 'features', test_features),
            ('test', 'targets', test_labels))
    fill_hdf5_file(h5file, data)
    # Label the axes so Fuel consumers know the layout: batch x channel x H x W.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['targets'].dims[0].label = 'batch'
    h5file['targets'].dims[1].label = 'index'

    h5file.flush()
    h5file.close()

    return (output_path,)
开发者ID:piotder,项目名称:nn_project,代码行数:90,代码来源:expand.py


注:本文中的h5py.File.flush方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。