

Python io.loadmat Method Code Examples

This article collects typical usage examples of the scipy.io.loadmat method in Python. If you are wondering how io.loadmat is used in practice, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the scipy.io module.


The following presents 15 code examples of the io.loadmat method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
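Before the examples, a minimal self-contained sketch of the basic pattern may help; the file name demo.mat and the variable name A below are hypothetical placeholders. loadmat returns a dict mapping MATLAB variable names to NumPy arrays, along with the __header__, __version__ and __globals__ metadata keys.

import numpy as np
from scipy import io as sio

# Write a small .mat file first so the sketch is self-contained.
sio.savemat('demo.mat', {'A': np.eye(3)})

data = sio.loadmat('demo.mat')   # dict: MATLAB variable names -> NumPy arrays
A = data['A']                    # a (3, 3) numpy.ndarray
print(sorted(data.keys()))       # ['A', '__globals__', '__header__', '__version__']

MATLAB structs load as structured object arrays, which is why several examples below index with [0, 0] before accessing fields; passing squeeze_me=True and struct_as_record=False to loadmat can simplify that access.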

Example 1: _load_selective_search_roidb

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Author: guoruoqian, Project: cascade-rcnn_Pytorch, Lines of code: 20, Source: pascal_voc.py

Example 2: __init__

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def __init__(self, path, start_epoch):
        if start_epoch != 0:
           stats_ = sio.loadmat(os.path.join(path,'stats.mat'))
           data = stats_['data']
           content = data[0,0]
           self.trainObj = content['trainObj'][:,:start_epoch].squeeze().tolist()
           self.trainTop1 = content['trainTop1'][:,:start_epoch].squeeze().tolist()
           self.trainTop5 = content['trainTop5'][:,:start_epoch].squeeze().tolist()
           self.valObj = content['valObj'][:,:start_epoch].squeeze().tolist()
           self.valTop1 = content['valTop1'][:,:start_epoch].squeeze().tolist()
           self.valTop5 = content['valTop5'][:,:start_epoch].squeeze().tolist()
           if start_epoch == 1:
               self.trainObj = [self.trainObj]
               self.trainTop1 = [self.trainTop1]
               self.trainTop5 = [self.trainTop5]
               self.valObj = [self.valObj]
               self.valTop1 = [self.valTop1]
               self.valTop5 = [self.valTop5]
        else:
           self.trainObj = []
           self.trainTop1 = []
           self.trainTop5 = []
           self.valObj = []
           self.valTop1 = []
           self.valTop5 = [] 
Author: jiangtaoxie, Project: fast-MPN-COV, Lines of code: 27, Source: functions.py

Example 3: test

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def test():
  y = sio.loadmat(here(__file__) + '/demo/ma1.mat')['y']

  # The right results are:
  #           "biased": [-0.12250513  0.35963613  1.00586945  0.35963613 -0.12250513]
  #           "unbiased": [-0.12444965  0.36246791  1.00586945  0.36246791 -0.12444965]
  print(cum2est(y, 2, 128, 0, 'unbiased'))
  print(cum2est(y, 2, 128, 0, 'biased'))

  # For the 3rd cumulant:
  #           "biased": [-0.18203039  0.07751503  0.67113035  0.729953    0.07751503]
  #           "unbiased": [-0.18639911  0.07874543  0.67641484  0.74153955  0.07937539]
  print(cum3est(y, 2, 128, 0, 'biased', 1))
  print(cum3est(y, 2, 128, 0, 'unbiased', 1))

  # For testing the 4th-order cumulant
  # "biased": [-0.03642083  0.4755026   0.6352588   1.38975232  0.83791117  0.41641134 -0.97386322]
  # "unbiased": [-0.04011388  0.48736793  0.64948927  1.40734633  0.8445089   0.42303979 -0.99724968]
  print(cum4est(y, 3, 128, 0, 'biased', 1, 1))
  print(cum4est(y, 3, 128, 0, 'unbiased', 1, 1))
Author: synergetics, Project: spectrum, Lines of code: 22, Source: cumest.py

Example 4: get_predict_labels

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def get_predict_labels():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    prediction, _ = googlenet(inputs, is_training)
    predict_labels = tf.argmax(prediction, 1)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabel = data["testlabels"]
    saver.restore(sess, "../save_para/.\\model.ckpt")
    nums_test = testlabel.shape[1]
    PREDICT_LABELS = np.zeros([nums_test])
    for i in range(nums_test // BATCH_SIZE):
        PREDICT_LABELS[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE] = sess.run(predict_labels, feed_dict={inputs: testdata[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE], is_training: False})
    PREDICT_LABELS[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:] = sess.run(predict_labels, feed_dict={inputs: testdata[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:], is_training: False})
    np.savetxt("../data/predict_labels.txt", PREDICT_LABELS) 
Author: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines of code: 20, Source: confusionMatrix.py

Example 5: export_one_scan

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def export_one_scan(scan_name):
    pt = np.load(os.path.join(DATA_DIR, scan_name+'_pc.npz'))['pc']
    np.savetxt(mode+'tmp.xyz', pt)
    os.system("mv {}tmp.xyz {}tmp.xyzrgb".format(mode, mode))
    point_cloud = o3d.io.read_point_cloud(mode+'tmp.xyzrgb')

    pred_proposals = np.load(os.path.join(PRED_PATH, 'center'+scan_name+'_nms.npy'))
    gt_bbox = sio.loadmat(os.path.join(PRED_PATH, 'center'+scan_name+'_gt.mat'))['gt']
    bb =[]
    if mode=='gt':
        boundingboxes = gt_bbox
    elif mode =='pred':
        boundingboxes = pred_proposals
    else:
        print("model must be gt or pred")
        return
    for i in range(boundingboxes.shape[0]):
        c = np.array(color_mapping[int(boundingboxes[i,-1])])/255.0
        for _ in range(2):
            bb.append(create_lineset(boundingboxes[i]+0.005*(np.random.rand()-0.5)*2, colors=c))
    load_view_point([point_cloud] + bb, './viewpoint.json', window_name=scan_name+'_'+mode) 
Author: zaiweizhang, Project: H3DNet, Lines of code: 23, Source: show_results_sunrgbd.py

Example 6: mat_load

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def mat_load(path, m_dict=None):
    """
    Load mat files.
    :param path:
    :return:
    """
    if m_dict is None:
        data = sio.loadmat(path)
    else:
        data = sio.loadmat(path, m_dict)

    return data

# endregion

# region File/Folder Names/Paths
Author: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 18, Source: utils.py

Example 7: process

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def process(self):
        mat = loadmat(self.raw_paths[0])['Problem'][0][0][2].tocsr().tocoo()

        row = torch.from_numpy(mat.row).to(torch.long)
        col = torch.from_numpy(mat.col).to(torch.long)
        edge_index = torch.stack([row, col], dim=0)

        edge_attr = torch.from_numpy(mat.data).to(torch.float)
        if torch.all(edge_attr == 1.):
            edge_attr = None

        size = torch.Size(mat.shape)
        if mat.shape[0] == mat.shape[1]:
            size = None

        num_nodes = mat.shape[0]

        data = Data(edge_index=edge_index, edge_attr=edge_attr, size=size,
                    num_nodes=num_nodes)

        if self.pre_transform is not None:
            data = self.pre_transform(data)

        torch.save(self.collate([data]), self.processed_paths[0]) 
Author: rusty1s, Project: pytorch_geometric, Lines of code: 26, Source: suite_sparse.py

Example 8: _load_selective_search_IJCV_roidb

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def _load_selective_search_IJCV_roidb(self, gt_roidb):
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 'voc_' + self._year))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in range(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    # evaluate detection results 
Author: CharlesShang, Project: TFFRCNN, Lines of code: 19, Source: pascal3d.py

Example 9: _load_selective_search_roidb

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Author: CharlesShang, Project: TFFRCNN, Lines of code: 20, Source: pascal_voc.py

Example 10: _load_selective_search_roidb

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(self._data_path,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Author: CharlesShang, Project: TFFRCNN, Lines of code: 20, Source: kittivoc.py

Example 11: __init__

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def __init__(self, opt):
        super(DataSetUSRNet, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.sigma_max = self.opt['sigma_max'] if self.opt['sigma_max'] is not None else 25
        self.scales = opt['scales'] if opt['scales'] is not None else [1,2,3,4]
        self.sf_validation = opt['sf_validation'] if opt['sf_validation'] is not None else 3
        #self.kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
        self.kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']  # for validation

        # -------------------
        # get the path of H
        # -------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])  # return None if input is None
        self.count = 0 
Author: cszn, Project: KAIR, Lines of code: 18, Source: dataset_usrnet.py

Example 12: __init__

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+'/box_data.mat')['boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+'/op_data.mat')['ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+'/sym_data.mat')['syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Author: kevin-kaixu, Project: grass_pytorch, Lines of code: 21, Source: grassdata.py

Example 13: __init__

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+u'/box_data.mat')[u'boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+u'/op_data.mat')[u'ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+u'/sym_data.mat')[u'syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Author: kevin-kaixu, Project: grass_pytorch, Lines of code: 21, Source: grassdata.py

Example 14: __getitem__

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def __getitem__(self, idx):
        img_path = self.data_frame.iloc[idx, 0]
        img = cv2.imread(img_path, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        x, y, w, h = self.data_frame.iloc[idx, 1:5]
        l, t, ww, hh = enlarge_bbox(x, y, w, h, self.enlarge_factor)
        r, b = l + ww, t + hh

        img = img[t: b, l:r, :]
        img = cv2.resize(img, (self.img_size, self.img_size))
        img = img.astype(np.float32) - 127.5

        img = nd.transpose(nd.array(img), (2, 0, 1))

        label_path = img_path.replace('.jpg', '.mat')

        label = sio.loadmat(label_path)

        params_shape = label['Shape_Para'].astype(np.float32).ravel()
        params_exp = label['Exp_Para'].astype(np.float32).ravel()

        return img, params_shape, params_exp 
Author: ShownX, Project: mxnet-E2FAR, Lines of code: 25, Source: E2FAR.py

Example 15: _load_imdb

# Required import: from scipy import io [as alias]
# Or: from scipy.io import loadmat [as alias]
def _load_imdb(self):
        face_score_treshold = 3
        dataset = loadmat(self.dataset_path)
        image_names_array = dataset['imdb']['full_path'][0, 0][0]
        gender_classes = dataset['imdb']['gender'][0, 0][0]
        face_score = dataset['imdb']['face_score'][0, 0][0]
        second_face_score = dataset['imdb']['second_face_score'][0, 0][0]
        face_score_mask = face_score > face_score_treshold
        second_face_score_mask = np.isnan(second_face_score)
        unknown_gender_mask = np.logical_not(np.isnan(gender_classes))
        mask = np.logical_and(face_score_mask, second_face_score_mask)
        mask = np.logical_and(mask, unknown_gender_mask)
        image_names_array = image_names_array[mask]
        gender_classes = gender_classes[mask].tolist()
        image_names = []
        for image_name_arg in range(image_names_array.shape[0]):
            image_name = image_names_array[image_name_arg][0]
            image_names.append(image_name)
        return dict(zip(image_names, gender_classes)) 
Author: oarriaga, Project: face_classification, Lines of code: 21, Source: datasets.py


Note: The scipy.io.loadmat method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce this article without permission.