

Python provider.loadDataFile Method Code Examples

This article collects typical usage examples of the Python method provider.loadDataFile. If you have been wondering what exactly provider.loadDataFile does, how to call it, or where to find real uses of it, the hand-picked code examples below may help. You can also explore further usage examples from the provider module to which the method belongs.


The following presents 12 code examples of provider.loadDataFile, sorted by popularity by default.
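
All of the examples below treat provider.loadDataFile as a loader that reads one HDF5 shard and returns a (data, label) pair. As a quick orientation, here is a hedged usage sketch; the file path is only an assumption, modeled on the ModelNet40 HDF5 release used by PointNet:

import provider

# Assumed path: one shard of the ModelNet40 HDF5 release (an assumption,
# not something fixed by the examples below).
data, label = provider.loadDataFile('data/modelnet40_ply_hdf5_2048/ply_data_train0.h5')
print(data.shape)   # e.g. (2048, 2048, 3): clouds x points x xyz
print(label.shape)  # e.g. (2048, 1): one class id per cloud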

Example 1: find_models

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def find_models(category, model, templates, case):
	# model:		Number of models to store for a particular category.
	# category:	Name of the category to store.
	# templates:	Array holding the templates (BxNx3).
	# case:			Which files to use (test/train).

	if case == 'test':
		FILES = TEST_FILES
	elif case == 'train':
		FILES = TRAIN_FILES
	print(FILES)
	count = 0												# Counter for the number of stored models.
	for train_idx in range(len(FILES)):						# Loop over all the files of the ModelNet40 data.
		current_data, current_label = provider.loadDataFile(FILES[train_idx])		# Load the data from one file.
		for i in range(current_data.shape[0]):
			if count < model and shapes.index(category) == current_label[i]:
				# import transforms3d.euler as t3d
				# rot = t3d.euler2mat(0*np.pi/180, 0*np.pi/180, 90*np.pi/180, 'szyx')
				# templates.append(np.dot(rot, current_data[i].T).T)
				# Append the cloud if it belongs to the category and fewer than `model` models are stored.
				templates.append(current_data[i]/2.0)
				count += 1
	return templates
Developer ID: vinits5, Project: pointnet-registration-framework, Lines of code: 24, Source file: generate_dataset.py
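
A hypothetical call follows; note that TRAIN_FILES/TEST_FILES and the shapes list of ModelNet40 category names are globals assumed by the snippet above:

# Hypothetical usage: collect up to 5 'chair' models from the training files.
templates = find_models(category='chair', model=5, templates=[], case='train')
print(len(templates))        # up to 5
print(templates[0].shape)    # (N, 3): one point cloud, scaled by 1/2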

Example 2: load_data

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def load_data(all_files, room_filelist, test_area_idx):
  # Load all data
  data_batch_list = []
  label_batch_list = []
  for h5_filename in all_files:
    data_batch, label_batch = provider.loadDataFile(h5_filename)
    data_batch_list.append(data_batch)
    label_batch_list.append(label_batch)
  data_batches = np.concatenate(data_batch_list, 0)
  label_batches = np.concatenate(label_batch_list, 0)

  test_area = 'Area_'+test_area_idx
  train_idxs = []
  test_idxs = []
  for i,room_name in enumerate(room_filelist):
    if test_area in room_name:
      test_idxs.append(i)
    else:
      train_idxs.append(i)

  return data_batches[train_idxs,...], label_batches[train_idxs], data_batches[test_idxs,...], label_batches[test_idxs] 
Developer ID: lightaime, Project: deep_gcns, Lines of code: 23, Source file: sem_seg_util.py
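
A hedged usage sketch, assuming the standard S3DIS HDF5 layout in which all_files.txt lists the data shards and room_filelist.txt gives one room name per block (both file names are assumptions, and load_data above must be in scope together with its provider dependency):

all_files = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/all_files.txt')]
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')]

# Hold out Area_5 for testing and train on the remaining areas.
train_data, train_label, test_data, test_label = load_data(all_files, room_filelist, '5')
print(train_data.shape, test_data.shape)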

Example 3: loadDataFile

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def loadDataFile(self, filename):
		return load_h5(filename) 
Developer ID: vinits5, Project: pointnet-registration-framework, Lines of code: 4, Source file: generate_dataset.py
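
Examples 3 and 6 delegate to a load_h5 helper that is not shown on this page. A minimal sketch of what that helper typically looks like in these PointNet-derived projects, assuming h5py and HDF5 shards that store 'data' and 'label' datasets:

import h5py

def load_h5(filename):
    # Read one HDF5 shard: 'data' holds (B, N, 3) point clouds,
    # 'label' holds (B, 1) integer class ids.
    with h5py.File(filename, 'r') as f:
        return f['data'][:], f['label'][:]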

Example 4: __init__

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def __init__(self, h5_filepath, num_point=1024, augment=False):
        print('loading ', h5_filepath)
        data, label = provider.loadDataFile(h5_filepath)
        assert len(data) == len(label)
        # data: (2048, 2048, 3) - (batchsize, point, xyz)
        # Reduce num point here.
        self.data = data[:, :num_point, :].astype(np.float32)
        # (2048,) - (batchsize,)
        self.label = np.squeeze(label).astype(np.int32)
        self.augment = augment
        self.num_point = num_point
        self.length = len(data)
        print('length ', self.length) 
Developer ID: corochann, Project: chainer-pointnet, Lines of code: 15, Source file: ply_dataset.py
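
A hypothetical usage sketch; the class name PlyDataset and the file path are assumptions here (the snippet above is the __init__ of a Chainer dataset class):

# Hypothetical: assuming the __init__ above belongs to a class named PlyDataset.
dataset = PlyDataset('data/modelnet40_ply_hdf5_2048/ply_data_train0.h5',
                     num_point=1024, augment=False)
print(dataset.length)         # number of point clouds in the shard
print(dataset.data[0].shape)  # (1024, 3) float32 point cloud
print(dataset.label[0])       # int32 class id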

Example 5: eval_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
            
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class, dtype=np.float64))))
Developer ID: hxdengBerkeley, Project: PointCNN.Pytorch, Lines of code: 42, Source file: train.py
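
The per-class bookkeeping above (total_seen_class / total_correct_class) can be expressed in vectorized form. A small equivalent sketch, not part of the original code:

import numpy as np

def per_class_accuracy(pred, label, num_classes):
    # Vectorized equivalent of the per-class counter loop above.
    seen = np.bincount(label, minlength=num_classes)
    correct = np.bincount(label[pred == label], minlength=num_classes)
    # Guard against classes that never occur in this evaluation split.
    return np.where(seen > 0, correct / np.maximum(seen, 1), 0.0)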

Example 6: loadDataFile

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def loadDataFile(self, filename):
		return self.load_h5(filename) 
Developer ID: vinits5, Project: pcrnet, Lines of code: 4, Source file: generate_dataset.py

Example 7: train_classifier_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def train_classifier_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    for fn in range(len(TRAIN_FILES_CLS)):
        # Load and shuffle the data of the current training file.
        current_data, current_label = provider.loadDataFile(TRAIN_FILES_CLS[fn])
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        # Padding with zero vectors increases accuracy by about 0.2%,
        # although the reason for this is unclear.
        current_data = np.concatenate([current_data, np.zeros((
                current_data.shape[0], NUM_FEATURE_CLS - current_data.shape[1]))], axis=-1)
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Input the features and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx,...],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            # Calculate the loss and classification scores.
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
                    
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val 
Developer ID: KuangenZhang, Project: ldgcnn, Lines of code: 40, Source file: train.py
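
The zero-padding step above simply widens the (batch, feature) matrix to NUM_FEATURE_CLS columns. A standalone illustration, with assumed shapes:

import numpy as np

NUM_FEATURE_CLS = 1024                                  # assumed target width
features = np.random.rand(32, 960).astype(np.float32)  # assumed input shape
padded = np.concatenate(
    [features,
     np.zeros((features.shape[0], NUM_FEATURE_CLS - features.shape[1]),
              dtype=features.dtype)],
    axis=-1)
assert padded.shape == (32, NUM_FEATURE_CLS)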

Example 8: train_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen))) 
Developer ID: hxdengBerkeley, Project: PointCNN.Pytorch, Lines of code: 45, Source file: train.py

Example 9: train_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        # Load data and labels from the files.
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        # Shuffle the data in the training set.
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotating, jittering, shifting, 
            # and scaling.
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)
            
            # Input the augmented point cloud and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            
            # Calculate the loss and accuracy of the input batch data.            
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen))) 
Developer ID: KuangenZhang, Project: ldgcnn, Lines of code: 56, Source file: train.py

Example 10: eval_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            # Input the point cloud and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            # Calculate the loss and classification scores.
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
            
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class, dtype=np.float64))))
    return total_correct / float(total_seen) 
Developer ID: KuangenZhang, Project: ldgcnn, Lines of code: 44, Source file: train.py

Example 11: save_global_feature

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def save_global_feature(sess, ops, saver, layers):
    feature_name = 'global_feature'
    file_name_vec = ['train_' + feature_name, 'test_' + feature_name]
    Files_vec = [TRAIN_FILES, TEST_FILES]
    # Restore the variables that achieved the best validation accuracy from disk.
    saver.restore(sess, os.path.join(LOG_DIR, FLAGS.model+
                                         str(NAME_MODEL)+ "_model.ckpt")) 
    log_string("Model restored.") 
    is_training = False
    # Extract the features from training set and validation set.
    for r in range(2):
        file_name = file_name_vec[r]
        Files = Files_vec[r]
        global_feature_vec = np.array([])
        label_vec = np.array([])
        for fn in range(len(Files)):
            log_string('----'+str(fn)+'----')
            current_data, current_label = provider.loadDataFile(Files[fn])
            current_data = current_data[:,0:NUM_POINT,:]
            current_label = np.squeeze(current_label)
            print(current_data.shape)
            
            file_size = current_data.shape[0]
            num_batches = file_size // BATCH_SIZE
            print(file_size)
            
            for batch_idx in range(num_batches):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx+1) * BATCH_SIZE
                # Input the point cloud and labels to the graph.
                feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                # Extract the global features from the input batch data.
                global_feature = np.squeeze(layers[feature_name].eval(
                    feed_dict=feed_dict,session=sess))
                
                if label_vec.shape[0] == 0:
                    global_feature_vec = global_feature
                    label_vec = current_label[start_idx:end_idx]
                else:
                    global_feature_vec = np.concatenate([global_feature_vec, global_feature])
                    label_vec = np.concatenate([label_vec, current_label[start_idx:end_idx]])      
        # Save all global features to the disk.
        FileIO.write_h5('data/extracted_feature/' + file_name + '.h5', global_feature_vec, label_vec) 
Developer ID: KuangenZhang, Project: ldgcnn, Lines of code: 47, Source file: train.py

Example 12: eval_classifier_one_epoch

# Required import: import provider [as alias]
# Or: from provider import loadDataFile [as alias]
def eval_classifier_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    file_size_sum = 0
    for fn in range(len(TEST_FILES_CLS)):
        current_data, current_label = provider.loadDataFile(TEST_FILES_CLS[fn])
        current_label = np.squeeze(current_label)
        # Padding with zero vectors increases accuracy by about 0.2%,
        # although the reason for this is unclear.
        current_data = np.concatenate([current_data, np.zeros((
                current_data.shape[0], NUM_FEATURE_CLS - current_data.shape[1]))], axis=-1)
        
        file_size = current_data.shape[0]
        file_size_sum += file_size
        num_batches = file_size // BATCH_SIZE
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            # Input the features and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx,:],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            # Calculate the loss and classification scores.
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            
            test_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
    accuracy = total_correct / float(total_seen)
    class_accuracy = np.mean(np.array(total_correct_class)/np.array(
            total_seen_class, dtype=np.float64))
    return accuracy, class_accuracy 
Developer ID: KuangenZhang, Project: ldgcnn, Lines of code: 47, Source file: train.py


Note: The provider.loadDataFile examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.