This article collects typical usage examples of the Python method torchvision.datasets.folder.default_loader. If you are wondering how folder.default_loader is used in practice, or are looking for concrete examples of it, the selected code samples below may help. You can also explore other usage examples from the torchvision.datasets.folder module.
The following presents 14 code examples of folder.default_loader, sorted by popularity by default.
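Before the individual examples, here is a minimal sketch of what default_loader itself does (the image path is a placeholder): it reads an image file and returns a PIL.Image converted to RGB, or an accimage.Image when the accimage backend is enabled.

from torchvision.datasets.folder import default_loader

img = default_loader('path/to/image.jpg')  # placeholder path
print(img.size, img.mode)                  # e.g. (640, 480) 'RGB'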
Example 1: preprocess
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def preprocess(self, x, avoid_precomp=False):
    """
    Loads a single example using this field.

    Args:
        x: path of the image to load.
        avoid_precomp: if True, do not use precomputed features
            even when a precomputed file is available.

    Returns:
        The preprocessed image tensor, or the corresponding
        precomputed features.
    """
    if self.precomp_path and not avoid_precomp:
        precomp_file = h5py.File(self.precomp_path, 'r')
        precomp_data = precomp_file['data']
        return precomp_data[self.precomp_index.index(x)]
    else:
        x = default_loader(x)
        if self.preprocessing is not None:
            x = self.preprocessing(x)
        else:
            x = transforms.ToTensor()(x)
        return x
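A usage sketch for the method above (the field class name ImageField and its constructor arguments are assumptions; only preprocess() appears in the original snippet):

field = ImageField(precomp_path=None, preprocessing=None)  # hypothetical constructor
x = field.preprocess('images/example.jpg')                 # placeholder path; with no precomputed
                                                           # features the image is loaded with
                                                           # default_loader and converted by
                                                           # transforms.ToTensor()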
Example 2: load_image
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def load_image(img_fn):
    """Load the specified image and return it as a PIL.Image in RGB mode.
    """
    return default_loader(img_fn)
# # Load image
# image = skimage.io.imread(img_fn)
# # If grayscale. Convert to RGB for consistency.
# if image.ndim != 3:
# image = skimage.color.gray2rgb(image)
# # If has an alpha channel, remove it for consistency
# if image.shape[-1] == 4:
# image = image[..., :3]
# return image
# Let's do 16x9
# Two common resolutions: 16x9 and 16/6 -> go to 16x8 as that's simple
# let's say width is 576. for neural motifs it was 576*576 pixels so 331776. here we have 2x*x = 331776-> 408 base
# so the best thing that's divisible by 4 is 384. that's
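A short usage sketch for load_image (the file name is a placeholder). Converting the returned PIL.Image with numpy recovers the [H, W, 3] array layout referred to in the commented-out code:

import numpy as np

image = load_image('data/example.jpg')  # PIL.Image in RGB mode
array = np.asarray(image)               # uint8 array of shape [H, W, 3]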
Example 3: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, path, mat_path="data/market/attribute/market_attribute.mat", transform=None, loader=default_loader):
    assert os.path.exists(path) and os.path.exists(mat_path)
    self.path = path
    self.mat = loadmat(mat_path)["market_attribute"][0][0]
    self.attrs = self._make_attr_dict("test")
    self.transform = transform
    self.loader = loader
    file_list = os.listdir(self.path)
    self.file_list = []
    for item in file_list:
        if not item.endswith('.jpg') and not item.endswith('.png'):
            continue
        self.file_list.append(item)
    import json
    with open("market_name_to_id_test.json", "r") as f:
        data = json.load(f)
    self.name_to_id = data
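A usage sketch for this constructor (the dataset class name MarketAttributeTestSet is hypothetical, the paths are placeholders, and only __init__ is shown above):

dataset = MarketAttributeTestSet(
    path="data/market/bounding_box_test",                      # folder of .jpg/.png crops
    mat_path="data/market/attribute/market_attribute.mat",
)  # also expects market_name_to_id_test.json in the working directory
print(len(dataset.file_list), "image files found")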
Example 4: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, files, metadata=None, loader=default_loader):
    """
    metadata (List[Dict[Type]] or List[Type], Optional):
        metadata to be added to each sample.
        The Type can be anything that pytorch default_collate can handle.
        If Type is tensor, make sure that the tensors are of same dimension.
    """
    if metadata is not None:
        assert isinstance(metadata, list), "metadata should be a list"
        assert len(files) == len(metadata)
        assert len(files) > 0, "Empty ListDataset is not allowed"
        if not isinstance(metadata[0], dict):
            metadata = [{"target": target} for target in metadata]
    self.files = files
    self.metadata = metadata
    self.loader = loader
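A usage sketch for ListDataset (file paths are placeholders; plain targets are wrapped into {"target": ...} dictionaries by the constructor above):

files = ["data/img_0.jpg", "data/img_1.jpg"]
dataset = ListDataset(files, metadata=[0, 1], loader=default_loader)
print(dataset.metadata[0])  # {'target': 0}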
Example 5: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, image_roots,
             transform=None,
             loader=default_loader,
             stacker=None,
             intersection=False,
             verbose=None,
             size=None):
    self.image_roots = image_roots
    self.images = make_parallel_dataset(image_roots,
                                        intersection=intersection, verbose=verbose)
    if len(self.images) == 0:
        raise RuntimeError("Found 0 images within: %s" % image_roots)
    if size is not None:
        self.images = self.images[:size]
    if transform is not None and not hasattr(transform, '__iter__'):
        transform = [transform for _ in image_roots]
    self.transforms = transform
    self.stacker = stacker
    self.loader = loader
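A usage sketch for the parallel-folder dataset (the class name ParallelImageFolders is an assumption, the root folders are placeholders, and the effect of intersection is inferred from its name; only __init__ is shown above):

dataset = ParallelImageFolders(
    ['data/photos', 'data/segmentations'],  # folders holding correspondingly named images
    transform=transforms.ToTensor(),        # broadcast to every root unless a list is passed
    intersection=True,
)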
Example 6: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, root, train=True, transform=None, target_transform=None, loader=default_loader):
    """
    :param root: root directory where the IdenProf dataset is (or will be) stored.
    :param train: if True, load the training split, otherwise the test split.
    :param transform: transform applied to each image.
    :param target_transform: transform applied to each target.
    :param loader: callable used to load an image from its path.
    """
    self.transform = transform
    self.target_transform = target_transform
    if not os.path.exists(os.path.join(root, "idenprof", "train", "chef")):
        print("Downloading {}".format("https://github.com/OlafenwaMoses/IdenProf/releases/download/v1.0/idenprof-jpg.zip"))
        download_file("https://github.com/OlafenwaMoses/IdenProf/releases/download/v1.0/idenprof-jpg.zip", "idenprof.zip", extract_path=root)
    super(IdenProf, self).__init__(root=os.path.join(root, "idenprof", "train" if train else "test"),
                                   transform=transform, target_transform=target_transform, loader=loader)
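A usage sketch for the IdenProf wrapper above (the root path and transform are placeholders; the archive is downloaded and extracted on first use):

train_set = IdenProf(root="./data", train=True,
                     transform=transforms.Compose([
                         transforms.Resize((224, 224)),
                         transforms.ToTensor(),
                     ]))
image, label = train_set[0]  # presumably an ImageFolder-style (image, class-index) pair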
Example 7: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, image_folder, bone_folder, mask_folder, annotations_file_path,
             exclude_fields=None, flip_rate=0.0, loader=default_loader, transform=DEFAULT_TRANS):
    self.image_folder = image_folder
    self.bone_folder = bone_folder
    self.mask_folder = mask_folder
    self.flip_rate = flip_rate
    self.use_flip = self.flip_rate > 0.0
    self.exclude_fields = [] if exclude_fields is None else exclude_fields
    self.key_points = self.load_key_points(annotations_file_path)
    self.transform = transform
    self.loader = loader
Example 8: __getitem__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __getitem__(self, idx):
    img_name = join(self.root_dir,
                    self.partition[self.mode][idx])
    image = default_loader(img_name)
    if self.transform is not None:
        image = self.transform(image)
    return image
Example 9: _remove_all_not_found_image
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def _remove_all_not_found_image(df: pd.DataFrame, path_to_images: Path) -> pd.DataFrame:
    clean_rows = []
    for _, row in df.iterrows():
        image_id = row["image_id"]
        try:
            file_name = path_to_images / f"{image_id}.jpg"
            _ = default_loader(file_name)
        except (FileNotFoundError, OSError, UnboundLocalError) as ex:
            logger.info(f"broken image {file_name} : {ex}")
        else:
            clean_rows.append(row)
    df_clean = pd.DataFrame(clean_rows)
    return df_clean
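A usage sketch for the cleaning helper above (column names follow the snippet; the concrete ids and path are placeholders):

from pathlib import Path
import pandas as pd

df = pd.DataFrame({"image_id": ["0001", "0002"], "label": [0, 1]})
df_clean = _remove_all_not_found_image(df, Path("data/images"))
print(len(df_clean), "rows with readable .jpg files remain")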
Example 10: predict_from_file
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def predict_from_file(self, image_path: Path):
    image = default_loader(image_path)
    return self.predict(image)
Example 11: __getitem__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __getitem__(self, item: int) -> Tuple[torch.Tensor, np.ndarray]:
    row = self.df.iloc[item]
    image_id = row["image_id"]
    image_path = self.images_path / f"{image_id}.jpg"
    image = default_loader(image_path)
    x = self.transform(image)
    y = row[1:].values.astype("float32")
    p = y / y.sum()
    return x, p
Example 12: loaderfn
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def loaderfn(fn): return (default_loader(fn), fn)
Example 13: __init__
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def __init__(self, root, transform=None, target_transform=None, loader=default_loader):
    self.root = root
    self.transform = transform
    self.target_transform = target_transform
    self.loader = loader
    self.imgs = [path for path in list_pictures(self.root) if self.id(path) != -1]
    # convert person id to softmax continuous label
    self._id2label = {_id: idx for idx, _id in enumerate(self.unique_ids)}
Example 14: imagefolder_loader
# Required module import: from torchvision.datasets import folder [as alias]
# Or: from torchvision.datasets.folder import default_loader [as alias]
def imagefolder_loader(size=None, root="./data", shuffle=False, class_map=None, batch_size=32,
                       mean=0.5, std=0.5, transform="default",
                       allowed_exts=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'],
                       source=None, target_transform=None, **loader_args):
    """
    :param size: target image size, either an int or a (height, width) tuple.
    :param root: directory containing the image folders.
    :param shuffle: whether the returned DataLoader shuffles the data.
    :param class_map: optional class mapping passed on to DataFolder.
    :param batch_size: batch size of the returned DataLoader.
    :param mean: mean used for normalization when the default transform is built.
    :param std: standard deviation used for normalization when the default transform is built.
    :param transform: "default" to build a Resize/ToTensor/Normalize pipeline, or a custom transform.
    :param allowed_exts: image file extensions accepted by DataFolder.
    :param source: optional (url, filename) pair to download and extract into root.
    :param target_transform: transform applied to the targets.
    :param loader_args: extra keyword arguments forwarded to DataLoader.
    :return: a DataLoader over the image folder.
    """
    if source is not None:
        if not os.path.exists(root):
            print("Downloading {}".format(source[0]))
            download_file(source[0], source[1], extract_path=root)
        elif len(os.listdir(root)) == 0:
            print("Downloading {}".format(source[0]))
            download_file(source[0], source[1], extract_path=root)
    if size is not None:
        if not isinstance(size, tuple):
            size = (size, size)
    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))
        t.append(transformations.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))
        trans = transformations.Compose(t)
    else:
        trans = transform
    data = DataFolder(root=root, loader=default_loader, extensions=allowed_exts,
                      transform=trans, target_transform=target_transform, class_map=class_map)
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle, **loader_args)
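A usage sketch for imagefolder_loader (the data directory is a placeholder; with transform="default" the images are resized, converted to tensors and normalized with the given mean/std):

train_loader = imagefolder_loader(size=224, root="./data/train",
                                  batch_size=32, shuffle=True,
                                  mean=0.5, std=0.5)
for images, labels in train_loader:
    print(images.shape)  # e.g. torch.Size([32, 3, 224, 224])
    break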