This article collects typical usage examples of params.batch_size in Python: what params.batch_size is for, how it is used, and what real code that reads it looks like. The curated examples below should cover the common cases, and you can also look further into how the params module itself is used.
The following shows 11 code examples of params.batch_size, ordered by popularity by default.
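All of the examples read batch_size (and, in some cases, related fields such as data_root, dataset_mean, dataset_std and cuda) from a project-level params module. For orientation, here is a minimal sketch of what such a module might look like; the attribute names match the examples below, but the concrete values are illustrative assumptions, not the settings of any original project.

# params.py -- hypothetical configuration module (illustrative values only)
batch_size = 64           # mini-batch size shared by the data loaders below
data_root = "data/"       # download/cache directory for datasets
dataset_mean = (0.5,)     # per-channel mean for transforms.Normalize
dataset_std = (0.5,)      # per-channel std for transforms.Normalize
cuda = True               # whether models/tensors should be moved to the GPU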
Example 1: get_usps
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import torch; from torchvision import transforms; a USPS dataset class)
def get_usps(train):
    """Get USPS dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=params.dataset_mean,
                                          std=params.dataset_std)])
    # dataset and data loader
    usps_dataset = USPS(root=params.data_root,
                        train=train,
                        transform=pre_process,
                        download=True)
    usps_data_loader = torch.utils.data.DataLoader(
        dataset=usps_dataset,
        batch_size=params.batch_size,
        shuffle=True)
    return usps_data_loader
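A minimal usage sketch for the loader above, assuming the params module is configured as sketched earlier and that torch, torchvision.transforms and a USPS dataset class are importable; it pulls a single batch to show that each batch holds at most params.batch_size samples.

usps_loader = get_usps(train=True)
for images, labels in usps_loader:
    print(images.size(), labels.size())   # first dimension is params.batch_size
    break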
Example 2: get_mnist
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import torch; from torchvision import datasets, transforms)
def get_mnist(train):
    """Get MNIST dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=params.dataset_mean,
                                          std=params.dataset_std)])
    # dataset and data loader
    mnist_dataset = datasets.MNIST(root=params.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)
    mnist_data_loader = torch.utils.data.DataLoader(
        dataset=mnist_dataset,
        batch_size=params.batch_size,
        shuffle=True)
    return mnist_data_loader
Example 3: valid_generator
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import cv2; import numpy as np)
def valid_generator():
    while True:
        for start in range(0, len(ids_valid_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_valid_split))
            ids_valid_batch = ids_valid_split[start:end]
            for id in ids_valid_batch.values:
                img = cv2.imread('input/train/{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
Example 4: data_loader
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import cv2; import numpy as np; from tqdm import tqdm)
def data_loader(q, ):
    for start in tqdm(range(0, len(ids_test), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch.values:
            img = cv2.imread('input/test/{}.jpg'.format(id))
            if input_size is not None:
                img = cv2.resize(img, (input_size, input_size))
            x_batch.append(img)
        x_batch = np.array(x_batch, np.float32) / 255
        q.put((ids_test_batch, x_batch))
    # one (None, None) sentinel per GPU tells the consumers to stop
    for g in gpus:
        q.put((None, None))
Example 5: data_loader
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import cv2; import numpy as np)
def data_loader(q, ):
    for start in range(0, len(ids_test), batch_size):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch.values:
            img = cv2.imread('input/test/{}.jpg'.format(id))
            img = cv2.resize(img, (input_size, input_size))
            x_batch.append(img)
        x_batch = np.array(x_batch, np.float32) / 255
        q.put(x_batch)
Author: petrosgk | Project: Kaggle-Carvana-Image-Masking-Challenge | Lines: 13 | Source file: test_submit_multithreaded.py
Example 6: predictor
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import cv2; import numpy as np; from tqdm import tqdm)
def predictor(q, ):
    for i in tqdm(range(0, len(ids_test), batch_size)):
        x_batch = q.get()
        with graph.as_default():
            preds = model.predict_on_batch(x_batch)
        preds = np.squeeze(preds, axis=3)
        for pred in preds:
            prob = cv2.resize(pred, (orig_width, orig_height))
            mask = prob > threshold
            rle = run_length_encode(mask)
            rles.append(rle)
Author: petrosgk | Project: Kaggle-Carvana-Image-Masking-Challenge | Lines: 13 | Source file: test_submit_multithreaded.py
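Examples 5 and 6 are the two halves of a producer/consumer pipeline: data_loader pushes preprocessed batches of size batch_size into a queue, and predictor pops them and runs the Keras model. A minimal sketch of how the two might be wired together, assuming the surrounding script already defines model, graph, ids_test and the other globals used above.

import queue
import threading

q = queue.Queue(maxsize=10)   # bounded, so the loader cannot run far ahead of the GPU
loader = threading.Thread(target=data_loader, args=(q,))
worker = threading.Thread(target=predictor, args=(q,))
loader.start()
worker.start()
loader.join()
worker.join()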
Example 7: train_generator
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import cv2; import numpy as np; the project's random* augmentation helpers)
def train_generator():
    while True:
        for start in range(0, len(ids_train_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_train_split))
            ids_train_batch = ids_train_split[start:end]
            for id in ids_train_batch.values:
                img = cv2.imread('input/train/{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                img = randomHueSaturationValue(img,
                                               hue_shift_limit=(-50, 50),
                                               sat_shift_limit=(-5, 5),
                                               val_shift_limit=(-15, 15))
                img, mask = randomShiftScaleRotate(img, mask,
                                                   shift_limit=(-0.0625, 0.0625),
                                                   scale_limit=(-0.1, 0.1),
                                                   rotate_limit=(-0, 0))
                img, mask = randomHorizontalFlip(img, mask)
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
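A minimal training sketch using the two generators above, assuming a compiled Keras model and the ids_train_split / ids_valid_split series; the step counts are derived from batch_size so each epoch covers the whole split once. The epoch count is an illustrative choice, not taken from the original project.

model.fit_generator(generator=train_generator(),
                    steps_per_epoch=int(np.ceil(len(ids_train_split) / float(batch_size))),
                    epochs=50,
                    validation_data=valid_generator(),
                    validation_steps=int(np.ceil(len(ids_valid_split) / float(batch_size))))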
Example 8: load_batch
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import random; xrange implies Python 2 -- use range on Python 3)
def load_batch(purpose):
    p = purpose
    assert len(imgs[p]) == len(wheels[p])
    n = len(imgs[p])
    assert n > 0
    # sample params.batch_size distinct indices without replacement
    ii = random.sample(xrange(0, n), params.batch_size)
    assert len(ii) == params.batch_size
    xx, yy = [], []
    for i in ii:
        xx.append(imgs[p][i])
        yy.append(wheels[p][i])
    return xx, yy
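A minimal sketch of consuming such a batch, assuming imgs and wheels have already been populated; it only converts the sampled Python lists into numpy arrays whose first dimension equals params.batch_size.

xx, yy = load_batch('train')
x = np.asarray(xx, dtype=np.float32)                 # (params.batch_size, ...) image batch
y = np.asarray(yy, dtype=np.float32).reshape(-1, 1)  # one steering target per sample
assert x.shape[0] == params.batch_size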
Example 9: load_batch_category_normal
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import random; xrange implies Python 2 -- use range on Python 3)
def load_batch_category_normal(purpose):
    p = purpose
    xx, yy = [], []
    nc = len(categories)
    # draw an equal share of params.batch_size samples from every category
    for c in categories:
        n = len(imgs_cat[p][c])
        assert n > 0
        ii = random.sample(xrange(0, n), int(params.batch_size/nc))
        assert len(ii) == int(params.batch_size/nc)
        for i in ii:
            xx.append(imgs_cat[p][c][i])
            yy.append(wheels_cat[p][c][i])
    return xx, yy
Example 10: convert
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
def convert(args):
    make_folder(args.save_folder)
    labels = get_labels(params)
    audio_conf = get_audio_conf(params)
    val_batch_size = min(8, params.batch_size_val)
    print("Using bs={} for validation. Parameter found was {}".format(val_batch_size, params.batch_size_val))
    train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=params.train_manifest,
                                       labels=labels, normalize=True, augment=params.augment)
    train_loader = AudioDataLoader(train_dataset, batch_size=params.batch_size,
                                   num_workers=(1 if params.cuda else 1))
    model = get_model(params)
    if args.continue_from:
        print("Loading checkpoint model %s" % args.continue_from)
        package = torch.load(args.continue_from)
        model.load_state_dict(package['state_dict'])
    if params.cuda:
        model = model.cuda()
    if params.cuda:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    # Begin ONNX conversion
    model.train(False)
    # Input to the model
    data = next(iter(train_loader))
    inputs, targets, input_percentages, target_sizes = data
    # detach the batch so no autograd state is traced during export
    # (the original line, torch.Tensor(inputs, requires_grad=False), is not a valid call)
    inputs = inputs.detach()
    if params.cuda:
        inputs = inputs.cuda()
    x = inputs
    print("input has size:{}".format(x.size()))

    # Export the model
    onnx_file_path = osp.join(osp.dirname(args.continue_from),
                              osp.basename(args.continue_from).split('.')[0] + ".onnx")
    print("Saving new ONNX model to: {}".format(onnx_file_path))
    torch.onnx.export(model,           # model being run
                      inputs,          # model input (or a tuple for multiple inputs)
                      onnx_file_path,  # where to save the model (can be a file or file-like object)
                      export_params=True,  # store the trained parameter weights inside the model file
                      verbose=False)
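Once the export finishes, the .onnx file can be sanity-checked independently of PyTorch. A minimal sketch, assuming the onnx and onnxruntime packages are installed (neither is used by the original script) and that x still holds the export input.

import numpy as np
import onnx
import onnxruntime

onnx_model = onnx.load(onnx_file_path)
onnx.checker.check_model(onnx_model)        # structural validation of the exported graph

session = onnxruntime.InferenceSession(onnx_file_path, providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
dummy = np.random.randn(*x.size()).astype(np.float32)   # same shape as the traced input
outputs = session.run(None, {input_name: dummy})
print([o.shape for o in outputs])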
Example 11: data_generator
# Required import: import params [as alias]
# Or: from params import batch_size [as alias]
# (also uses: import numpy as np; a shuffle function; the project's aug augmentation module)
def data_generator(data=None, meta_data=None, labels=None, batch_size=16, augment={}, opt_shuffle=True):
    indices = [i for i in range(len(labels))]
    while True:
        if opt_shuffle:
            shuffle(indices)
        x_data = np.copy(data)
        x_meta_data = np.copy(meta_data)
        x_labels = np.copy(labels)
        for start in range(0, len(labels), batch_size):
            end = min(start + batch_size, len(labels))
            sel_indices = indices[start:end]
            # select data
            data_batch = x_data[sel_indices]
            xm_batch = x_meta_data[sel_indices]
            y_batch = x_labels[sel_indices]
            x_batch = []
            for x in data_batch:
                # augment
                if augment.get('Rotate', False):
                    x = aug.Rotate(x, u=0.1, v=np.random.random())
                    x = aug.Rotate90(x, u=0.1, v=np.random.random())
                if augment.get('Shift', False):
                    x = aug.Shift(x, u=0.05, v=np.random.random())
                if augment.get('Zoom', False):
                    x = aug.Zoom(x, u=0.05, v=np.random.random())
                if augment.get('Flip', False):
                    x = aug.HorizontalFlip(x, u=0.5, v=np.random.random())
                    x = aug.VerticalFlip(x, u=0.5, v=np.random.random())
                x_batch.append(x)
            x_batch = np.array(x_batch, np.float32)
            yield [x_batch, xm_batch], y_batch
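A minimal sketch of pulling one batch from the generator, assuming hypothetical arrays train_imgs, train_meta and train_labels and a params module providing batch_size; it only inspects the shapes of the two model inputs and the labels.

gen = data_generator(data=train_imgs, meta_data=train_meta, labels=train_labels,
                     batch_size=params.batch_size,
                     augment={'Rotate': True, 'Flip': True})
[x_batch, xm_batch], y_batch = next(gen)
print(x_batch.shape, xm_batch.shape, y_batch.shape)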