This article collects typical usage examples of the Python method config.cfg.batch_size. If you are wondering what exactly cfg.batch_size does, how to use it, or what it looks like in practice, the curated method examples here may help. You can also explore further usage examples of the containing class, config.cfg.
Fifteen code examples of the cfg.batch_size method are shown below, sorted by popularity by default.
Example 1: _make_batch_generator
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def _make_batch_generator(self):
    # data load and construct batch generator
    self.logger.info("Creating dataset...")
    trainset_loader = []
    for i in range(len(cfg.trainset)):
        if i > 0:
            ref_joints_name = trainset_loader[0].joints_name
        else:
            ref_joints_name = None
        trainset_loader.append(DatasetLoader(eval(cfg.trainset[i])("train"), ref_joints_name, True,
                                             transforms.Compose([
                                                 transforms.ToTensor(),
                                                 transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)])))

    self.joint_num = trainset_loader[0].joint_num
    trainset_loader = MultipleDatasets(trainset_loader)
    self.itr_per_epoch = math.ceil(len(trainset_loader) / cfg.num_gpus / cfg.batch_size)
    self.batch_generator = DataLoader(dataset=trainset_loader, batch_size=cfg.num_gpus * cfg.batch_size,
                                      shuffle=True, num_workers=cfg.num_thread, pin_memory=True)
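The two cfg.batch_size uses above encode the multi-GPU convention: the DataLoader draws num_gpus * batch_size samples per step, and itr_per_epoch divides the dataset length by that same product. A quick sketch of the arithmetic with made-up numbers (100,000 samples, 4 GPUs, per-GPU batch of 32 — none of these values come from the original config):

import math
num_samples, num_gpus, batch_size = 100000, 4, 32
effective_batch = num_gpus * batch_size                          # 128 samples per optimizer step
itr_per_epoch = math.ceil(num_samples / num_gpus / batch_size)   # ceil(781.25) = 782 steps per epoch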
Example 2: get_generators
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def get_generators():
    train_generator = TrainGenerator(base_dir=cfg.base_dir,
                                     annotation_file=os.path.join(cfg.base_dir, 'annotation_train.txt'),
                                     batch_size=cfg.batch_size,
                                     img_size=(cfg.width, cfg.height),
                                     nb_channels=cfg.nb_channels,
                                     timesteps=cfg.timesteps,
                                     label_len=cfg.label_len,
                                     characters=cfg.characters)
    val_generator = ValGenerator(base_dir=cfg.base_dir,
                                 annotation_file=os.path.join(cfg.base_dir, 'annotation_val.txt'),
                                 batch_size=5000,
                                 img_size=(cfg.width, cfg.height),
                                 nb_channels=cfg.nb_channels,
                                 timesteps=cfg.timesteps,
                                 label_len=cfg.label_len,
                                 characters=cfg.characters)
    return train_generator, val_generator
Example 3: make_data
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def make_data(self):
    from COCOAllJoints import COCOJoints
    from dataset import Preprocessing

    d = COCOJoints()
    train_data, _ = d.load_data(cfg.min_kps)

    from tfflat.data_provider import DataFromList, MultiProcessMapDataZMQ, BatchData, MapData
    dp = DataFromList(train_data)
    if cfg.dpflow_enable:
        dp = MultiProcessMapDataZMQ(dp, cfg.nr_dpflows, Preprocessing)
    else:
        dp = MapData(dp, Preprocessing)
    dp = BatchData(dp, cfg.batch_size // cfg.nr_aug)
    dp.reset_state()
    dataiter = dp.get_data()
    return dataiter
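The division in BatchData(dp, cfg.batch_size // cfg.nr_aug) suggests that Preprocessing yields cfg.nr_aug augmented variants per sample, so each training step still sees about cfg.batch_size tensors. A hypothetical usage check (the values 32 and 4, and the `trainer` object, are assumptions for illustration, not from the repository):

# assuming cfg.batch_size = 32 and cfg.nr_aug = 4, each batch groups 32 // 4 = 8
# original samples; after augmentation that is 4 * 8 = 32 tensors per training step
dataiter = trainer.make_data()  # `trainer` stands in for whatever object defines make_data
first_batch = next(dataiter)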
Example 4: loss_ohem
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def loss_ohem(preds, labels):
    labels = tf.cast(labels, tf.int64)
    labels = tf.reshape(labels, (cfg.batch_size,))
    print('pre labels', labels.get_shape())
    labels = tf.one_hot(labels, cfg.classes)
    print('labels', labels.get_shape())

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=preds, labels=labels)
    print('cross_entropy', cross_entropy.get_shape())

    keep_num = tf.cast(cfg.batch_size * cfg.train.ohem_ratio, tf.int32)
    cross_entropy = tf.reshape(cross_entropy, (cfg.batch_size,))
    print('cross_entropy', cross_entropy.get_shape())
    _, k_index = tf.nn.top_k(cross_entropy, keep_num)
    loss = tf.gather(cross_entropy, k_index)
    print('ohem loss', loss.get_shape())
    return tf.reduce_mean(loss)
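loss_ohem implements online hard example mining: it averages only the hardest cfg.train.ohem_ratio fraction of the batch. A minimal NumPy sketch of that top-k selection, with made-up losses and an assumed ratio of 0.5:

import numpy as np
losses = np.array([0.1, 2.3, 0.4, 1.7, 0.2, 0.9, 3.1, 0.05])  # per-sample cross-entropy
keep_num = int(len(losses) * 0.5)                             # ohem_ratio = 0.5 -> keep 4
hardest = np.sort(losses)[::-1][:keep_num]                    # [3.1, 2.3, 1.7, 0.9]
ohem_loss = hardest.mean()                                    # 2.0, vs ~1.09 for the plain mean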
Example 5: kernel_tile
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def kernel_tile(input, kernel, stride):
    # output = tf.extract_image_patches(input, ksizes=[1, kernel, kernel, 1], strides=[1, stride, stride, 1], rates=[1, 1, 1, 1], padding='VALID')
    input_shape = input.get_shape()
    tile_filter = np.zeros(shape=[kernel, kernel, input_shape[3], kernel * kernel],
                           dtype=np.float32)
    for i in range(kernel):
        for j in range(kernel):
            tile_filter[i, j, :, i * kernel + j] = 1.0

    tile_filter_op = tf.constant(tile_filter, dtype=tf.float32)
    output = tf.nn.depthwise_conv2d(input, tile_filter_op,
                                    strides=[1, stride, stride, 1], padding='VALID')
    output_shape = output.get_shape()
    output = tf.reshape(output, shape=[int(output_shape[0]), int(output_shape[1]),
                                       int(output_shape[2]), int(input_shape[3]), kernel * kernel])
    output = tf.transpose(output, perm=[0, 1, 2, 4, 3])
    return output
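The one-hot tile_filter makes the depthwise convolution copy each input channel into kernel * kernel output slots, reproducing the patch extraction of the commented-out tf.extract_image_patches call. A shape check with hypothetical sizes (the 8x14x14x32 input is an assumption for illustration):

x = tf.zeros([8, 14, 14, 32])
patches = kernel_tile(x, kernel=3, stride=2)
print(patches.get_shape())  # (8, 6, 6, 9, 32): one 3x3 patch per output position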
# input should be a tensor of shape [batch_size, caps_num_i, 16]
Example 6: mat_transform
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def mat_transform(input, caps_num_c, regularizer, tag=False):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    w = slim.variable('w', shape=[1, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])
    return votes
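Read as math, the tile/matmul sequence computes one vote per (input capsule i, output capsule j) pair: the input pose, reshaped to a 4x4 matrix, is multiplied by a learned transformation shared across the batch,

V_{ij} = P_i W_{ij}, \qquad P_i, W_{ij} \in \mathbb{R}^{4 \times 4},

and each product is flattened to the 16-dimensional vote returned above.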
Example 7: vec_transform
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def vec_transform(input, caps_num_out, channel_num_out):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[1])
    channel_num_in = int(input.get_shape()[-1])

    w = slim.variable('w', shape=[1, caps_num_out, caps_num_in, channel_num_in, channel_num_out],
                      dtype=tf.float32,
                      initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.reshape(input, shape=[batch_size, 1, caps_num_in, 1, channel_num_in])
    output = tf.tile(output, [1, caps_num_out, 1, 1, 1])
    output = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_out, caps_num_in, channel_num_out])
    return output
# input should be a tensor of shape [batch_size, caps_num_out, channel_num]
Example 8: dynamic_routing
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def dynamic_routing(input):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[2])
    caps_num_out = int(input.get_shape()[1])

    input_stopped = tf.stop_gradient(input, name='stop_gradient')
    b = tf.constant(np.zeros([batch_size, caps_num_out, caps_num_in, 1], dtype=np.float32))

    for r_iter in range(cfg.iter_routing):
        c = tf.nn.softmax(b, dim=1)
        if r_iter == cfg.iter_routing - 1:
            # last iteration: route the real (non-stopped) input so gradients flow
            s = tf.matmul(input, c, transpose_a=True)
            v = squash(tf.squeeze(s))
        else:
            s = tf.matmul(input_stopped, c, transpose_a=True)
            v = squash(tf.squeeze(s))
            b += tf.reduce_sum(tf.reshape(v, shape=[batch_size, caps_num_out, 1, -1]) * input_stopped,
                               axis=-1, keep_dims=True)
    return v
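The loop is routing-by-agreement (Sabour et al., 2017): per iteration, the logits b are softmaxed into coupling coefficients, votes are combined into s, squashed into v, and b is updated by the agreement between votes and outputs. In LaTeX, one iteration reads:

c_{ij} = \operatorname{softmax}_j(b_{ij}), \qquad s_j = \sum_i c_{ij}\,\hat{u}_{j|i}, \qquad v_j = \operatorname{squash}(s_j), \qquad b_{ij} \leftarrow b_{ij} + \hat{u}_{j|i} \cdot v_j,

with the gradient stopped through \hat{u} on all but the final iteration, exactly as input_stopped does above.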
Example 9: evaluation
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def evaluation(model, supervisor, num_label):
    teX, teY, num_te_batch = load_data(cfg.dataset, cfg.batch_size, is_training=False)
    fd_test_acc = save_to()
    with supervisor.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        test_acc = 0
        for i in tqdm(range(num_te_batch), total=num_te_batch, ncols=70, leave=False, unit='b'):
            start = i * cfg.batch_size
            end = start + cfg.batch_size
            acc = sess.run(model.accuracy, {model.X: teX[start:end], model.labels: teY[start:end]})
            test_acc += acc
        test_acc = test_acc / (cfg.batch_size * num_te_batch)
        fd_test_acc.write(str(test_acc))
        fd_test_acc.close()
        print('Test accuracy has been saved to ' + cfg.results + '/test_acc.csv')
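A note on the averaging: dividing the accumulated test_acc by cfg.batch_size * num_te_batch only yields an accuracy if model.accuracy returns the count of correct predictions per batch rather than a per-batch ratio; that appears to be the convention this snippet assumes.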
Example 10: loss
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def loss(v_len, output, x, y):
    max_l = tf.square(tf.maximum(0., cfg.m_plus - v_len))
    max_r = tf.square(tf.maximum(0., v_len - cfg.m_minus))

    l_c = y * max_l + cfg.lambda_val * (1 - y) * max_r
    margin_loss = tf.reduce_mean(tf.reduce_sum(l_c, axis=1))

    origin = tf.reshape(x, shape=[cfg.batch_size, -1])
    reconstruction_err = tf.reduce_mean(tf.square(output - origin))

    total_loss = margin_loss + 0.0005 * reconstruction_err
    tf.losses.add_loss(total_loss)
    return total_loss
Example 11: evaluate
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def evaluate(model, data_loader):
    # Setting up model
    test_iterator = data_loader(cfg.batch_size, mode="test")
    inputs = data_loader.next_element["images"]
    labels = data_loader.next_element["labels"]
    model.create_network(inputs, labels)

    # Create files to save evaluating results
    fd = save_to(is_training=False)

    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        test_handle = sess.run(test_iterator.string_handle())
        saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        probs = []
        targets = []
        total_acc = 0
        n = 0
        while True:
            try:
                test_acc, prob, label = sess.run([model.accuracy, model.probs, labels],
                                                 feed_dict={data_loader.handle: test_handle})
                probs.append(prob)
                targets.append(label)
                total_acc += test_acc
                n += 1
            except tf.errors.OutOfRangeError:
                break

        probs = np.concatenate(probs, axis=0)
        targets = np.concatenate(targets, axis=0).reshape((-1, 1))
        avg_acc = total_acc / n

        out_path = os.path.join(cfg.results_dir, 'prob_test.txt')
        np.savetxt(out_path, np.hstack((probs, targets)), fmt='%1.2f')
        print('Classification probability for each category has been saved to ' + out_path)
        fd["test_acc"].write(str(avg_acc))
        fd["test_acc"].close()
        out_path = os.path.join(cfg.results_dir, 'test_accuracy.txt')
        print('Test accuracy has been saved to ' + out_path)
Example 12: squash
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def squash(vector):
    '''Squashing function.
    Args:
        vector: A 4-D tensor with shape [batch_size, num_caps, vec_len, 1].
    Returns:
        A 4-D tensor with the same shape as vector but
        squashed in the 3rd and 4th dimensions.
    '''
    # reduce over the vector dimensions only (not the whole batch), so each
    # capsule is squashed independently, as the docstring promises
    vec_squared_norm = tf.reduce_sum(tf.square(vector), axis=[2, 3], keep_dims=True)
    scalar_factor = vec_squared_norm / (1 + vec_squared_norm)
    vec_squashed = scalar_factor * vector / tf.sqrt(vec_squared_norm + 1e-9)  # element-wise
    return vec_squashed
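As an equation, this is the squashing nonlinearity of Sabour et al. (2017), which shrinks each capsule vector s to a length in [0, 1) while preserving its direction:

\operatorname{squash}(s) = \frac{\lVert s \rVert^{2}}{1 + \lVert s \rVert^{2}} \cdot \frac{s}{\lVert s \rVert}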
Example 13: get_batch_data
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def get_batch_data():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    data_queues = tf.train.slice_input_producer([trX, trY])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=cfg.num_threads,
                                  batch_size=cfg.batch_size,
                                  capacity=cfg.batch_size * 64,
                                  min_after_dequeue=cfg.batch_size * 32,
                                  allow_smaller_final_batch=False)
    return (X, Y)
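tf.train.slice_input_producer and tf.train.shuffle_batch belong to the queue-based input pipeline that later TF 1.x releases deprecated in favor of tf.data. A rough tf.data equivalent, assuming the same load_mnist helper and cfg fields (a sketch, not code from this repository):

def get_batch_data_tfdata():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)
    dataset = tf.data.Dataset.from_tensor_slices((trX, trY))
    dataset = dataset.shuffle(buffer_size=cfg.batch_size * 32)  # mirrors min_after_dequeue
    dataset = dataset.repeat().batch(cfg.batch_size, drop_remainder=True)
    X, Y = dataset.make_one_shot_iterator().get_next()
    return (X, Y)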
Example 14: save_images
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def save_images(imgs, size, path):
    '''
    Args:
        imgs: [batch_size, image_height, image_width]
        size: a list with two int elements, [image_height, image_width]
        path: the path to save images
    '''
    imgs = (imgs + 1.) / 2  # inverse_transform
    return scipy.misc.imsave(path, mergeImgs(imgs, size))
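scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2, so on newer environments the save would go through imageio instead (still assuming this repository's mergeImgs helper):

import imageio
imageio.imwrite(path, mergeImgs(imgs, size))  # imageio may down-convert the float image to uint8 with a warning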
Example 15: loss
# Required import: from config import cfg [as alias]
# Or: from config.cfg import batch_size [as alias]
def loss(self):
    # 1. The margin loss

    # [batch_size, 10, 1, 1]
    # max_l = max(0, m_plus-||v_c||)^2
    max_l = tf.square(tf.maximum(0., cfg.m_plus - self.v_length))
    # max_r = max(0, ||v_c||-m_minus)^2
    max_r = tf.square(tf.maximum(0., self.v_length - cfg.m_minus))
    assert max_l.get_shape() == [cfg.batch_size, 10, 1, 1]

    # reshape: [batch_size, 10, 1, 1] => [batch_size, 10]
    max_l = tf.reshape(max_l, shape=(cfg.batch_size, -1))
    max_r = tf.reshape(max_r, shape=(cfg.batch_size, -1))

    # calc T_c: [batch_size, 10]
    # T_c = Y, is my understanding correct? Try it.
    T_c = self.Y
    # [batch_size, 10], element-wise multiply
    L_c = T_c * max_l + cfg.lambda_val * (1 - T_c) * max_r

    self.margin_loss = tf.reduce_mean(tf.reduce_sum(L_c, axis=1))

    # 2. The reconstruction loss
    origin = tf.reshape(self.X, shape=(cfg.batch_size, -1))
    squared = tf.square(self.decoded - origin)
    self.reconstruction_err = tf.reduce_mean(squared)

    # 3. Total loss
    self.total_loss = self.margin_loss + 0.0005 * self.reconstruction_err

    # Summary
    tf.summary.scalar('margin_loss', self.margin_loss)
    tf.summary.scalar('reconstruction_loss', self.reconstruction_err)
    tf.summary.scalar('total_loss', self.total_loss)
    recon_img = tf.reshape(self.decoded, shape=(cfg.batch_size, 28, 28, 1))
    tf.summary.image('reconstruction_img', recon_img)
    self.merged_sum = tf.summary.merge_all()
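Spelled out, the margin loss assembled here is Eq. 4 of the CapsNet paper, summed over the 10 digit capsules and averaged over the batch:

L_c = T_c \max(0,\, m^{+} - \lVert v_c \rVert)^{2} + \lambda\,(1 - T_c)\,\max(0,\, \lVert v_c \rVert - m^{-})^{2},

with m^{+} = cfg.m_plus, m^{-} = cfg.m_minus, \lambda = cfg.lambda_val, and the reconstruction MSE added with weight 0.0005.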