本文整理汇总了Python中utils.normalize方法的典型用法代码示例。如果您正苦于以下问题:Python utils.normalize方法的具体用法?Python utils.normalize怎么用?Python utils.normalize使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils的用法示例。
在下文中一共展示了utils.normalize方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: search
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def search(self, model, vocab, query, n_results=10):
    """Search the pre-computed code representations for a text query.

    The query is converted to padded word indices, encoded and
    L2-normalized, then compared against every chunk of code vectors in
    a worker thread; each worker appends into the shared result lists.

    Returns:
        (codes, sims): aggregated snippets and similarity scores from
        all chunk workers (order depends on thread completion).
    """
    # Query sentence -> word indices, padded to the configured length.
    query_indices = [convert(vocab, query)]
    padded_query = pad(query_indices, self.data_params['desc_len'])
    # Encode and normalize; transpose to a column vector [dim x 1].
    encoded = model.repr_desc([padded_query]).astype(np.float32)
    query_vec = normalize(encoded).T
    codes, sims = [], []
    workers = [
        threading.Thread(
            target=self.search_thread,
            args=(codes, sims, query_vec, chunk, idx, n_results))
        for idx, chunk in enumerate(self._code_reprs)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:  # wait until all sub-threads finish
        worker.join()
    return codes, sims
示例2: search
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def search(config, model, vocab, query, n_results=10):
    """Encode a natural-language query and search all code-vector chunks.

    Args:
        config: dict with at least 'desc_len' and 'sim_measure'.
        model: torch model exposing desc_encoding().
        vocab: description vocabulary used to index the query words.
        query: natural-language query string.
        n_results: number of results each worker thread keeps.

    Returns:
        Aggregated results collected by all search_thread workers.
    """
    model.eval()
    device = next(model.parameters()).device
    # BUG FIX: the original called sent2indexes(query, vocab_desc, ...),
    # referencing an undefined/global name instead of the `vocab`
    # parameter that callers pass in.
    desc, desc_len = sent2indexes(query, vocab, config['desc_len'])  # convert query into word indices
    desc = torch.from_numpy(desc).unsqueeze(0).to(device)
    desc_len = torch.from_numpy(desc_len).clamp(max=config['desc_len']).to(device)
    with torch.no_grad():
        desc_repr = model.desc_encoding(desc, desc_len).data.cpu().numpy().astype(np.float32)  # [1 x dim]
    if config['sim_measure'] == 'cos':  # normalizing vector for fast cosine computation
        desc_repr = normalize(desc_repr)  # [1 x dim]
    results = []
    threads = []
    # NOTE(review): `codevecs` and `search_thread` are module-level names
    # defined elsewhere in this file.
    for i, codevecs_chunk in enumerate(codevecs):
        t = threading.Thread(target=search_thread,
                             args=(results, desc_repr, codevecs_chunk, i, n_results, config['sim_measure']))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:  # wait until all sub-threads have completed
        t.join()
    return results
示例3: _dynamics_func
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def _dynamics_func(self, state, action, reuse):
    """Predict the next state from a (state, action) pair.

    Both inputs are normalized with the init-dataset statistics,
    concatenated, and passed through an MLP that outputs the
    *normalized* state delta; the delta is unnormalized and added to
    the current state.

    Returns:
        next_state_pred: tensor of predicted next states.
    """
    ### PROBLEM 1
    ### YOUR CODE HERE
    stats = self._init_dataset
    normed_input = tf.concat(
        [utils.normalize(state, stats.state_mean, stats.state_std),
         utils.normalize(action, stats.action_mean, stats.action_std)],
        axis=1)
    # The MLP predicts the normalized difference (next_state - state).
    normed_delta = utils.build_mlp(
        normed_input,
        self._state_dim,
        scope='dynamics_func',
        n_layers=self._nn_layers,
        reuse=reuse)
    delta = utils.unnormalize(normed_delta,
                              stats.delta_state_mean,
                              stats.delta_state_std)
    return state + delta
示例4: _setup_training
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def _setup_training(self, state_ph, next_state_ph, next_state_pred):
    """Build the dynamics-model training loss and optimizer.

    The loss is the MSE between the actual and predicted state deltas,
    both normalized with the init-dataset delta statistics.

    Returns:
        loss: scalar loss tensor.
        optimizer: op performing one Adam gradient step on the loss.
    """
    ### PROBLEM 1
    ### YOUR CODE HERE
    stats = self._init_dataset
    actual_delta = utils.normalize(next_state_ph - state_ph,
                                   stats.delta_state_mean,
                                   stats.delta_state_std)
    predicted_delta = utils.normalize(next_state_pred - state_ph,
                                      stats.delta_state_mean,
                                      stats.delta_state_std)
    loss = tf.losses.mean_squared_error(actual_delta, predicted_delta)
    optimizer = tf.train.AdamOptimizer(self._learning_rate).minimize(loss)
    return loss, optimizer
示例5: forward_fc
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forward_fc(self, inp, weights, prefix, reuse=False):
    """Fully-connected forward pass through the hidden stack.

    When `prefix` contains 'val', the output logits are narrowed to the
    first `self.dim_output_val` columns.
    """
    n_hidden = len(self.dim_hidden)
    act = normalize(tf.matmul(inp, weights['w1']) + weights['b1'],
                    activation=tf.nn.relu, reuse=reuse, scope='0')
    for layer in range(2, n_hidden + 1):
        act = normalize(
            tf.matmul(act, weights['w' + str(layer)]) + weights['b' + str(layer)],
            activation=tf.nn.relu, reuse=reuse, scope=str(layer))
    final = str(n_hidden + 1)
    logits = tf.matmul(act, weights['w' + final]) + weights['b' + final]
    if 'val' in prefix:
        # Validation head uses only the first dim_output_val logits.
        logits = tf.gather(logits, tf.range(self.dim_output_val), axis=1)
    return logits
示例6: forward_fc
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forward_fc(self, inp, weights, reuse=False):
    """Run `inp` through the fully-connected network defined by `weights`."""
    depth = len(self.dim_hidden)
    act = normalize(tf.matmul(inp, weights['w1']) + weights['b1'],
                    activation=tf.nn.relu, reuse=reuse, scope='0')
    for k in range(2, depth + 1):
        act = normalize(tf.matmul(act, weights['w' + str(k)]) + weights['b' + str(k)],
                        activation=tf.nn.relu, reuse=reuse, scope=str(k))
    last = str(depth + 1)
    return tf.matmul(act, weights['w' + last]) + weights['b' + last]
示例7: localize_dt
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def localize_dt(dt: datetime, region_name=None):
    """Express `dt` in the region's timezone.

    Aware datetimes are converted via tz.normalize(); naive ones are
    assumed to already be region-local and merely tagged via localize().
    """
    zone = region_tz(region_name)
    if dt.tzinfo:
        return zone.normalize(dt)
    return zone.localize(dt)
示例8: local_now
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def local_now(region_name=None):
    """Return the current time as an aware datetime in the region's tz."""
    utc_now = pytz.utc.localize(datetime.utcnow())
    return region_tz(region_name).normalize(utc_now)
示例9: forgiving_dateparse
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forgiving_dateparse(dt, tz=pytz.utc):
    """Coerce a str or datetime into an aware datetime in `tz`.

    Strings are parsed with dateparse.parse_datetime; anything that is
    neither str nor datetime yields None. Naive datetimes are localized;
    aware ones are normalized (falling back to strip-and-localize when
    `tz` has no normalize(), i.e. is a plain tzinfo).
    """
    if isinstance(dt, str):
        dt = dateparse.parse_datetime(dt)
    elif not isinstance(dt, datetime):
        return None
    if not dt.tzinfo:
        return tz.localize(dt)
    try:
        return tz.normalize(dt)
    except AttributeError:
        # `tz` is not a pytz timezone: drop the tzinfo and re-localize.
        return tz.localize(dt.replace(tzinfo=None))
示例10: save
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def save(self, *args, **kwargs):
    """Populate `handle` from the normalized `name` when unset, then save."""
    self.handle = self.handle or utils.normalize(self.name)
    super().save(*args, **kwargs)
示例11: forward_fc
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forward_fc(self, inp, weights, reuse=False):
    """Forward the input through all fully-connected hidden layers."""
    hidden = inp
    for idx in range(len(self.dim_hidden)):
        w = weights['w' + str(idx + 1)]
        b = weights['b' + str(idx + 1)]
        # First layer keeps the legacy scope name '0'; later layers use
        # their 1-based index.
        layer_scope = '0' if idx == 0 else str(idx + 1)
        hidden = normalize(tf.matmul(hidden, w) + b,
                           activation=tf.nn.relu, reuse=reuse,
                           scope=layer_scope)
    out_key = str(len(self.dim_hidden) + 1)
    return tf.matmul(hidden, weights['w' + out_key]) + weights['b' + out_key]
示例12: forward_fc_withT
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forward_fc_withT(self, inp, weights, reuse=False):
    """Three-layer FC pass where each layer's output is post-multiplied
    by a transformation matrix `w{i}_f`; the first two layers are
    normalized with ReLU, the last is returned raw."""
    out = inp
    for idx in ('1', '2'):
        out = tf.matmul(tf.matmul(out, weights['w' + idx]) + weights['b' + idx],
                        weights['w' + idx + '_f'])
        out = normalize(out, activation=tf.nn.relu, reuse=reuse, scope=idx)
    out = tf.matmul(tf.matmul(out, weights['w3']) + weights['b3'],
                    weights['w3_f'])
    return out
示例13: forward_conv_withT
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def forward_conv_withT(self, inp, weights, reuse=False, scope=''):
    """Convolutional forward pass with per-layer transformation ('_f') weights.

    Each of the four conv layers applies its main kernel plus bias, then a
    second conv with the layer's `conv{i}_f` transformation weights, then
    normalization; downsampling is done either by stride-2 convolution or
    by max-pooling depending on FLAGS.max_pool.

    Returns the final linear projection `hidden5 @ w5_f`.
    """
    # reuse is for the normalization parameters.
    def conv_tout(inp, cweight, bweight, rweight, reuse, scope, activation=tf.nn.relu, max_pool_pad='VALID',
                  residual=False):
        # One conv layer: main conv (+bias), transformation conv with
        # rweight, normalization, optional max-pooling.
        # NOTE(review): `residual` is accepted but never used here.
        stride, no_stride = [1, 2, 2, 1], [1, 1, 1, 1]
        if FLAGS.max_pool:
            # Downsampling happens in the pooling step below, so convolve
            # with stride 1 here.
            conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME') + bweight
        else:
            # Downsample directly via stride-2 convolution.
            conv_output = tf.nn.conv2d(inp, cweight, stride, 'SAME') + bweight
        # Transformation conv with the layer's '_f' weights (stride 1).
        conv_output = tf.nn.conv2d(conv_output, rweight, no_stride, 'SAME')
        normed = normalize(conv_output, activation, reuse, scope)
        if FLAGS.max_pool:
            normed = tf.nn.max_pool(normed, stride, stride, max_pool_pad)
        return normed
    channels = self.channels
    # Restore the image shape: [batch, H, W, C].
    inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
    hidden1 = conv_tout(inp, weights['conv1'], weights['b1'], weights['conv1_f'], reuse, scope + '0')
    hidden2 = conv_tout(hidden1, weights['conv2'], weights['b2'], weights['conv2_f'], reuse, scope + '1')
    hidden3 = conv_tout(hidden2, weights['conv3'], weights['b3'], weights['conv3_f'], reuse, scope + '2')
    hidden4 = conv_tout(hidden3, weights['conv4'], weights['b4'], weights['conv4_f'], reuse, scope + '3')
    if FLAGS.datasource == 'miniimagenet':
        # last hidden layer is 6x6x64-ish, reshape to a vector
        hidden4 = tf.reshape(hidden4, [-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])
    else:
        # Global average pooling over the spatial dimensions.
        hidden4 = tf.reduce_mean(hidden4, [1, 2])
    hidden5 = tf.matmul(hidden4, weights['w5']) + weights['b5']
    # Final transformation projection with 'w5_f'.
    return tf.matmul(hidden5, weights['w5_f'])
示例14: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def main():
    """Extract features for a .bin verification set (spawning
    extract_feat.py when the feature file does not exist yet),
    L2-normalize them, and report verification accuracy."""
    global args
    args = parser.parse_args()
    if not os.path.exists(args.output_path):
        # FIX: run the extraction step as an argv list via subprocess
        # instead of a shell-interpolated os.system() string — robust to
        # paths containing spaces and not vulnerable to shell injection.
        import subprocess
        cmd = ['python', 'extract_feat.py',
               '--arch', str(args.arch),
               '--batch-size', str(args.batch_size),
               '--input-size', str(args.input_size),
               '--feature-dim', str(args.feature_dim),
               '--load-path', str(args.load_path),
               '--bin-file', str(args.bin_file),
               '--output-path', str(args.output_path)]
        print(' '.join(cmd))
        # check=True: fail fast here instead of crashing later on np.load.
        subprocess.run(cmd, check=True)
    features = np.load(args.output_path).reshape(-1, args.feature_dim)
    _, lbs = bin_loader(args.bin_file)
    print('feature shape: {}'.format(features.shape))
    # Each verification pair contributes two images -> 2 * len(lbs) rows.
    assert features.shape[0] == 2 * len(lbs), "{} vs {}".format(
        features.shape[0], 2 * len(lbs))
    features = normalize(features)
    _, _, acc, val, val_std, far = evaluate(features,
                                            lbs,
                                            nrof_folds=args.nfolds,
                                            distance_metric=0)
    print("accuracy: {:.4f}({:.4f})".format(acc.mean(), acc.std()))
示例15: repr_code
# 需要导入模块: import utils [as 别名]
# 或者: from utils import normalize [as 别名]
def repr_code(self, model):
    """Encode every code snippet in the 'use' dataset and return the
    L2-normalized representation vectors.

    Loads method names, API sequences and tokens from HDF5, pads each to
    its configured length, batches them through model.repr_code, and
    normalizes the resulting vectors for cosine-similarity search.
    """
    logger.info('Loading the use data ..')
    methnames = data_loader.load_hdf5(self.data_path+self.data_params['use_methname'],0,-1)
    apiseqs = data_loader.load_hdf5(self.data_path+self.data_params['use_apiseq'],0,-1)
    tokens = data_loader.load_hdf5(self.data_path+self.data_params['use_tokens'],0,-1)
    methnames = pad(methnames, self.data_params['methname_len'])
    apiseqs = pad(apiseqs, self.data_params['apiseq_len'])
    tokens = pad(tokens, self.data_params['tokens_len'])
    logger.info('Representing code ..')
    vecs = model.repr_code([methnames, apiseqs, tokens], batch_size=10000)
    # BUG FIX: np.float is a deprecated alias removed in NumPy >= 1.24 and
    # raises AttributeError there. Use float32, matching the dtype used for
    # query representations in search().
    vecs = vecs.astype(np.float32)
    vecs = normalize(vecs)
    return vecs