本文整理汇总了Python中utils.Logger.append方法的典型用法代码示例。如果您正苦于以下问题:Python Logger.append方法的具体用法?Python Logger.append怎么用?Python Logger.append使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils.Logger
的用法示例。
在下文中一共展示了Logger.append方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sensing_decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def sensing_decision(self, sensing_result, number_cpes):
    """
    Fuse the per-CPE sensing reports into a single channel decision.

    Sums the reward-weighted votes for IDLE and OCCUPIED, normalizes by the
    CPE count and picks the higher score; ties are broken by the single CPE
    with the greatest individual reward. The final decision is logged and
    fed back to the learning algorithm.

    @param sensing_result Iterable of per-CPE decisions (IDLE/OCCUPIED).
    @param number_cpes Total number of CPEs (normalization factor; must be
           non-zero).
    """
    #print "simple cycle"
    #feedback cycle control
    self._fb_cycle += 1
    # accumulated score and best individual reward for each hypothesis
    sum_idle = 0.0
    sum_occupied = 0.0
    greatest_reward_idle = 0.0
    greatest_reward_occupied = 0.0
    # sum of all scores for each decision
    for cpe_decision, reward in zip(sensing_result, self._reward):
        if cpe_decision == IDLE:
            sum_idle += reward
            if reward > greatest_reward_idle:
                greatest_reward_idle = reward
        elif cpe_decision == OCCUPIED:
            sum_occupied += reward
            if reward > greatest_reward_occupied:
                greatest_reward_occupied = reward
    # divide the sum of scores by the total number of CPEs
    score_idle = sum_idle / number_cpes
    score_occupied = sum_occupied / number_cpes
    # Higher normalized score wins.
    # BUG FIX (naming): the original stored the result in a local named
    # `sensing_decision`, shadowing this very method; renamed to final_decision.
    if score_idle > score_occupied:
        final_decision = IDLE
    elif score_idle < score_occupied:
        final_decision = OCCUPIED
    else:
        # scores tied: trust the CPE with the greatest individual reward
        final_decision = IDLE if greatest_reward_idle >= greatest_reward_occupied else OCCUPIED
    #verifies if is feedback cycle
    #if (self._fb_cycle % (feedback_control-1) == 0):
    Logger.append('sdc', 'decision', final_decision)
    if final_decision == OCCUPIED:
        self._total_occ += 1
    else:
        self._total_idle += 1
    # NOTE(review): increase_rate / decrease_rate are not defined in this
    # method -- presumably module-level constants; confirm.
    self.feedback(sensing_result, final_decision, increase_rate, decrease_rate)
示例2: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, energy):
    """
    Q-learning (SARSA-style) decision on channel occupancy.

    Averages the input energy vector, performs one learning step every
    `cycle_counter_max` calls, logs the internals, and returns the
    hypothesis derived from the current state/threshold.

    @param energy Array of energy samples (numpy array).
    @return Tuple (hypothesis, mean_energy): hypothesis is 1 when the mean
            energy exceeds the current state, else 0.
    """
    energy = np.sum(energy) / energy.size
    self.cycle_counter += 1
    # Learning step fires only once per cycle_counter_max calls.
    if self.cycle_counter_max == self.cycle_counter:
        self.cycle_counter = 0
        sp = self.do_action(self.s, self.a)   # next state
        rw = self.get_reward(energy, sp)      # reward for the transition
        ap = self.e_greedy_selection(sp)      # next action (epsilon-greedy)
        self.update_q_table(self.s, self.a, rw, sp, ap)
        self.s = sp
        self.a = ap
        #self.epsilon *= 0.999
        # `rw` only exists on learning cycles, so log it here.
        Logger.append('bayes_learning', 'reward', rw)
    Logger.append('bayes_learning', 'hypothesis', 1.0 if energy > self.s else 0.0)
    Logger.append('bayes_learning', 'feedback', self._feedback)
    Logger.append('bayes_learning', 'state', self.s)
    Logger.append('bayes_learning', 'action', self.a)
    # BUG FIX: the original returned `1 if (energy > sp) else 0`, but `sp`
    # is unbound whenever the learning branch above did not run (NameError).
    # After the branch self.s == sp, so comparing against self.s is
    # equivalent on learning cycles and always defined on the others.
    return 1 if (energy > self.s) else 0, energy
示例3: work
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def work(self, input_items, output_items):
    """
    Hierarchical sensing work function.

    Runs the primary detector on each sample of input_items[0]; whenever it
    reports 0 (idle), the secondary detector confirms using the matching
    sample of input_items[1]. Each final decision is logged under
    ('hier', 'decision').

    @return Number of items consumed from input_items[0].
    """
    primary_stream = input_items[0]
    secondary_stream = input_items[1]
    for idx, sample in enumerate(primary_stream):
        verdict, _metric = self.algo1.decision(sample)
        if verdict == 0:
            verdict, _metric = self.algo2.decision(secondary_stream[idx])
        Logger.append('hier', 'decision', self._xx[Logger._ch_status][verdict])
    return len(primary_stream)
示例4: _tick
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def _tick(self):
    """
    One-second timer callback.

    Snapshots each per-second accumulator ('counting') into its published
    value ('cur'), resets the accumulator, and appends both rates to the
    Logger under this block's name.
    """
    for counter in (self._bps, self._pkts):
        counter['cur'], counter['counting'] = counter['counting'], 0
    Logger.append(self._name, 'bps', self._bps['cur'])   #pylint: disable=E1101
    Logger.append(self._name, 'pkts', self._pkts['cur']) #pylint: disable=E1101
示例5: get_pkt_accumulated
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def get_pkt_accumulated(self, clear=False):
    """
    Return the packet count accumulated since the last reset.

    The value is also appended to the Logger under this block's name.

    @param clear When True, reset the accumulator after reading it.
    @return The accumulated count (as it was before any reset).
    """
    accumulated = self._pkts['accumulated']
    Logger.append(self._name, 'pkt_accumulated', accumulated)  # pylint: disable=E1101
    if clear:
        self._pkts['accumulated'] = 0
    return accumulated
示例6: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, data_in):
    """
    Implementation of base class abstract method (energy detector).

    @param data_in Mag squared of samples (numpy array).
    @return Tuple (status, energy): status is 1 when the mean energy exceeds
            the configured threshold, 0 otherwise.
    """
    mean_energy = np.sum(data_in) / data_in.size
    status = int(mean_energy > self.threshold)
    Logger.append('energy_decision', 'energy', mean_energy)
    Logger.append('energy_decision', 'decision', self._xx[Logger._ch_status][status])
    return status, mean_energy
示例7: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, data_in):
    """
    Implementation of base class abstract method (cyclostationary detector).

    @param data_in Mag squared of samples.
    @return Tuple (status, metric): status is 1 when the normalized
            cyclostationary metric exceeds the configured threshold.
    """
    metric = self._algorithm.calculate_cyclo(data_in.tolist()) / len(data_in)
    status = int(metric > self.threshold)
    Logger.append('cyclo_decision', 'decision', self._xx[Logger._ch_status][status])
    return status, metric
示例8: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, data_in):
"""
Called from a signal processing block to made a decision.
@param data_in Mag squared of samples.
@return Tuple (status, correlation).
"""
max_corr = -1.0
for wave in self._waveforms:
max_corr = max(abs(self.correlate(wave, data_in)), max_corr)
dec = 1 if self.threshold < max_corr else 0
Logger.append('waveform_decision', 'decision', self._xx[Logger._ch_status][dec])
return dec, max_corr
示例9: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, data_l, data_m):
    """
    Function called from a signal processing block.

    Chooses between the learner's and the manager's decision: the manager's
    answer is used only on iterations where feedback is valid, and the
    feedback interval is widened/narrowed depending on whether the two
    agree. Timing, activation and the final hypothesis are logged.

    @param data_l Learner decision regarding channel occupancy.
    @param data_m Manager decision regarding channel occupancy.
    """
    self.strategy.wait()
    self._iteraction += 1
    # default to the learner's decision; overridden below when feedback is valid
    final_dec = data_l
    if self._valid_feedback:
        final_dec = data_m
        # NOTE(review): 19.3 is presumably the cost (time units) of a full
        # feedback exchange -- confirm against the experiment setup.
        self._time += 19.3
        self._count += 1
        Logger.set('feedback_algorithm', 'total_feedback', self._count)
        Logger.append('feedback_algorithm', 'activation', int(data_m))
        # set feedback in our learning algorithm
        self.learner.feedback = data_m
        # Increase feedback interval if both algorithms are correct
        if data_l == data_m:
            self.strategy.increase_time()
        # else decrease time
        else:
            self.strategy.decrease_time()
    else:
        # -1 marks an iteration without manager feedback
        Logger.append('feedback_algorithm', 'activation', -1)
        # NOTE(review): 0.2 is presumably the cheaper no-feedback cost -- confirm.
        self._time += 0.2
    # feedback is consumed; re-arm it only if the strategy says it is due
    self._valid_feedback = False
    if self.strategy.feedback():
        self._manager.enable(True)
        self._valid_feedback = True
    Logger.append('feedback_algorithm', 'time', self._time)
    Logger.append('feedback_algorithm', 'count', self._count)
    Logger.append('bayes_decision', 'hypothesis', final_dec)
示例10: work
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def work(self, input_items, output_items):
    """
    Two-stage detector work function.

    Uses the energy detector's decision (input_items[0]) directly; when it
    reports 0 (idle), the waveform-based fallback algorithm re-checks the
    raw samples (input_items[1]). Each final hypothesis is logged.

    @param input_items input_items[0]: per-item energy-detector decisions
           (first element of each item); input_items[1]: waveform samples.
    @param output_items Unused.
    @return Number of items consumed from input_items[0].
    """
    for idx in range(len(input_items[0])):
        self._iteraction += 1
        ed_dec = input_items[0][idx][0]
        wf = input_items[1][idx]
        final_dec = ed_dec
        if ed_dec == 0:
            # BUG FIX: the original assigned `final_dec = 1` here and then
            # immediately overwrote it on the next line -- dead store removed.
            final_dec = self._algorithm.decision(wf)[0]
        Logger.set('feedback_algorithm', 'total_feedback', self._count)
        Logger.append('bayes_decision', 'hypothesis', final_dec)
    return len(input_items[0])
示例11: __init__
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def __init__(self,
        name="SNREstimator",
        algorithm=SVR,
        alpha=0.001):
    """
    CTOR

    Wraps GNU Radio's MPSK SNR probe and registers a periodic Logger
    sampling of the estimated SNR.

    @param name Block name (also used as the Logger channel name).
    @param algorithm SNR estimator type forwarded to probe_mpsk_snr_est_c
           (default: SVR).
    @param alpha Averaging coefficient forwarded to the estimator.
    """
    # 10000: tag/update interval passed to the probe -- TODO confirm units
    self._estimator = digital.probe_mpsk_snr_est_c(algorithm, 10000, alpha)
    UHDGenericArch.__init__(self,
            name=name,
            input_signature=self._estimator.input_signature(),
            output_signature=self._estimator.output_signature())
    # Register an 'snr' series and sample it every 200 ms.
    Logger.register(name, ['snr', ])
    self.register_scheduling(lambda: Logger.append(name, 'snr', self.get_snr()), delay_sec=0.2) #pylint: disable=E1101
示例12: decision
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def decision(self, signal):
    """
    Method called for a USRP block (probably bayesian_detector).

    Updates the Bayesian counters, iterates the minimum-risk threshold
    search until it converges, then returns the hypothesis for the
    converged threshold. Decision, threshold and risk are logged.

    @param signal Vector of signal samples (energies).
    @return Tuple (decision, 0.0): decision is 1 for occupied, 0 for idle.
    """
    #hypothesis
    # update counters
    # feedback values other than 0/1 mean "no feedback this cycle"
    if self._feedback == 0 or self._feedback == 1:
        self.update_global_counter()
    th = self._th
    r = self._r[th]
    while True:
        # update bayes
        self.update_th_counter(signal)
        # update risk
        bayes_r, self._th = self.get_min_risk()
        # invalidate threshold. Wait until a new is provided
        # convergence: the chosen threshold or its risk stopped changing
        if th == self._th or r == bayes_r :
            self.feedback = -1
            break
        th = self._th
        r = self._r[th]
    # Clear black list of threshold
    self._black_list = []
    #print "d:", self.bayesian_hypothesis(self._th, signal), " f:", self.feedback, " t:", math.e ** self._th, " e:", sum(signal)/50
    bayes_hyp = self.bayesian_hypothesis(self._th, signal)
    # Save all data
    dec = 1 if bayes_hyp > 0 else 0
    Logger.append('bayes_decision', 'decision', self._xx[Logger._ch_status][dec])
    Logger.append('bayes_decision', 'threshold', self._th)
    Logger.append('bayes_decision', 'risk', self._r[self._th])
    return dec, 0.0
示例13: cognitive_radio_loop
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def cognitive_radio_loop(options, radio, channel_list):
    """
    Program loop here.

    Exports this radio's RPC command interface, waits for the broker to
    start the experiment, connects to the other radios and the base
    station, then repeatedly senses all channels and reports the results.
    (Python 2 code; the tail of this function is omitted in this listing.)

    @param options Parsed command-line options (my_id, broker_ip, pkt_len, ...).
    @param radio A RadioDevice instance.
    @param channel_list List of Channel objects.
    """
    # Export my server
    command = callback_radio(radio)
    # NOTE(review): hard-coded testbed IP; port is 8000 + device id -- confirm.
    my_rpc = RPCExporter(addr=("143.54.83.30", 8000 + options.my_id))
    my_rpc.register_function('command', command)
    my_rpc.start()
    # Wait broker start (busy-wait; the bare '1' is a no-op statement)
    while not command.run:
        1
    ####
    #Import OTHER RADIOS RPCS
    ####
    rpc_arr = []
    for i in [0, 1, 2, 3]:
        rpc_cli = RPCImporter(addr="http://%s:%d" % (HOSTS_IP[i], 8000 + i))
        rpc_cli.register_function('command')
        rpc_arr.append(rpc_cli)
    # Import PassiveRadio RPC calls
    bs_rpc = RPCImporter(addr="http://%s:9000" % (options.broker_ip))
    bs_rpc.register_function('command')
    # Register parameters for transmission
    Logger.register('radio', ['tx_pkts', 'rx_pkts', 'rx2_pkts', 'channel', 'operation', 'receiver', 'starving',
        'total_tx', 'total_rx', 'total_starving'])
    # loop
    pkt_len = options.pkt_len
    payload = struct.pack('%sB' % pkt_len, *[options.my_id] * pkt_len)
    print '##### Entering Transmitter loop'
    c_starving = 0
    while command.run:
        """
        ######## FUNCOES:
        ---- BW do canal
        ---- Channel's bandwidth.
        radio.get_bandwidth()
        ---- Num. simbolos na modulacao
        --- Number of symbols in the modulation.
        radio.{tx,rx}.symbols()
        ---- B/S da modulacao
        ---- B/S of the modulation
        radio.{tx,rx}.bits_per_symbol()
        ---- Pkt/s NO ULTIMO SEGUNDO
        ---- Pkt/s in the last second.
        radio.{tx,rx}.counter.get_pkts()
        ---- b/s NO ULTIMO SEGUNDO.
        ---- b/s in the last second.
        radio.{tx,rx}.counter.get_bps()
        ---- Pacotes acumulados desde a ultima chamada.
        ---- Accumulated packages since the last call.
        radio.{tx,rx}.counter.get_pkt_accumulated(clear = False)
        ---- Troca de canal. channel eh um objeto Channel
        ---- Channel changing. channel is a Channel object.
        radio.set_channel(channel)
        #################
        """
        # sense (busy-wait until the broker enables sensing or stops the run)
        while not command.sense and command.run:
            1
        if not command.run:
            break
        sense_data = []
        radio.set_gain(0)
        for channel in channel_list:
            # 0.1: sensing duration -- presumably seconds; confirm.
            decision, energy = radio.ss.sense_channel(channel, 0.1)
            sense_data.append((decision, float(energy), channel.get_channel()))
        # report (decision, energy, channel) triples to the base station
        bs_rpc.command([options.my_id, 'sense_data', sense_data])
        # CHOOSE RECEIVER
        #while not command.request_transmission:
        #    1
        # Select a receiver randomly
        #opt = [0, 1, 2, 3]
        #opt.remove(options.my_id)
        #receiver = random.choice(opt)
        #print '##### DEVICE %d requesting TX to %d' % (options.my_id, receiver)
#......... remainder of this function omitted in this listing .........
示例14: select_links_and_channels
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
def select_links_and_channels():
    """
    Randomly pick one link architecture and allocate channels to its links.

    Chooses a random entry of globs.arch (a sequence of (tx, rx) device
    tuples), builds the role table in globs.tx_links, asks
    globs.decision_func for a link->channel mapping, and records both the
    decision time and the final link table in the Logger. (Python 2 code:
    uses dict.iteritems.)
    """
    _idx = random.randint(0, len(globs.arch) - 1)
    tups = globs.arch[_idx]
    links = []
    links_to_tx_rx = {}
    for tup in tups:
        dev_tx = tup[0]
        dev_rx = tup[1]
        # channel is still unknown here: -1 is a placeholder filled in below
        globs.tx_links[dev_tx] = [dev_rx, 'tx', -1]
        globs.tx_links[dev_rx] = [dev_tx, 'rx', -1]
        l = globs.links[dev_tx][dev_rx]
        links.append(l)
        links_to_tx_rx[l] = (dev_tx, dev_rx)
    # time the channel-allocation decision itself
    t_now = time.time()
    links_to_ch = globs.decision_func(links, globs.use_channels)
    Logger.append('bs', 'decision_time', time.time() - t_now)
    for link, ch in links_to_ch.iteritems():
        dev_tx, dev_rx = links_to_tx_rx[link]
        # increment counter of used channels
        globs.p_channels_count[ch] += 1
        globs.allocated_channels.append(ch)
        # tx links was {key: [other_side, operation]}
        # tx links and now is {key: [other_side, operation, channel]}
        globs.tx_links[dev_tx] = [dev_rx, "tx", ch]
        globs.tx_links[dev_rx] = [dev_tx, "rx", ch]
    #links = copy.deepcopy(globs.tx_links)
    #used = []
    #for dev1, _tup in links.iteritems():
    #    if globs.use_channels and dev1 not in used:
    #        dev2 = _tup[0]
    #        operation = _tup[1]
    #        used.extend( [dev1, dev2])
    #        ## LOGIC TO SELECT CHANNELS
    #        channel = globs.decision_func()
    #        # channel can be either a single element OR
    #        # a dictionary: {link_x: frequency, link_y: frequency y, ...}
    #        if isinstance(channel, dict):
    #            print "##### Channel list returned is a dict"
    #            channel = channel[globs.links[dev1][dev2]]
    #        ####
    #        # increment counter os used channels
    #        globs.p_channels_count[channel] += 1
    #        globs.allocated_channels.append( channel )
    #        # tx links was {key: [other_side, operation]}
    #        # tx links and now is {key: [other_side, operation, channel]}
    #        globs.tx_links[dev1].append(channel)
    #        globs.tx_links[dev2].append(channel)
    Logger.append('bs', 'links', globs.tx_links)
示例15: main
# 需要导入模块: from utils import Logger [as 别名]
# 或者: from utils.Logger import append [as 别名]
#.........这里部分代码省略.........
trainset = dataloader(root=args.data_root, train=True, download=True, transform=transform_train)
trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
testset = dataloader(root=args.data_root, train=False, download=False, transform=transform_test)
testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
# Model
print("==> creating model '{}'".format(args.arch))
if args.arch.startswith('resnext'):
model = models.__dict__[args.arch](
cardinality=args.cardinality,
num_classes=num_classes,
depth=args.depth,
widen_factor=args.widen_factor,
dropRate=args.drop,
)
elif args.arch.startswith('densenet'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
growthRate=args.growthRate,
compressionRate=args.compressionRate,
dropRate=args.drop,
)
elif args.arch.startswith('wrn'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
widen_factor=args.widen_factor,
dropRate=args.drop,
)
elif args.arch.endswith('resnet'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
)
else:
model = models.__dict__[args.arch](num_classes=num_classes)
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Resume
title = 'cifar-10-' + args.arch
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
# Train and val
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
# append logger file
logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])
# save model
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': test_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
print('Best acc:')
print(best_acc)