This article collects typical usage examples of the Python net.Net class. If you are wondering what the Python Net class is for, how to use it, or what working examples look like, the selected class code examples below may help.
The following shows 15 code examples of the Net class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: run_small_net
def run_small_net():
    global training_data2, n2, t2, testing_data
    layers = []
    layers.append({'type': 'input', 'out_sx': 24, 'out_sy': 24, 'out_depth': 1})
    #layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'relu'})
    layers.append({'type': 'softmax', 'num_classes': 10})
    print 'Layers made...'

    n2 = Net(layers)
    print 'Smaller Net made...'
    print n2

    t2 = Trainer(n2, {'method': 'sgd', 'momentum': 0.0})
    print 'Trainer made for smaller net...'

    print 'In training of smaller net...'
    print 'k', 'time\t\t ', 'loss\t ', 'training accuracy'
    print '----------------------------------------------------'
    try:
        for x, y in training_data2:
            stats = t2.train(x, y)
            print stats['k'], stats['time'], stats['loss'], stats['accuracy']
    except:  # hit control-c or other
        pass

    print 'Testing smaller net: 5000 trials'
    right = 0
    count = 5000
    for x, y in sample(testing_data, count):
        n2.forward(x)
        right += n2.getPrediction() == y
    accuracy = float(right) / count * 100
    print accuracy
Example 2: OpenNET
def OpenNET(url):
    try:
        net = Net(cookie_file=cookiejar)
        #net = Net(cookiejar)
        try:
            second_response = net.http_GET(url)
        except:
            second_response = net.http_GET(url.encode("utf-8"))
        return second_response.content
    except:
        d = xbmcgui.Dialog()
        d.ok(url, "Can't Connect to site", 'Try again in a moment')
Example 3: FeatureExtractor
class FeatureExtractor:
    ''' Class for extracting trained features

    Features will be stored in a text file as a matrix. The size of the feature
    matrix is [num_img, feature_dimension].

    Run it as::
        >>> extractor = FeatureExtractor(solver_file, snapshot, gpu_idx)
        >>> extractor.build_net()
        >>> extractor.run(layer_name, feature_path)

    :ivar str solver_file: path of the solver file in Caffe's proto format
    :ivar int snapshot: the snapshot for testing
    :ivar str layer_name: name of the layer that produces the feature
    :ivar int gpu_idx: which gpu to perform the test
    '''
    def __init__(self, solver_file, snapshot, gpu_idx=0):
        self.solver_file = solver_file
        self.snapshot = snapshot
        self.gpu = owl.create_gpu_device(gpu_idx)
        owl.set_device(self.gpu)

    def build_net(self):
        self.owl_net = Net()
        self.builder = CaffeNetBuilder(self.solver_file)
        self.snapshot_dir = self.builder.snapshot_dir
        self.builder.build_net(self.owl_net)
        self.owl_net.compute_size('TEST')
        self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)

    def run(s, layer_name, feature_path):
        ''' Run feature extractor

        :param str layer_name: the layer to extract features from
        :param str feature_path: feature output path
        '''
        feature_unit = s.owl_net.units[s.owl_net.name_to_uid[layer_name][0]]
        feature_file = open(feature_path, 'w')
        batch_dir = 0
        for testiteridx in range(s.owl_net.solver.test_iter[0]):
            s.owl_net.forward('TEST')
            feature = feature_unit.out.to_numpy()
            feature_shape = np.shape(feature)
            img_num = feature_shape[0]
            feature_length = np.prod(feature_shape[1:len(feature_shape)])
            feature = np.reshape(feature, [img_num, feature_length])
            for imgidx in range(img_num):
                for feaidx in range(feature_length):
                    info = '%f ' % (feature[imgidx, feaidx])
                    feature_file.write(info)
                feature_file.write('\n')
            print "Finish One Batch %d" % (batch_dir)
            batch_dir += 1
        feature_file.close()
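The feature file written by run() is plain text with one row per image and feature_length space-separated values per row. As a hedged aside (not part of the original class), such a file can be read back into a [num_img, feature_dimension] NumPy array like this:

import numpy as np

# Load the extracted features; feature_path is the same path passed to run().
features = np.loadtxt(feature_path)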
Example 4: MultiviewTester
class MultiviewTester:
    ''' Class for performing multi-view testing

    Run it as::
        >>> tester = MultiviewTester(solver_file, softmax_layer, snapshot, gpu_idx)
        >>> tester.build_net()
        >>> tester.run()

    :ivar str solver_file: path of the solver file in Caffe's proto format
    :ivar int snapshot: the snapshot for testing
    :ivar str softmax_layer_name: name of the softmax layer that produces the prediction
    :ivar int gpu_idx: which gpu to perform the test
    '''
    def __init__(self, solver_file, softmax_layer_name, snapshot, gpu_idx=0):
        self.solver_file = solver_file
        self.softmax_layer_name = softmax_layer_name
        self.snapshot = snapshot
        self.gpu = owl.create_gpu_device(gpu_idx)
        owl.set_device(self.gpu)

    def build_net(self):
        self.owl_net = Net()
        self.builder = CaffeNetBuilder(self.solver_file)
        self.snapshot_dir = self.builder.snapshot_dir
        self.builder.build_net(self.owl_net)
        self.owl_net.compute_size('MULTI_VIEW')
        self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)

    def run(s):
        #multi-view test
        acc_num = 0
        test_num = 0
        loss_unit = s.owl_net.units[s.owl_net.name_to_uid[s.softmax_layer_name][0]]
        for testiteridx in range(s.owl_net.solver.test_iter[0]):
            for i in range(10):
                s.owl_net.forward('MULTI_VIEW')
                if i == 0:
                    softmax_val = loss_unit.ff_y
                    batch_size = softmax_val.shape[1]
                    softmax_label = loss_unit.y
                else:
                    softmax_val = softmax_val + loss_unit.ff_y
            test_num += batch_size
            predict = softmax_val.argmax(0)
            truth = softmax_label.argmax(0)
            correct = (predict - truth).count_zero()
            acc_num += correct
            print "Accuracy the %d mb: %f, batch_size: %d" % (testiteridx, correct, batch_size)
            sys.stdout.flush()
        print "Testing Accuracy: %f" % (float(acc_num) / test_num)
Example 5: test_basic
def test_basic():
    net = Net([2, 2, 1], 1, weights=1)
    err = net.train([0, 0], [1])
    for a, b in zip(basic_weights1, net.weights):
        print a
        print b
        print a == b
        n.testing.assert_array_almost_equal(a, b)
    err = net.train([0, 1], [0])
    for a, b in zip(basic_weights2, net.weights):
        print a
        print b
        print a == b
        n.testing.assert_array_almost_equal(a, b)
Example 6: __init__
def __init__(self, verbose=1, maxq=200):
    """
    Multithreaded network tools
    """
    Net.__init__(self)
    Tools.__init__(self)
    self.verbose = verbose
    self.maxq = maxq
    self.timeout = 0.2
    self.buffers = 256  # for check_port
Example 7: Solver
class Solver():
    def __init__(self, args):
        # prepare the datasets
        self.train_data = Dataset(train=True,
                                  data_root=args.data_root,
                                  size=args.image_size)
        self.test_data = Dataset(train=False,
                                 data_root=args.data_root,
                                 size=args.image_size)
        self.train_loader = DataLoader(self.train_data,
                                       batch_size=args.batch_size,
                                       num_workers=1,
                                       shuffle=True, drop_last=True)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = Net().to(self.device)
        self.loss_fn = torch.nn.L1Loss()
        self.optim = torch.optim.Adam(self.net.parameters(), args.lr)

        self.args = args

        if not os.path.exists(args.ckpt_dir):
            os.makedirs(args.ckpt_dir)

    def fit(self):
        args = self.args

        for epoch in range(args.max_epochs):
            self.net.train()
            for step, inputs in enumerate(self.train_loader):
                gt_gray = inputs[0].to(self.device)
                gt_ab = inputs[1].to(self.device)

                pred_ab = self.net(gt_gray)
                loss = self.loss_fn(pred_ab, gt_ab)

                self.optim.zero_grad()
                loss.backward()
                self.optim.step()

            if (epoch+1) % args.print_every == 0:
                print("Epoch [{}/{}] loss: {:.6f}".format(epoch+1, args.max_epochs, loss.item()))
                self.save(args.ckpt_dir, args.ckpt_name, epoch+1)

    def save(self, ckpt_dir, ckpt_name, global_step):
        save_path = os.path.join(
            ckpt_dir, "{}_{}.pth".format(ckpt_name, global_step))
        torch.save(self.net.state_dict(), save_path)
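This Solver reads several attributes off args (data_root, image_size, batch_size, lr, max_epochs, print_every, ckpt_dir, ckpt_name). A minimal driver sketch follows, assuming those argument names; the defaults are purely illustrative and are not taken from the original project:

import argparse

def parse_args():
    # Hypothetical CLI wrapper, inferred only from the attributes read off `args` above.
    p = argparse.ArgumentParser()
    p.add_argument("--data_root", type=str, default="data")
    p.add_argument("--image_size", type=int, default=256)
    p.add_argument("--batch_size", type=int, default=16)
    p.add_argument("--lr", type=float, default=1e-4)
    p.add_argument("--max_epochs", type=int, default=100)
    p.add_argument("--print_every", type=int, default=10)
    p.add_argument("--ckpt_dir", type=str, default="checkpoints")
    p.add_argument("--ckpt_name", type=str, default="net")
    return p.parse_args()

if __name__ == "__main__":
    solver = Solver(parse_args())
    solver.fit()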
Example 8: train
def train(args):
    # prepare the MNIST dataset
    train_dataset = datasets.MNIST(root="./data/",
                                   train=True,
                                   transform=transforms.ToTensor(),
                                   download=True)
    test_dataset = datasets.MNIST(root="./data/",
                                  train=False,
                                  transform=transforms.ToTensor())

    # create the data loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True, drop_last=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False)

    # turn on the CUDA if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    net = Net().to(device)
    loss_op = nn.CrossEntropyLoss()
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)

    for epoch in range(args.max_epochs):
        net.train()
        for step, inputs in enumerate(train_loader):
            images = inputs[0].to(device)
            labels = inputs[1].to(device)

            # forward-propagation
            outputs = net(images)
            loss = loss_op(outputs, labels)

            # back-propagation
            optim.zero_grad()
            loss.backward()
            optim.step()

        acc = evaluate(net, test_loader, device)
        print("Epoch [{}/{}] loss: {:.5f} test acc: {:.3f}"
              .format(epoch+1, args.max_epochs, loss.item(), acc))

    torch.save(net.state_dict(), "mnist-final.pth")
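Example 8 calls an evaluate(net, test_loader, device) helper that is not shown. As a rough sketch only (the name and signature are taken from the call site above; the body is an assumption, not the original project's code), such a helper could compute test accuracy like this:

def evaluate(net, test_loader, device):
    # Hypothetical helper: returns classification accuracy of `net` over `test_loader`.
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            predicted = outputs.argmax(dim=1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
    return correct / total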
Example 9: __init__
def __init__(self, args):
    self.epsilonStart = args.epsilonStart
    self.epsilonEnd = args.epsilonEnd
    self.epsilonDecayLength = args.epsilonDecayLength
    self.testEpsilon = args.testEpsilon
    self.replaySize = args.replaySize
    self.minReplaySize = args.minReplaySize
    self.framesPerState = args.framesPerState
    self.learnFrequency = args.learnFrequency
    self.targetNetworkUpdateFrequency = args.targetNetworkUpdateFrequency
    self.batchSize = args.batchSize
    self.actionNb = args.actionNb

    self.lastAction = 0
    self.lastFrame = None
    self.rng = np.random.RandomState(42)
    self.data = Data(self.replaySize, self.framesPerState, (100, 100))
    self.tickCount = 0
    self.learnCount = 0
    self.rewardAcc = 0.0
    self.episodeNb = 0
    self.qValueAcc = 0.0
    self.qValueNb = 0
    self.maxReward = 0
    self.episodeReward = 0
    self.test = False
    self.lastQs = collections.deque(maxlen=60)
    self.net = Net(args)
    self.qValues = []
    self.rewards = []
    self.tickCount = 0
Example 10: __init__
def __init__(self, meta, layers=[], rate=.05, target=None, momentum=None, trans=None, wrange=100):
    Learner.__init__(self, meta, target)
    inputs = len(self.meta.names()) - 1
    _, possible = self.meta[self.target]
    self.outputs = possible
    self.net = Net([inputs] + layers + [len(possible)], rate=rate, momentum=momentum, wrange=wrange, trans=trans)
Example 11: build_net
def build_net(self):
    self.owl_net = Net()
    self.builder = CaffeNetBuilder(self.solver_file)
    self.snapshot_dir = self.builder.snapshot_dir
    self.builder.build_net(self.owl_net)
    self.owl_net.compute_size('TEST')
    self.builder.init_net_from_file(self.owl_net, self.snapshot_dir, self.snapshot)
Example 12: __init__
def __init__(self, args):
    # prepare the datasets
    self.train_data = Dataset(args.scale, train=True,
                              data_root=args.data_root,
                              size=args.image_size)
    self.test_data = Dataset(args.scale, train=False,
                             data_root=args.data_root,
                             size=args.image_size)
    self.train_loader = DataLoader(self.train_data,
                                   batch_size=args.batch_size,
                                   num_workers=1,
                                   shuffle=True, drop_last=True)

    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.net = Net(args.scale).to(self.device)
    self.loss_fn = torch.nn.L1Loss()
    self.optim = torch.optim.Adam(self.net.parameters(), args.lr)

    self.args = args

    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)
Example 13: Solver
class Solver(object):
    """Docstring for Solver. """

    def __init__(self, param):
        """TODO: to be defined1. """
        self.param = param
        self.init_train_net(param)

    def init_train_net(self, param):
        net_param = pb.NetParameter()
        with open(param.net, "rb") as f:
            text_format.Merge(f.read(), net_param)
        net_state = pb.NetState()
        net_state.phase = pb.TRAIN
        # net_state.MergeFrom(net_param.state)
        # net_state.MergeFrom(param.train_state)
        net_param.state.CopyFrom(net_state)
        self.train_net = Net(net_param)

    def step(self, iters):
        avg_loss = self.param.average_loss
        losses = []
        smoothed_loss = 0
        for i in range(iters):
            loss = self.train_net.forward_backward()
            if len(losses) < avg_loss:
                losses.append(loss)
                size = len(losses)
                smoothed_loss = (smoothed_loss * (size - 1) + loss) / size
            else:
                idx = (i - 0) % avg_loss
                smoothed_loss += (loss - losses[idx]) / avg_loss
            log.info("Iteration %d, loss %f", i, smoothed_loss)
            self.compute_update_value(i)
            # self.train_net.update()

    def compute_update_value(self, i):
        current_step = i / 100000.0
        base_lr = 0.01
        gamma = 0.1
        rate = base_lr * pow(gamma, current_step)
        weight_decay = 0.0005
        momentum = 0.9
        self.train_net.update_params(rate, weight_decay, momentum)
Example 14: setup_net
def setup_net(self):
    if not self.setup:
        self.setup = True
        self.net = Net(*SimpleParser(open(self.sisc_file)).parse(), **self.options)
        for c, i in enumerate(self.net.inputs):
            self.net.inputs[i] = self.inputs[c] == 1
        for c, i in enumerate(self.net.outputs):
            self.net.outputs[i] = self.outputs[c] == 1
        self.components = TwoWayDict(dict(enumerate(self.net.gates.keys())))
Example 15: BackProp
class BackProp(Learner):
    def __init__(self, meta, layers=[], rate=.05, target=None, momentum=None, trans=None, wrange=100):
        Learner.__init__(self, meta, target)
        inputs = len(self.meta.names()) - 1
        _, possible = self.meta[self.target]
        self.outputs = possible
        self.net = Net([inputs] + layers + [len(possible)], rate=rate, momentum=momentum, wrange=wrange, trans=trans)

    def state(self):
        return [x.copy() for x in self.net.weights]

    def use_state(self, state):
        self.net.weights = state

    def classify(self, data):
        output = self.net.classify(data)
        # print 'result'
        # print output
        # print 'result', output, self.outputs
        return self.outputs[output[-1].argmax()]

    def validate(self, data, real):
        output = self.net.classify(data)[-1]
        label = self.outputs[output.argmax()]
        target = n.zeros(len(self.outputs))
        target[self.outputs.index(real)] = 1
        squerr = (target - output)**2
        return label, squerr.mean()

    def train(self, data, target):
        output = n.zeros(len(self.outputs))
        # print self.outputs, target
        output[self.outputs.index(target)] = 1
        if LOG:
            print 'training'
            print 'data', data
            print 'expected', output
            print 'weights'
            for level in self.net.weights:
                print '  ', level
        err = self.net.train(data, output)
        return err