本文整理汇总了Python中Analysis类的典型用法代码示例。如果您正苦于以下问题:Python Analysis类的具体用法?Python Analysis怎么用?Python Analysis使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Analysis类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: OnPressEnter2
def OnPressEnter2(self, event):
    """Handle <Return> in the second entry: mark a goal as completed.

    The entry text is expected as "<category>-<goal text> <score>".  The
    goal is removed from storage, its trailing numeric score is credited
    to the category, and the score/goal tree plus the balance-life graph
    are rebuilt from the updated data.
    """
    solution = self.entryVariable2.get()
    part = solution.split("-")
    # Drop the finished goal, then credit its trailing score token
    # to the category named before the dash.
    rw_csv.deleteOldGoal(part[1])
    score = part[1].split(" ")[-1]
    rw_csv.writeScore({part[0]: int(score)})
    # Rebuild the tree view from scratch with the updated data.
    for row in self.tree.get_children():
        self.tree.delete(row)
    result = rw_csv.readScore()
    categories = ("Family", "Travel", "Studying", "Friend", "Volunteer")
    self.tree.insert("", 0, text="Score",
                     values=tuple(result[c] for c in categories))
    goal = rw_csv.readGoal()
    task_id = self.tree.insert("", 1, "Task", text="Task")
    # One newline-terminated blob of remaining goals per category,
    # in the same column order as the score row.
    texts = tuple("".join(g + "\n" for g in goal[c]) for c in categories)
    self.tree.insert(task_id, "end", text="", values=texts)
    Analysis.makingBalanceLifeGraph(*(result[c] for c in categories))
    # Keep a reference on self so the PhotoImage is not garbage collected.
    img = self.img = ImageTk.PhotoImage(Image.open('BalanceLifeGraph.png'))
    self.panel = Tkinter.Label(self, image=img)
    self.panel.pack(side="left")
示例2: single_processor
def single_processor(filequeue, pointslist, donelist, bgimg=None,
                     basedir=None, thresh=THRESH):
    """Drain *filequeue*, locating hole pairs in each image file.

    filequeue  -- queue of image file names to process
    pointslist -- shared list extended with every pair found
    donelist   -- shared list of file names already processed
    bgimg      -- optional background image passed to Analysis.loadsplit
    basedir    -- accepted but currently unused (kept for interface
                  compatibility) — TODO confirm whether it should prefix
                  the file names
    thresh     -- detection threshold forwarded to Analysis.findpairs

    Unreadable files (IOError) are skipped; KeyboardInterrupt stops the
    worker cleanly.
    """
    while not filequeue.empty():
        fname = filequeue.get()
        # Frame number is encoded as the file's basename (sans extension).
        frame_num = int(path.splitext(path.split(fname)[-1])[0])
        # Periodic progress report on stderr.
        if len(donelist) % 100 == 0:
            sys.stderr.write("Completed %d\n" % (len(donelist)))
            sys.stderr.write("Found %d holes\n" %
                             len(pointslist))
        try:
            img1, img2 = Analysis.loadsplit(fname, bgimg=bgimg)
            pointslist.extend(Analysis.findpairs(img1, img2, thresh=thresh,
                                                 DEBUG=DEBUG, abs_thresh=True,
                                                 frame_num=frame_num))
            donelist.append(fname)
        except IOError:
            # Unreadable frame: skip it and keep draining the queue.
            pass
        except KeyboardInterrupt:
            return
示例3: __init__
def __init__(self,Pathx, B):
    """Feed every third RGB/depth frame of dataset session *Pathx* (plus a
    numeric speech code and a scene label) into the Analysis module, then
    run the full training/analysis pass and report wall-clock timings.

    Pathx -- name of the single session directory (under the dataset
             root) to process; all other sessions are skipped.
    B     -- forwarded verbatim to Analysis.CompleteAnalysis.
    """
    # Hard-coded dataset root on the capture machine.
    path = "/media/iglu/Data/DatasetIglu"
    i = 0
    Analysis.initial()
    files = open("Output_1_L.txt", 'w')
    start_time = timeit.default_timer()
    for x in os.listdir(path):
        # if i == 0:
        # i += 1
        # continue
        # else:
        # Only the requested session is processed.
        if Pathx != x:
            continue
        for y in os.listdir(path + "/" + x) :
            # Skip the metadata file and zipped archives.
            if (y != "Objects.txt") and not y.endswith(".zip"):
                # print path+"/"+x+"/"+y+"/k1"
                f = open(path + "/" + x + "/" + y + "/k1" + "/List.txt", 'r')
                f2 = open(path + "/" + x + "/" + y + "/speech.txt", 'r')
                # Map the recorded utterance to a numeric speech code:
                # 1 = "This...", 2 = "That...", 0 = anything else.
                speak = f2.readline()
                if speak == 'This...':
                    sp = 1
                elif speak == 'That...':
                    sp = 2
                else:
                    sp = 0
                mn = 0
                # List.txt stores 4 lines per record: timestamp, RGB file
                # name, depth file name, scene label.
                for line in f:
                    Time = line
                    file1 = next(f).rstrip('\n')
                    file2 = next(f).rstrip('\n')
                    Label = next(f).rstrip('\n')
                    # files.write(RGB.__str__())
                    # files.write(Depth.__str__())
                    # files.write(Label+" "+x+" "+file1+"\n")
                    # mn cycles 0,1,2 so only every third record is loaded.
                    if mn == 0:
                        RGB = cv2.imread(path + "/" + x + "/" + y + "/k1" + "/RGB/" + file1)
                        Depth = cv2.imread(path + "/" + x + "/" + y + "/k1" + "/Depth/" + file2)
                        Analysis.addDepth(Depth)
                        Analysis.addRGB(RGB)
                        Analysis.addSpeech(sp)
                        Analysis.addScene(Label)
                    else:
                        if mn == 2:
                            mn = -1
                    mn += 1
        # print "Enviado " + x + " " + y
        i += 1
    elapsed = timeit.default_timer() - start_time
    print "Tiempo: " + elapsed.__str__()
    print "Starting Training"
    start_time = timeit.default_timer()
    Analysis.CompleteAnalysis(B, False, True, 0.3, files)
    files.close()
    # Analysis.Mostrar()
    # code you want to evaluate
    elapsed = timeit.default_timer() - start_time
    print "Tiempo: " + elapsed.__str__()
示例4: makebackground
def makebackground(filelist, n = 300):
    """Estimate a static background image as the per-pixel median of *n*
    frames drawn at random (with replacement) from *filelist*."""
    import numpy as np
    from random import randrange as rand
    # Probe the first frame only to learn the image dimensions.
    probe = Analysis.imread(filelist[0])
    rows, cols = np.shape(probe)
    stack = np.empty((rows, cols, n))
    for k in range(n):
        stack[:, :, k] = Analysis.imread(filelist[rand(len(filelist))])
    return np.median(stack, axis=2)
示例5: _line_spectrum
def _line_spectrum(data, min, line, dE, width, width_error):
    """Plot (and optionally save) the measured spectrum around *line*
    together with the fitted model.

    data        -- energy samples (eV) to histogram
    min         -- minimum counts per group when chi-square grouping is used
    line        -- line identifier, used in the model and output file names
    dE          -- fitted energy shift (eV)
    width       -- fitted FWHM (eV)
    width_error -- 1-sigma FWHM error, or None if the width was held fixed

    Relies on module-level configuration (binsize, method, plotting, shift,
    session, savedat) and pylab-style plotting names being in scope.
    """
    # Draw histogram
    n, bins = Analysis.histogram(data, binsize=binsize)
    # NOTE(review): ("cs") is a plain string, so this is a substring test,
    # not tuple membership — it works for method == "cs" but was probably
    # meant to be `in ("cs",)`.  Confirm intent before changing.
    if method in ("cs"):
        gn, gbins = Analysis.group_bin(n, bins, min=min)
    else:
        # No grouping in mle and ls
        gn, gbins = n, bins
    # Counts normalized per eV, with Poisson (sqrt-N) errors.
    ngn = gn/(np.diff(gbins))
    ngn_sigma = np.sqrt(gn)/(np.diff(gbins))
    # Bin centers for plotting/saving.
    cbins = (gbins[1:]+gbins[:-1])/2
    if plotting:
        figure()
        if width_error is not None:
            label = 'FWHM$=%.2f\pm %.2f$ eV' % (width, width_error)
        else:
            label = 'FWHM$=%.2f$ eV (Fixed)' % width
        if method == "cs":
            # NOTE(review): fmt=None is deprecated in newer matplotlib;
            # the modern spelling is fmt='none'.
            errorbar(cbins, ngn, yerr=ngn_sigma, xerr=np.diff(gbins)/2, capsize=0, ecolor='k', fmt=None, label=label)
        else:
            hist(data, bins=gbins, weights=np.ones(len(data))/binsize, histtype='step', ec='k', label=label)
        # Evaluate the fitted model on a fine energy grid.
        E = np.linspace(bins.min(), bins.max(), 1000)
        model = Analysis.normalization(ngn, gbins, dE, width, line=line, shift=shift) \
                * Analysis.line_model(E, dE, width, line=line, shift=shift, full=True)
        # Plot theoretical model
        plot(E, model[0], 'r-')
        # Plot fine structures
        for m in model[1:]:
            plot(E, m, 'b--')
        xlabel('Energy$\quad$(eV)')
        ylabel('Normalized Count$\quad$(count/eV)')
        legend(frameon=False)
        # Leave 10% headroom above the tallest feature.
        ymin, ymax = ylim()
        ylim(ymin, ymax*1.1)
        tight_layout()
        savefig("%s-%s.pdf" % (session, line))
    if savedat:
        # NOTE(review): header says keV but the data appear to be in eV
        # throughout — confirm units.
        np.savetxt('%s-%s.dat' % (session, line), np.vstack((cbins, gn)).T,
                   header='Energy (keV), Count', delimiter='\t')
示例6: processor
def processor(args):
"""map-able function that processes a single frame of data
Argument: a single tuple composed of the following, in order
file_name : string, required
pointslist : list, required
file_number : ignored, optional
background : array, optional
donelist : list, optional
A list of files that have already been processed
status : ??????
"""
fname = args[0]
frame_num = int(path.splitext(path.split(fname)[-1])[0])
pointslist = args[1]
if len(args) > 2: i = args[2]
if len(args) > 3:
bgimg = args[3]
else:
bgimg = None
if len(args) > 4:
donelist = args[4]
donelist.append(i)
if len(donelist)%1000 == 1:
sys.stderr.write("Completed %d\n"%(len(donelist)-1))
sys.stderr.write("Found %d holes\n" % len(pointslist))
if len(donelist)%10000 == 0 and len(pointslist) > 0:
xl, yl, varxl, varyl, el, xr, yr, varxr, varyr, er, framenum =\
zip(*list(pointslist))
savemat('tempfile.mat',
{'xl':xl, 'yl':yl, 'varxl': varxl, 'xr':xr, 'yr':yr,
'el': el, 'er': er, 'varxr': varxr, 'framenum':framenum},
oned_as = 'row')
sys.stderr.write("Saved a snapshot\n")
if len(args) > 5:
THRESH = args[5]
# Finally, load in the image
try:
img1, img2 = Analysis.loadsplit(fname, bgimg = bgimg)
except IOError:
print "Failed loading!"
return
pointslist.extend(Analysis.findpairs(img1, img2, thresh=THRESH,
DEBUG=DEBUG, abs_thresh=True,
frame_num = frame_num))
示例7: __init__
def __init__(self, parent, controller):
    """Build the quiz page: background image, scrollable read-only quiz
    text (including the user's last stored result), an answer entry bound
    to <Return>, a feedback label, and a button back to the home page."""
    tk.Frame.__init__(self, parent)
    # Keep a reference on self so the PhotoImage is not garbage collected.
    img = self.img = ImageTk.PhotoImage(Image.open('build_your_career.jpg'))
    panel = Tkinter.Label(self,image=img)
    panel.place(x="0",y="0")
    scrollbar = tk.Scrollbar(self)
    scrollbar.pack(side="right", fill="y")
    # Previous quiz outcome, as stored by the Analysis module.
    result = Analysis.infoOLD()
    text_field = "Quiz: Which Major Is Right For You?\nYour last result: "+result+"\nSource: http://getcollegecredit.com/blog/article/which_major_is_right_for_you\n"
    # Append the quiz questions from disk.
    f = open('quiz.txt','r')
    text_field = text_field + str(f.read())
    text = tk.Text(self)
    text.insert("end",text_field)
    text.pack()
    # Make the text read-only and hook it to the scrollbar.
    text.config(state="disabled",yscrollcommand=scrollbar.set)
    scrollbar.config(command=text.yview)
    # Answer entry: submitting triggers OnPressEnter.
    self.entryVariable = Tkinter.StringVar()
    self.entry = Tkinter.Entry(self,textvariable=self.entryVariable, width = 50)
    self.entry.bind("<Return>", self.OnPressEnter)
    self.entryVariable.set(u"Ex: 1 D,2 C, ...")
    self.entry.pack()
    # Feedback label updated elsewhere via labelVariable.
    self.labelVariable = Tkinter.StringVar()
    label = Tkinter.Label(self,textvariable=self.labelVariable,justify="left",wraplength=500,fg="yellow",bg="blue",width = 80,height=10)
    self.labelVariable.set(u"Hello!")
    label.pack()
    button1 = tk.Button(self, text="Back to Home",
                        command=lambda: controller.show_frame(HomePage))
    button1.pack()
示例8: __init__
def __init__(self, data, filename, view, parent):
    """Set up the disassembler view: detect the executable format, start
    background analysis in a daemon thread, position the view at the
    binary's entry point, and wire up the refresh timer, scroll bars and
    navigation callbacks.

    data     -- raw file contents (replaced by a parsed exe wrapper when
                one of ExeFormats recognizes it)
    filename -- accepted but not used in this constructor
    view     -- owning view object; receives the parsed exe and navigation
                registrations
    parent   -- Qt parent widget
    """
    super(DisassemblerView, self).__init__(parent)
    self.status = ""
    self.view = view
    self.data = data
    # Try each known executable format; first valid parser wins.
    for type in ExeFormats:
        exe = type(data)
        if exe.valid:
            self.data = exe
            self.view.exe = exe
            break
    # Create analysis and start it in another thread
    self.analysis = Analysis(self.data)
    self.analysis_thread = threading.Thread(None, self.analysis_thread_proc)
    self.analysis_thread.daemon = True
    self.analysis_thread.start()
    # Start disassembly view at the entry point of the binary
    if hasattr(self.data, "entry"):
        self.function = self.data.entry()
    else:
        self.function = None
    # View state defaults.
    self.update_id = None
    self.ready = False
    self.desired_pos = None
    self.highlight_token = None
    self.cur_instr = None
    self.scroll_mode = False
    self.blocks = {}
    self.show_il = False
    self.simulation = None
    # Create timer to automatically refresh view when it needs to be updated
    self.updateTimer = QTimer()
    self.updateTimer.setInterval(100)
    self.updateTimer.setSingleShot(False)
    self.updateTimer.timeout.connect(self.updateTimerEvent)
    self.updateTimer.start()
    self.initFont()
    # Initialize scroll bars
    self.width = 0
    self.height = 0
    self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
    self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
    self.horizontalScrollBar().setSingleStep(self.charWidth)
    self.verticalScrollBar().setSingleStep(self.charHeight)
    areaSize = self.viewport().size()
    self.adjustSize(areaSize.width(), areaSize.height())
    # Setup navigation
    self.view.register_navigate("disassembler", self, self.navigate)
    self.view.register_navigate("make_proc", self, self.make_proc)
    # Search state defaults.
    self.search_regex = None
    self.last_search_type = FindDialog.SEARCH_HEX
示例9: analyzeParsedSmali
def analyzeParsedSmali(classes,sharedobjs):
    """Run the analysis pass over parsed smali *classes*.

    Collects the class dependency sets, hands them (with *sharedobjs*) to
    Analysis.runAnalysis, persists the full dependency list to a pickle,
    and returns the analysis results.
    """
    # Begin Analysis
    log.info("Analysis Started")
    all_deps, internal_deps, external_deps, unknown_deps = getDependencies(classes)
    dependencies = {
        "all": all_deps,
        "internal": internal_deps,
        "external": external_deps,
        "unknown": unknown_deps,
    }
    results = Analysis.runAnalysis(classes, dependencies, sharedobjs)
    log.saveLibrariesToPickle(dependencies["all"])
    return results
示例10: get_total_SSRO_events
def get_total_SSRO_events(pqf, RO_start, RO_length, marker_chan, chan_rnd_0, chan_rnd_1, sync_time_lim, VERBOSE = True):
    """
    Returns SSRO data for all marked events.
    Colums are:
    Sync Nymber | number of photons | RND number indicator | RND number | Sync Time RND number | Sync Times photon 1-24 |
    """
    _a = get_attributes(pqf)
    # Gets the number of blocks in the data
    num_blocks = Analysis.get_num_blocks(pqf)
    if VERBOSE or (num_blocks > 1):
        print 'The total number of blocks is:', num_blocks
    # Initializes arrays to save the PQ-data
    PQ_sync_number = np.empty((0,), dtype = np.uint32)
    PQ_special = np.empty((0,), dtype = np.uint32)
    PQ_sync_time = np.empty((0,), dtype = np.uint64)
    PQ_time = np.empty((0,), dtype = np.uint64)
    PQ_channel = np.empty((0,), dtype = np.uint32)
    # Initializes an array to save the SSRO data (30 columns per event).
    total_SSRO_events = np.empty((0,30), dtype = np.uint64)
    # Loops over every block
    for i in range(num_blocks):
        # Get the SSRO events and PQ data for these sync numbers.
        # gc.collect() keeps memory in check between large block reads.
        gc.collect()
        _events, _PQ_sync_number, _PQ_special, _PQ_sync_time, _PQ_time, _PQ_channel = \
            get_SSRO_events(pqf, marker_chan, RO_start, RO_length, chan_rnd_0, chan_rnd_1, sync_time_lim = sync_time_lim, index = i+1, VERBOSE = VERBOSE)
        # Concatenates all PQ data
        PQ_sync_number = np.hstack((PQ_sync_number,_PQ_sync_number))
        PQ_special = np.hstack((PQ_special, _PQ_special))
        PQ_sync_time = np.hstack((PQ_sync_time, _PQ_sync_time))
        PQ_time = np.hstack((PQ_time, _PQ_time))
        PQ_channel = np.hstack((PQ_channel, _PQ_channel))
        # Stacks all SSRO data
        total_SSRO_events = np.vstack((total_SSRO_events, _events))
        if VERBOSE>2:
            print
            print 'Found {} valid marked SSRO events in block'.format(int(len(_events))), i+1
            print '===================================='
            print
    if VERBOSE:
        print
        print 'Found {} valid marked SSRO events in all blocks'.format(int(len(total_SSRO_events)))
        print '===================================='
        print
    return total_SSRO_events, _a, PQ_sync_number, PQ_special, PQ_sync_time, PQ_time, PQ_channel
示例11: OnPressEnter
def OnPressEnter(self,event):
    """Handle <Return> in the entry: regress stored (hours, score) study
    data against the entered value and display the predicted study hours
    for the next exam."""
    target = self.entryVariable.get()
    records = rw_csv.readStudyHrs()
    # Split the stored rows into parallel integer series.
    hours = [int(rec[0]) for rec in records]
    scores = [int(rec[1]) for rec in records]
    summary, predicted = Analysis.regression(hours, scores, int(target))
    message = summary + "You should study for " + str(predicted) + " hours for next exam!!!"
    self.text.insert("end", message)
    self.text.pack()
    # Return focus to the entry with its contents selected for easy retry.
    self.entry.focus_set()
    self.entry.selection_range(0, Tkinter.END)
示例12: __init__
def __init__(self):
    """Initialize the ROS node: load the bag-of-words model, prepare the
    Analysis module, create the OpenCV display windows, and subscribe to
    approximately-synchronized RGB and depth camera topics."""
    def nothing(x):
        # No-op callback placeholder.
        pass
    self.node_name = "cv_bridge_demo"
    rospy.init_node(self.node_name)
    # Pre-trained bag-of-words vocabulary from disk.
    self.BoW = BoW.BoW("/home/iglu/catkin_ws/src/RGBDHand/src/bof.pkl")
    Analysis.initial()
    # What we do during shutdown
    rospy.on_shutdown(self.cleanup)
    # Create the OpenCV display window for the RGB image
    self.cv_window_name = self.node_name
    cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
    cv.MoveWindow(self.cv_window_name, 25, 75)
    # And one for the depth image
    cv.NamedWindow("Depth Image", cv.CV_WINDOW_NORMAL)
    cv.MoveWindow("Depth Image", 25, 350)
    # And one for the histogram
    cv.NamedWindow("Histogram", cv.CV_WINDOW_NORMAL)
    cv.MoveWindow("Histogram", 480, 350)
    self.bridge = CvBridge()
    self.Im_p = Image_Process.Image_Process()
    # Subscribe to the camera image and depth topics and set
    # the appropriate callbacks
    self.RGB_sub = message_filters.Subscriber('/camera/rgb/image_raw', Image)
    self.Depth_sub = message_filters.Subscriber('/camera/depth/image_raw', Image)
    # rospy.Subscriber('Scene',String,callbackS)
    # Pair RGB and depth frames that arrive within the slop window.
    self.ts = message_filters.ApproximateTimeSynchronizer([self.RGB_sub, self.Depth_sub], 1,1)
    # self.image_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.image_callback)
    # self.depth_sub = rospy.Subscriber("/camera/depth/image_raw", Image, self.depth_callback)
    self.ts.registerCallback(self.image_callback)
    # Placeholder depth frame until the first message arrives.
    self.depth = np.zeros((480,680))
    rospy.loginfo("Waiting for image topics...")
示例13: _line_fit
def _line_fit(data, min, line):
    """Fit the spectral *line* to *data* and print a one-line summary.

    data -- energy samples to fit
    min  -- minimum counts per group (used by the chi-square method)
    line -- line identifier passed through to Analysis.fit

    Returns (dE, width, width_error).  Relies on module-level binsize,
    shift and method settings.
    """
    # Fit
    (dE, width), (dE_error, width_error), e = Analysis.fit(data, binsize=binsize, min=min, line=line, shift=shift, method=method)
    # For chi-square fits, e carries the goodness-of-fit statistics.
    if method == "cs":
        chi_squared, dof = e
    if method in ("mle", "ls"):
        print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV" \
            % (line, width, width_error, dE)
    elif method == "cs":
        print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV (Red. chi^2 = %.1f/%d = %.2f)" \
            % (line, width, width_error, dE, chi_squared, dof, chi_squared/dof)
    return dE, width, width_error
示例14: __init__
def __init__(self,B):
    """Feed every RGB/depth frame of the cleaned dataset (with its scene
    label) into the Analysis module, then run the full training/analysis
    pass and report wall-clock timings.

    B -- forwarded verbatim to Analysis.CompleteAnalysis.
    """
    # Hard-coded cleaned-dataset root on the capture machine.
    path = "/media/iglu/Data/Dataset/DatasetIglu/Dataset_cleaned_v4"
    i = 0
    Analysis.initial()
    files = open("Output_1_L.txt", 'w')
    start_time = timeit.default_timer()
    for x in os.listdir(path):
        # if i == 0:
        # i += 1
        # continue
        # else:
        # List.txt stores 4 lines per record: timestamp, RGB file name,
        # depth file name, scene label.
        f = open(path+"/"+x+"/List.txt",'r')
        for line in f:
            Time = line
            file1 = next(f).rstrip('\n')
            file2 = next(f).rstrip('\n')
            Label = next(f).rstrip('\n')
            RGB = cv2.imread(path+"/"+x+"/RGB/"+file1)
            Depth = cv2.imread(path+"/"+x+"/Depth/"+file2)
            # files.write(RGB.__str__())
            # files.write(Depth.__str__())
            # files.write(Label+" "+x+" "+file1+"\n")
            Analysis.addDepth(Depth)
            Analysis.addRGB(RGB)
            Analysis.addScene(Label)
        i += 1
        print "Enviado "+x
        # if i > 150:
        # break
    elapsed = timeit.default_timer() - start_time
    print "Tiempo: "+elapsed.__str__()
    print "Starting Training"
    start_time = timeit.default_timer()
    # NOTE(review): this passes 4 positional args while a sibling loader
    # passes 5 (B, False, True, 0.3, files) — confirm CompleteAnalysis's
    # signature accepts both.
    Analysis.CompleteAnalysis(B,True,0.3,files)
    files.close()
    #Analysis.Mostrar()
    # code you want to evaluate
    elapsed = timeit.default_timer() - start_time
    print "Tiempo: "+elapsed.__str__()
示例15: dumbClustering
def dumbClustering(wordList, fName='clusters.txt'):
    """
    Clusters by starting each comment as it's own cluster, and seeing which
    other clusters have a lot of words in common with it.
    No stemming, all words are included.
    It's pretty bad, really.

    wordList -- iterable of comments to cluster
    fName    -- output file; one cluster's str() per line
    """
    # make clusters: one singleton Cluster per comment that has any words
    allClusters = []
    for c in wordList:
        freqTuple = Analysis.wordFrequency([c])
        # Skip empty comments before constructing a Cluster (the original
        # built and discarded one per empty comment).
        if len(freqTuple) == 0:
            continue
        tempCluster = Cluster()
        # (word, frequency) pairs -> frequency dict.
        freqDict = dict(freqTuple)
        tempCluster.addComment(c, freqDict)
        allClusters.append(tempCluster)
    # play with these values: similarity thresholds swept from start to end
    start = 1.0
    mid = .6
    end = .3
    step = -1.0/15
    # combine clusters in a first coarse pass
    dumbClusterStep(start, mid, step, allClusters)
    # Sweep tiny clusters (<= 5 comments) into a single catch-all cluster,
    # iterating backwards so deletion doesn't shift unvisited indices.
    singleClusters = Cluster()
    for j in range(len(allClusters)-1, -1, -1):
        if allClusters[j].numComments() <= 5:
            singleClusters.addCluster(allClusters[j])
            del allClusters[j]
    allClusters.append(singleClusters)
    # Second, finer combining pass.
    dumbClusterStep(mid, end, step, allClusters)
    # `with` guarantees the file is closed even if str(c) raises.
    with open(fName, 'w') as f:
        for c in allClusters:
            f.write(str(c)+'\n')