本文整理汇总了Python中PyDbLite.Base.insert方法的典型用法代码示例。如果您正苦于以下问题:Python Base.insert方法的具体用法?Python Base.insert怎么用?Python Base.insert使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类PyDbLite.Base
的用法示例。
在下文中一共展示了Base.insert方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: welcome
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
def welcome():
    """Persist the credentials posted by the signup form and greet the user."""
    db = Base('alf.db')
    # mode="open" reuses the existing database, creating it only when absent.
    db.create('name', 'pwd', mode="open")
    username = request.form['Username']
    password = request.form['password1']
    # NOTE(review): the password is stored in plain text — consider hashing.
    db.insert(name=username, pwd=password)
    db.commit()
    return 'welcome ' + username
示例2: KopeteLog
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class KopeteLog():
    """Import Kopete XML chat logs into a PyDbLite database.

    Walks the Kopete log directory, parses every ``*.xml`` log file and
    stores one record per message in ``self.messages``.
    """
    def __init__(self, directory=None):
        # Default to the standard KDE/Kopete log location.
        if not directory:
            directory = os.path.join(os.path.expanduser("~"),
                                     ".kde/share/apps/kopete/logs")
        self.messages = Base('kopete.db')  # Database stuff - Initializing...
        self.messages.create('protocol', 'date', 'time', 'msgfrom', 'msgto',
                             'sender', 'inbound', 'nick', 'message',
                             mode='override')
        for file in self.searchLogs(directory):
            self.feedDatabase(file)

    def searchLogs(self, dir):
        """Return the paths of all ``.xml`` log files found below *dir*."""
        logfiles = []
        for (basepath, dirnames, filenames) in os.walk(dir):
            for child in filenames:
                if child.endswith(".xml"):
                    logfiles.append(os.path.join(basepath, child))
        return logfiles

    def feedDatabase(self, filepath):
        """Parse one Kopete XML log file and insert its messages."""
        # Guess the IM protocol from markers in the file path.
        if 'WlmProtocol' in filepath:
            protocol = 'wlm'
        elif 'ICQProtocol' in filepath:
            protocol = 'icq'
        elif 'JabberProtocol' in filepath:
            protocol = 'jabber'
        else:
            protocol = 'unknown'
        xmllog = parse(filepath)
        for head in xmllog.getiterator('head'):
            for date in head.getiterator('date'):
                month = date.attrib['month']
                year = date.attrib['year']
            for contact in head.getiterator('contact'):
                # Bug fix: dict.has_key() is Python-2-only; ``in`` works on
                # both Python 2 and 3. The owner's <contact> carries
                # type="myself"; presumably the peer's entry has no 'type'
                # attribute — TODO confirm against real Kopete logs.
                if 'type' in contact.attrib:
                    if contact.attrib['type'] == 'myself':
                        contactfrom = contact.attrib['contactId']
                else:
                    contactto = contact.attrib['contactId']
        # NOTE(review): contactfrom/contactto/year/month stay unbound if the
        # header lacks the expected elements — verify the log format.
        for msg in xmllog.getiterator('msg'):
            nick = msg.attrib['nick']
            inbound = msg.attrib['in']
            message = msg.text
            sender = msg.attrib['from']
            # The <msg> element stores only day+time; combine with the
            # header's year/month to build a full timestamp.
            date = datetime.strptime("%s;%s;%s" %
                                     (year, month, msg.attrib['time']),
                                     "%Y;%m;%d %H:%M:%S")
            self.messages.insert(
                protocol=protocol,
                date=date.strftime("%Y%m%d"),
                time=date.strftime("%H:%M:%S"),
                msgfrom=contactfrom, msgto=contactto,
                sender=sender, inbound=inbound, nick=nick,
                message=message)
示例3: Queue
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class Queue(object):
    """Simple queue with PyDbLite backend.

    Records have fields ('id', 'item'); ``id`` doubles as the position in
    the queue.
    """
    def __init__(self, queue_type=0, config=None):
        if config is not None:
            # copy config
            # NOTE(review): config support is unimplemented — when a config
            # is given none of the attributes below are set, so
            # create_queue()/commit() will fail. TODO: copy values here.
            pass
        else:
            # Bug fix: this flag was originally stored as ``self.commit``,
            # which shadowed the commit() method below and made it
            # uncallable (TypeError: 'bool' object is not callable).
            self.autocommit = False
            self.db_filename = "/tmp/queue.pydb"
            self.mode = "override"
        self.queue = Base(self.db_filename)

    def create_queue(self):
        """Create (or reset, per ``self.mode``) the table and its 'id' index."""
        self.queue.create('id', 'item', mode = self.mode)
        self.queue.create_index('id')
        return None

    def push(self, item):
        """Append *item*; its id is the current queue length."""
        self.queue.insert(self.length(), item)
        return None

    def pop(self):
        """Remove and return the most recently pushed record (LIFO order),
        or None when the queue is empty."""
        if not self.is_empty():
            id = self.length() - 1
            # Index lookup by 'id'; presumably returns the matching
            # record(s) — delete() accepts the same value.
            r = self.queue._id[id]
            self.queue.delete(r)
            return r
        else:
            return None

    def list(self):
        """Return the raw backing records."""
        return self.queue.records

    def length(self):
        return len(self.queue)

    def is_empty(self):
        return self.length() == 0

    def commit(self):
        """Persist the queue to disk when autocommit is enabled."""
        if self.autocommit is True:
            self.queue.commit()
        return None
示例4: makeDB
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
def makeDB(read, write, startTime = "2010-01-01 00:00:00", \
        endTime = "2010-01-01 00:10:00"):
    # Build a PyDbLite index of per-day sensor events (Python 2 code).
    #
    # read      -- directory/prefix containing "sensorN.txt" files
    # write     -- path of the PyDbLite database to (re)create
    # startTime -- inclusive lower bound of the time window
    # endTime   -- inclusive upper bound of the time window
    db = Base(write)
    # Convert the window bounds once, up front, via the project helper.
    startTime = calc.datetonumber(startTime)
    endTime = calc.datetonumber(endTime)
    #Day comes from day of the week. It is a number from 0 to 6.
    #0 = Monday 6 = Sunday.
    db.create('sensor', 'date', 'weekday', 'index', mode="override")
    db.open()
    allData = {}
    for i in range(len(bbdata.allSensors)):
        s = bbdata.allSensors[i]
        data = []
        print "Parsing sensor " + str(s)
        try:
            sString = read + "sensor" + str(s) + ".txt"
            f = open(sString).readlines()
            oldD = None
            for timeLine in f:
                # Each line presumably looks like "<idx> <date> <time> ...";
                # join the date and time columns for parsing — TODO confirm.
                tmp = timeLine.split()
                tmp = tmp[1] + " " + tmp[2]
                #tmp = tmp[0] + " " + tmp[1]
                d = datetime.datetime.strptime(tmp, "%Y-%m-%d %H:%M:%S")
                foo = calc.datetonumber(d)
                if foo >= startTime and foo <= endTime:
                    data.append(calc.datetonumber(d))
                    if d.toordinal() != oldD:
                        #Add to database: one record per sensor per day;
                        #'index' is the offset of that day's first event.
                        db.insert(s, d.toordinal(), d.weekday(), len(data) - 1)
                        oldD = d.toordinal()
                    print " " + str(d)
        except Exception, e:
            # Best-effort: a missing/corrupt sensor file only logs a message.
            print "Except:" + str(e)
            pass
        allData[s] = data
    # NOTE(review): db is never committed here — presumably the caller does.
示例5: __init__
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class EventPassportOffice:
#what do we need in init?
#pressure run ID number
#acoustic ID number
#(btw marking those separate is a bad idea on the operators part)
def __init__(self):
self.EventPassport = Base('EventPassport/EventPassport.pdl')
#check if the DB exists. If Yes, open, if not
#create it:
if not self.EventPassport.exists():
self.genPDL()
else:
self.EventPassport.open()
self.CleanEvents = CleanEvents.CleanData()
def genPDL(self):
#Create the PDL file for database
self.EventPassport.create('EventID','Temperature','Pressure','Time', 'RunNumber','Path', 'RunType', mode = "open")
#RunNumber is defined as RunNumberAcoustic
#Runtype can be neutron or alpha
def genPassport(self, Path, RunNumberAcoustic, RunNumberPressure, RunType_WS):
FilteredData = self.CleanEvents.MatchEvent_PressurePiezo(Path, str(RunNumberAcoustic), str(RunNumberPressure))
#Get the last EventID
recs = [ Record['EventID'] for Record in self.EventPassport if Record['RunNumber'] == RunNumberAcoustic]
if len(recs) == 0:
EID = str(RunNumberAcoustic)+"0001"
EID = int(EID)
else:
EID = max(recs)+1
#check if we have a duplicate!
for DataPoint in FilteredData:
timestamp = DataPoint[1]
#Check if we have a dupe/conflict
x = [Event for Event in self.EventPassport if Event['Time']-timedelta(seconds=2)<=timestamp<=Event['Time']+timedelta(seconds=2)]
if len(x) == 0:
self.EventPassport.insert(EventID = EID ,Temperature = DataPoint[3],Pressure = DataPoint[2],Time = DataPoint[1], RunNumber = RunNumberAcoustic, Path = DataPoint[0], RunType = RunType_WS)
EID += 1
print("Inserting Entry ...")
else:
print "Duplicate entry found at: "+str(DataPoint[1])+" Event ID: "+str(x[0]['EventID'])
self.EventPassport.commit()
def CheckPassport_RunNumber(self, RunNumberQry):
return self.EventPassport(RunNumber = RunNumberQry)
def CheckPassport_Temperature(self, HighTemp, LowTemp):
return self.EventPassport(HighTemp>Temperature>LowTemp)
def CheckPassport_Time(self, fromTime, toTime):
recs = [ r for r in self.EventPassport if fromTime < r['Time'] < toTime]
return recs
def SizeofPassportDB(self):
return len(self.EventPassport)
def CheckPassport_Runtype(self, runtype_WS):
return self.EventPassport(RunType = runtype_WS)
def CheckPassport_eventID(self, EventID_WS):
return self.EventPassport(EventID = EventID_WS)
def _deleteEvent(self, RecID_WS):
del self.EventPassport[RecID_WS]
self.EventPassport.commit()
示例6: range
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
# Populate the forum database with randomly generated threads and comments.
# NOTE(review): ``db``, ``sentence`` and ``word`` are presumably defined
# earlier in the full script; only the generation loop is shown here.
db.create_index("parent")
nbthreads = 200
for i in range(nbthreads):
    # generate thread
    author = "pierre"
    title = sentence(10, 10)
    text = sentence(100, 10)
    # Random thread timestamp in 2004-2006 (day capped at 28 to stay valid).
    date = datetime.datetime(
        random.randint(2004, 2006),
        random.randint(1, 12),
        random.randint(1, 28),
        random.randint(0, 23),
        random.randint(0, 59),
        random.randint(0, 59),
    )
    # Top-level threads use parent=-1; insert() returns the new record id,
    # used below to attach comments to this thread.
    thread_id = db.insert(parent=-1, author=author, title=title, text=text, date=date)
    # generate comments
    nbcomments = random.randint(0, 5)
    for i in range(nbcomments):
        # NOTE(review): reuses loop variable ``i`` and the thread ``title``
        # for every comment — presumably intentional for test data.
        author = word(10)
        text = sentence(50, 10)
        # Pick a comment date between the thread date and 2007-01-01.
        tdelta = datetime.date(2007, 1, 1) - date.date()
        c_date = date + datetime.timedelta(random.randint(1, tdelta.days))
        c_date = datetime.datetime(
            c_date.year, c_date.month, c_date.day, random.randint(0, 23), random.randint(0, 59), random.randint(0, 59)
        )
        db.insert(parent=thread_id, author=author, title=title, text=text, date=c_date)
db.commit()
示例7: db
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
# Scrape the student portal for grades with Selenium, store new/changed
# grades in a PyDbLite database, post to Facebook and beep (Python 2 code).
# NOTE(review): ``driver``, ``db`` and ``graph`` are presumably created
# earlier in the full script; only the scraping loop is shown here.
driver.find_element_by_id('fm1').submit()
# Navigate through the portal frames to the grades table.
driver.find_element_by_id('tabLink_u1240l1s214').click()
driver.find_element_by_id('formMenu:linknotes1').click()
driver.find_element_by_id('_id137Pluto_108_u1240l1n228_50520_:tabledip:0:_id158Pluto_108_u1240l1n228_50520_').click()
page = driver.find_element_by_id('_id111Pluto_108_u1240l1n228_50520_:tableel:tbody_element')
i = 0
for item in page.text.splitlines( ):
    # Rows of interest presumably end with the year suffix '20' — TODO confirm.
    if item.endswith('20'):
        # Drop the leading column, then split "<course ...> <grade>".
        line = item.split(' ',1)[1].lstrip()
        note = line.rsplit(' ', 1)[1]
        field = line.rsplit(' ', 1)[0]
        # Look up the course in the database (PyDbLite query syntax).
        courseindb = db("course")==field
        if (len(courseindb) == 0):
            # First time this course appears: store it and notify.
            db.insert(grade=note, course=field)
            graph.post(path='486181564779150/feed/', message='Nouvelle note : ' + field)
            db.commit()
            Freq = 2500 # Set Frequency To 2500 Hertz
            Dur = 1000 # Set Duration To 1000 ms == 1 second
            winsound.Beep(Freq,Dur)
            print "A new grade is available " + field + " : " + note
        else:
            for rec in courseindb: #only one
                if (rec["grade"] != note):
                    #FB update
                    # NOTE(review): the changed grade is announced but never
                    # written back to the database — verify this is intended.
                    Freq = 2500 # Set Frequency To 2500 Hertz
                    Dur = 1000 # Set Duration To 1000 ms == 1 second
                    winsound.Beep(Freq,Dur)
                    print "A grade has just been updated for " + field + " : " + note
示例8: TagOrganizer
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class TagOrganizer(Organizer):
    """Organizer that groups files by tags stored in a PyDbLite database.

    Subclasses implement generatetags() to derive tags; each tag becomes a
    virtual directory containing the tagged files.
    """
    def __init__(self, cache, category=None):
        # ``tags`` is created lazily in reset().
        self.tags = None
        self.category = category
        Organizer.__init__(self, cache, False)

    def reset(self):
        """Recreate the tag table from scratch and reset the base class."""
        if not self.tags:
            self.tags = Base(DB_FILE_TAGS)
        # mode='override' wipes any previous contents on every reset.
        self.tags.create('realpath', 'category', 'tag', mode = 'override')
        self.tags.create_index('realpath')
        self.tags.create_index('category')
        Organizer.reset(self)

    def updatecache(self):
        self._generatetags()
        Organizer.updatecache(self)

    def _deletefromcache(self, path):
        """Remove every tag record attached to the file's real path."""
        realpath = self.realpath(path)
        logger.debug("_deletefromcache(%s)" % realpath)
        for tag in self.tags.get_index('realpath')[realpath]:
            self.tags.delete(tag)

    def deletefromcache(self, path):
        self._deletefromcache(path)
        Organizer.deletefromcache(self, path)

    def addtocache(self, path):
        # Re-tag the file: drop stale records first, then regenerate.
        self._deletefromcache(path)
        self.generatetags(self.realpath(path))
        Organizer.addtocache(self, path)

    def generatepaths(self, realpath):
        """Yield one virtual path per tag: /<tag>/<basename>."""
        for record in self.tags.get_index('realpath')[realpath]:
            yield os.path.join(os.sep, record['tag'],
                               os.path.basename(realpath))

    def dirlist(self, path):
        """Only the root directory lists entries: one per known tag."""
        if path == '/':
            return self.taglist(self.category)
        else:
            return []

    ############################################
    # Tag functions
    def _generatetags(self):
        for filename in filter(util.ignoretag, #IGNORE:W0141
                               self.cache.filelist()):
            self.generatetags(filename)

    def generatetags(self, filename):
        """Hook for subclasses: derive and record tags for *filename*."""
        pass

    def tag(self, realpath, category, tag):
        """Record one tag for *realpath*; None and empty tags are ignored."""
        logger.debug('tag(%s, %s, %s)' % (realpath, category, tag))
        # Idiom fix: identity/inequality tests instead of ``not tag == None``.
        if tag is not None and tag != '':
            self.tags.insert(realpath, category, tag)

    def filelistbytags(self, category, tags):
        """Yield basenames of files carrying any of *tags* in *category*."""
        self.refreshcache()
        for record in self.tags.get_index('category')[category]:
            if record['tag'] in tags:
                yield os.path.basename(record['realpath'])

    def taglist(self, category):
        """Return the distinct tags recorded under *category*."""
        self.refreshcache()
        return util.unique([record['tag'] for record in
                            self.tags.get_index('category')[category]])
示例9: Organizer
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
#.........这里部分代码省略.........
currentpath = self.cache.filter.root
for pathpart in util.pathparts(util.removeroot(realpath,
self.cache.filter.root)):
currentpath = os.path.join(currentpath, pathpart)
self.addfile(currentpath)
else:
self.addfile(realpath)
def addfile(self, realpath):
    """
    Stores a file in self.transformed if not there already and returns the
    paths for that file in the proxy file system
    """
    logger.debug('addfile(%s)' % realpath)
    # Skip files the filter says to ignore relative to the cache root.
    if not util.ignoretag(util.removeroot(realpath,
                                          self.cache.filter.root)):
        return []
    self.refreshcache()
    transformed = self.transformed.get_index('realpath')[realpath]
    if transformed:
        # Already known: return the previously generated proxy paths.
        # NOTE(review): this branch returns a generator while the branch
        # below returns a list — callers presumably only iterate.
        return (record['path'] for record in transformed)
    else:
        paths = []
        for path in self.paths(realpath):
            # Resolve collisions by appending/incrementing a "(n)" suffix
            # until the proxy path is unused.
            while self.transformed.get_index('path')[path]:
                path = self.increasefilename(path)
            dirname = os.path.dirname(path)
            logger.debug('addfile(%s, %s, %s)' % (realpath, path, dirname))
            self.transformed.insert(realpath=realpath, path=path,
                                    dirname=dirname)
            paths.append(path)
        return paths
def increasefilename(self, filename):
    """
    Returns a new filename in sequence. Called if the current filename
    already exists. This default implementation adds a "(1)" to the end if
    not present or increases that number by one.
    """
    root, ext = os.path.splitext(filename)
    num = 1
    base = root
    matches = _INCREASE_REGEX.match(root)
    if matches is not None:
        num = int(matches.group(2)) + 1
        base = matches.group(1)
    # Bug fix: the original formatted with ``root`` (which still contains
    # the old "(n)" suffix), yielding "name(1)(2)" instead of "name(2)";
    # the stripped base captured by the regex was assigned but never used.
    return '%s(%i)%s' % (base, num, ext)
############################################
# General functions that read the cache
def filelist(self, path):
"""
Returns a list of directories and filenames in a list from cache
"""
logger.debug('filelist(%s)' % path)
self.refreshcache()
示例10: __init__
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class PressureVeto:
#We need the run number for init. We will use PyDBLite
#so we need to gen the db first. There will be another
#function for that. The reason we use this is because
#of native python compatibility
def __init__(self, RunNumber):
#property self.RunNumber assigned.
#This is typecasted to string for manipulation
self.RunNumber = str(RunNumber)
#property self.PyDB -> Database for pressures
self.PyDB = Base('pressures/'+self.RunNumber+'.dbl')
#check if the DB exists. If Yes, open, if not
#create it:
if not self.PyDB.exists():
self.genPDL()
else:
self.PyDB.open()
#Define the time iteration between bubbles minimum threshold
#Remember, each iteration is 1/10th second!
#Iter must be integer!
minSecondsBetweenBubbles = 4
self.minIterBetweenBubbles = int(minSecondsBetweenBubbles*10)
#Funtion to generate PyDBLite database
#I will deliberately not give this MySQL abilities
#since I dont want my data wiped out by "mistake"
#The human veto has to be in here somewhere.
def genPDL(self):
#Create the PDL file for database
self.PyDB.create('id','temp','pressure','time', mode = "override")
#import CSV for CSV file ops. Import ONLY if needed, so its here.
import csv
#filename in CSV file. Assumption -> RunID.csv
fname_csv = self.RunNumber+".csv"
PTcsv = csv.reader(open(fname_csv))
#convert CSV to PyDB line by line
for line in PTcsv:
self.PyDB.insert(id = int(line[0]),temp=float(line[1]), pressure=float(line[2]), time=datetime.strptime(line[3], "%Y-%m-%d %H:%M:%S"))
#Commit the database
self.PyDB.commit()
#Print a confirmation
print "Creating PyDB complete."
#this function finds the "peaks" in the pressures.
#Criterion: Peaks are above 30 PSI
def findBubbleTimings(self):
'''Finds the bubble timings
In -> Pressure data
Out -> Timings (datetime.datetime)
Assumptions -> Bubble PSI > 30 PSI
'''
#Select records with pressure > 30.0 PSI
recs = [r for r in self.PyDB]
#Make an iterator of this list
RecIter = itertools.islice(recs, None)
#Declare memory space for:
#Valid Bubbles
#Temporary Storage
#Last Record's ID (to stop Iterator)
ValidBubbles = []
_VBubbleAmpTemporaryStorage = []
RecLastID = recs[-1:][0]['__id__']
#Go record by record:
for record in RecIter:
#If pressure > 30:
if record['pressure'] >= 30.0:
#Assign the temporary memory with present pressure, time
_VBubbleAmpTemporaryStorage = [record['pressure'], record['time'], record['temp']]
#Number of steps to iter so we dont go beyond the last rec
stepsTillLastRec = RecLastID - record['__id__']
stepsIter = self.minIterBetweenBubbles if ( stepsTillLastRec > self.minIterBetweenBubbles) else stepsTillLastRec
#Investigate for next minIterBetweenBubbles for a maxima
for i in xrange(stepsIter):
#Progress iterator by 1
record = RecIter.next()
#is present iteration > memory stored variable? Yes: Store it, No: Continue
_VBubbleAmpTemporaryStorage = [record['pressure'], record['time'], record['temp']] if record['pressure']>=_VBubbleAmpTemporaryStorage else _VBubbleAmpTemporaryStorage
#The local maxima is found, store it as good data, continue searching
ValidBubbles.append(_VBubbleAmpTemporaryStorage)
#clear the temporary space
_VBubbleAmpTemporaryStorage = []
#Return the time cut!
return ValidBubbles
示例11: __init__
# 需要导入模块: from PyDbLite import Base [as 别名]
# 或者: from PyDbLite.Base import insert [as 别名]
class BARTSIDEE_CONFIG:
    # Application configuration backed by two PyDbLite databases:
    # "maindb" (key/value settings) and "searchdb" (search cache).
    def __init__(self):
        self.path_temp = mc.GetTempDir()
        self.path_module = os.path.join(CWD, "modules")
        if not os.path.exists(self.path_module):
            os.makedirs(self.path_module)
        # Make bundled modules importable.
        sys.path.append(self.path_module)
        self.app_version = VERSION
        # Bump db_version to force a reset of stored settings on upgrade.
        self.db_version = 11
        self.initDB()
        GA.debug = self.get("debug")

    def initDB(self):
        # Open (or create) both databases; mode="open" keeps existing data.
        self.db = Base("maindb")
        self.db.create("id", "data", mode="open")
        self.searchdb = Base("searchdb")
        self.searchdb.create("module", "id", "timestamp", "data", mode="open")
        self.cache = tools.storage()
        # Fewer than 3 records means defaults were never written.
        # NOTE(review): the bare except silently rebuilds defaults on any
        # database error — confirm this best-effort behavior is intended.
        try:
            if len(self.db) < 3:
                self.default()
        except:
            self.default()
        # Reset everything when the stored schema version is outdated.
        records = self.db(id="version")
        if records[0]["data"] < self.db_version:
            self.default()

    def get(self, key):
        # Return the stored value for key, or False when absent.
        records = self.db(id=key)
        if records:
            return records[0]["data"]
        else:
            return False

    def set(self, key, data):
        # Insert or update a single setting, then persist immediately.
        records = self.db(id=key)
        if records:
            self.db.update(records, data=data)
        else:
            self.db.insert(id=key, data=data)
        self.db.commit()

    def default(self):
        # Wipe the settings DB and reload defaults from settings.json.
        self.clearDB()
        try:
            pointer = os.path.join(CWD, "settings.json")
            file = open(pointer, "r")
            defaults = json.load(file)
            file.close()
        except:
            # Missing/corrupt settings.json: fall back to an empty config.
            print traceback.format_exc()
            defaults = {}
        for key in defaults.keys():
            # Positional insert: fields are ("id", "data").
            self.db.insert(str(key), defaults[key])
        self.db.insert("version", self.db_version)
        self.db.commit()

    def clearDB(self):
        # Delete all records; recreate the table if deletion fails.
        try:
            records = self.db()
            self.db.delete(records)
        except:
            self.db.create("id", "data", mode="override")
        self.db.commit()

    def clearCache(self):
        self.cache.empty()

    def clearSearch(self):
        # Empty the search DB, recreating it when necessary.
        try:
            records = self.searchdb()
            self.searchdb.delete(records)
        except:
            self.searchdb.create("module", "id", "timestamp", "data", mode="override")
        self.searchdb.commit()

    def clearAll(self):
        # Full reset: defaults plus both caches.
        self.default()
        self.clearCache()
        self.clearSearch()