本文整理汇总了Python中convirt.model.DBSession类的典型用法代码示例。如果您正苦于以下问题:Python DBSession类的具体用法?Python DBSession怎么用?Python DBSession使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DBSession类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: associate_defns
def associate_defns(self, site_id, group_id, def_type, def_ids, auth, op_level=None):
    """Associate one or more storage definitions with a server pool (group).

    For each id in the comma-separated *def_ids*, the definition is attached
    via the sync manager; on success, the pool's VM disks are re-matched
    against the newly discovered storage disks.  If attaching a definition
    fails, that definition is rolled back (removed) and the loop continues
    with the next id; the first error description is re-raised after the
    whole list has been processed.

    :param site_id: id of the data-center/site the group belongs to
    :param group_id: id of the server pool to associate the definitions with
    :param def_type: definition type (unused directly here; storage is assumed)
    :param def_ids: comma-separated string of StorageDef ids
    :param auth: authentication/authorization context passed to managers
    :param op_level: optional operation level forwarded to the sync manager
    :raises Exception: if any definition failed to attach (raised after all
        ids were attempted), or if the rollback itself fails (raised at once)
    """
    error_desc=""
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate=True
            self.sync_manager.add_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.ATTACH, "ADD_STORAGE_DEF", self.storage_manager, self.manager, op_level, associate)
            # matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
        except Exception, ex:
            # remember the failure but keep processing the remaining ids
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            # if we get any exception while adding/sync'ing the definition
            # then we are removing (rolling back) the definition.
            add_mode=True
            try:
                self.sync_manager.remove_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.DETACH, "REMOVE_STORAGE_DEF", self.storage_manager, self.manager, add_mode, group_list, op_level)
            except Exception, ex1:
                # rollback failed: abort immediately with the rollback error
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'",""))
                raise Exception(to_str(ex1))
    if error_desc:
        raise Exception(error_desc)
示例2: get_storage_stats
def get_storage_stats(self, vm_id=None):
    """Build a storage-statistics summary for a VM.

    Falls back to ``self.vm_id`` when *vm_id* is not supplied.  For every
    VMDisks row of the VM, a per-disk detail dict is collected (device type,
    locality flag, size, name, and the linked storage disk id if a
    VMStorageLinks row exists).

    :param vm_id: optional VM id; defaults to ``self.vm_id``
    :returns: dict with keys ``LOCAL_ALLOCATION`` and ``SHARED_ALLOCATION``
        (both always 0 here) and ``DISK_STATS`` (disk name -> detail dict)
    """
    per_disk = {}
    target_vm_id = vm_id if vm_id else self.vm_id
    if target_vm_id:
        for disk in DBSession.query(VMDisks).filter_by(vm_id=target_vm_id):
            link = DBSession.query(VMStorageLinks).filter_by(vm_disk_id=disk.id).first()
            per_disk[disk.disk_name] = {
                "DEV_TYPE": disk.dev_type,
                "IS_LOCAL": self.get_remote(disk.disk_name),
                "DISK_SIZE": disk.disk_size,
                "DISK_NAME": disk.disk_name,
                "STORAGE_DISK_ID": link.storage_disk_id if link else None,
            }
    return {
        "LOCAL_ALLOCATION": 0,
        "SHARED_ALLOCATION": 0,
        "DISK_STATS": per_disk,
    }
示例3: resume_work
def resume_work(self,context):
    """
    Resume this worker after a restart, restoring state saved in the task
    context by update_execution_context().

    Restores start_time, worker_ids and sp_list from the persisted
    execution context, then reconciles the entity_tasks table: any row
    claimed by this worker whose worker_id is NOT in the restored
    worker_ids list is considered stale and is released (worker_id cleared,
    marked finished).  Finally the normal work loop is (re)started.

    :param context: task context dict containing an "execution_context" entry
    """
    execution_context=context["execution_context"]
    WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker )
    if execution_context:
        # fall back to "now" if no start_time was persisted
        self.start_time=execution_context.get("start_time",datetime.utcnow())
        self.worker_ids=execution_context.get("worker_ids",[])
        self.sp_list=execution_context.get("sp_list",[])
        ## validate all the worker ids are taken care of
        ets = DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
            filter(not_(EntityTasks.worker_id.in_(self.worker_ids))).all()
        if len(ets) > 0:
            xtra_work_ids = [et.worker_id for et in ets]
            WRK_LOGGER.error("GOT ENT Tasks different from execution_context :"+self.worker+\
                ": CONTEXT WORKERS : "+str(self.worker_ids) +": XTRA WORKERS :"+str(xtra_work_ids))
            # release the stale rows in bulk; r is the affected-row count
            r = DBSession.query(EntityTasks.entity_id).\
                filter(EntityTasks.worker_id.in_(xtra_work_ids)).\
                filter(EntityTasks.worker==self.worker).\
                update(values=dict(worker_id=None,finished=True,end_time=datetime.utcnow()))
            transaction.commit()
            WRK_LOGGER.debug("Cleaned Up entity_tasks . worker:rows : "+self.worker+":"+str(r))
    WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker+":"+str(self.start_time)+":"+str(self.worker_ids) )
    self.do_work()
示例4: get_vm_linked_with_storage
def get_vm_linked_with_storage(self, storage_disk_id):
    """Resolve the VM that uses the given storage disk, if any.

    Walks storage disk -> VMStorageLinks -> VMDisks -> VM.

    :param storage_disk_id: id of the storage disk (falsy values short-circuit)
    :returns: the linked VM row, or None if any hop of the chain is missing
    """
    if not storage_disk_id:
        return None
    link = DBSession.query(VMStorageLinks).filter_by(storage_disk_id=storage_disk_id).first()
    if link is None:
        return None
    disk = DBSession.query(VMDisks).filter_by(id=link.vm_disk_id).first()
    if disk is None:
        return None
    return DBSession.query(VM).filter_by(id=disk.vm_id).first()
示例5: vm_action
def vm_action(self, dom_id, node_id, action,date=None,time=None):
    """Web handler: perform an action (start/stop/...) on a VM.

    Authenticates the session, loads the VM row, and forwards the action to
    the backend controller.  For a START action the VM's configured console
    wait time is looked up.

    NOTE(review): this excerpt appears truncated by the extraction tool —
    no success-path return is visible after the try block, and wait_time is
    computed but not used within the visible lines; confirm against the
    original file.

    :param dom_id: VM (domain) id
    :param node_id: id of the node the VM runs on
    :param action: action constant (e.g. constants.START)
    :param date: optional scheduled date for the action
    :param time: optional scheduled time for the action
    :returns: on error, a JSON-ish string with success:false and the message
    """
    self.authenticate()
    try:
        wait_time=None
        dom=DBSession().query(VM).filter(VM.id==dom_id).one()
        self.tc.vm_action(session['auth'],dom_id,node_id,action,date,time)
        if action == constants.START:
            wait_time=dom.get_wait_time('view_console')
    except Exception, ex:
        print_traceback()
        # quotes/newlines stripped so the string stays a valid JS literal
        return "{success: false,msg:'"+to_str(ex).replace("'","").replace("\n","")+"'}"
示例6: add_site_defn
def add_site_defn(self, site_id, def_id, def_type, status, oos_count):
    """Link a definition to a site (data center), unless already linked.

    Idempotent: if a DCDefLink row for (site_id, def_id) exists, nothing
    is done.  Otherwise a new link row is added to the session with the
    current UTC timestamp.

    :param site_id: id of the site/data center
    :param def_id: id of the definition being linked
    :param def_type: type of the definition
    :param status: link status (stored as unicode)
    :param oos_count: out-of-sync counter value for the link
    """
    existing = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_id=def_id).first()
    if existing is not None:
        return
    link = DCDefLink()
    link.site_id = site_id
    link.def_type = def_type
    link.def_id = def_id
    link.status = to_unicode(status)
    link.oos_count = oos_count
    link.dt_time = datetime.utcnow()
    DBSession.add(link)
示例7: add_node_defn
def add_node_defn(self, node_id, def_id, def_type, status, details):
    """Link a definition to a managed server (node), unless already linked.

    Idempotent: if a ServerDefLink row for (server_id, def_id) exists,
    nothing is done.  Otherwise a new link row is added to the session
    with the current UTC timestamp.

    :param node_id: id of the server (stored as unicode)
    :param def_id: id of the definition being linked
    :param def_type: type of the definition (stored as unicode)
    :param status: link status (stored as unicode)
    :param details: free-form details text (stored as unicode)
    """
    existing = DBSession.query(ServerDefLink).filter_by(server_id=node_id, def_id=def_id).first()
    if existing is not None:
        return
    link = ServerDefLink()
    link.server_id = to_unicode(node_id)
    link.def_type = to_unicode(def_type)
    link.def_id = def_id
    link.status = to_unicode(status)
    link.details = to_unicode(details)
    link.dt_time = datetime.utcnow()
    DBSession.add(link)
示例8: getSiteDefListToAssociate
def getSiteDefListToAssociate(self, site_id, group_id, defType):
    """List DC-scoped definitions of a site not yet linked to the group.

    For each DCDefLink of the site with the requested type, the definition
    is included only when no SPDefLink already ties it to *group_id*; each
    returned definition carries the status from its site-level link.

    :param site_id: id of the site whose definitions are considered
    :param group_id: id of the server pool to check existing links against
    :param defType: definition type to filter on
    :returns: list of NwDef rows eligible for association (empty if no site_id)
    """
    candidates = []
    if not site_id:
        return candidates
    site_links = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_type=defType)
    for site_link in site_links:
        already_linked = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=site_link.def_id, def_type=defType).first()
        if already_linked:
            continue
        defn = DBSession.query(NwDef).filter_by(id=site_link.def_id, scope=constants.SCOPE_DC).first()
        if defn:
            # surface the site-level link status on the definition object
            defn.status = site_link.status
            candidates.append(defn)
    return candidates
示例9: add_group_defn
def add_group_defn(self, group_id, def_id, def_type, status, oos_count):
    """Link a definition to a server pool (group), unless already linked.

    Idempotent: if an SPDefLink row for (group_id, def_id) exists, nothing
    is done.  Otherwise a new link row is added to the session with the
    current UTC timestamp.

    :param group_id: id of the server pool
    :param def_id: id of the definition being linked
    :param def_type: type of the definition
    :param status: link status
    :param oos_count: out-of-sync counter value for the link
    """
    existing = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=def_id).first()
    if existing is not None:
        return
    link = SPDefLink()
    link.group_id = group_id
    link.def_type = def_type
    link.def_id = def_id
    link.status = status
    link.oos_count = oos_count
    link.dt_time = datetime.utcnow()
    DBSession.add(link)
示例10: update_execution_context
def update_execution_context(self):
    """
    Persist this worker's state into its parent task's context so that a
    later resume_work() can pick up where it left off.

    Saves start_time and worker_ids into the execution context dict, stores
    that dict on the parent task's context, and re-adds the task to the
    session.  Does nothing when the parent task cannot be found.
    """
    tid = TaskUtil.get_task_context()
    WRK_LOGGER.debug("in update_execution_context Parent task : "+str(tid)+" : child tasks :"+str(self.worker_ids))
    parent = Task.get_task(tid)
    if parent is None:
        return
    self.execution_context["start_time"] = self.start_time
    self.execution_context["worker_ids"] = self.worker_ids
    parent.context["execution_context"] = self.execution_context
    DBSession.add(parent)
    WRK_LOGGER.debug("in update_execution_context updated Parent task : "+str(tid))
示例11: get_lock
def get_lock(self,sub_system,entity_id,operation,table_name):
    """Acquire (or create) a row lock in CMS_Locks.

    Issues a SELECT ... FOR UPDATE (via with_lockmode("update")) on the
    row matching all four key columns; when no such row exists yet, a new
    CMS_Locks row is added to the session.

    :param sub_system: subsystem name the lock belongs to
    :param entity_id: id of the entity being locked
    :param operation: operation name the lock protects
    :param table_name: table the lock applies to
    """
    locked_query = DBSession.query(CMS_Locks).with_lockmode("update")
    locked_query = locked_query.filter(CMS_Locks.sub_system == sub_system)
    locked_query = locked_query.filter(CMS_Locks.entity_id == entity_id)
    locked_query = locked_query.filter(CMS_Locks.operation == operation)
    locked_query = locked_query.filter(CMS_Locks.table_name == table_name)
    existing_rows = locked_query.all()
    if not existing_rows:
        DBSession.add(CMS_Locks(sub_system, entity_id, operation, table_name))
示例12: get_disk_stat
def get_disk_stat(self, vm_id, filename):
    """Return detail for one VM disk identified by its file path.

    Looks up the StorageDisks row by unique_path and the matching VMDisks
    row by (vm_id, disk_name); when both exist, a detail dict is returned.

    :param vm_id: id of the VM owning the disk
    :param filename: disk file path (unique_path / disk_name)
    :returns: detail dict (DEV_TYPE, IS_LOCAL, DISK_SIZE, DISK_NAME,
        STORAGE_DISK_ID), or an empty dict when either row is missing
    """
    storage_disk = DBSession.query(StorageDisks).filter_by(unique_path=filename).first()
    if not storage_disk:
        return {}
    vm_disk = DBSession.query(VMDisks).filter_by(vm_id=vm_id, disk_name=filename).first()
    if not vm_disk:
        return {}
    return {
        "DEV_TYPE": vm_disk.dev_type,
        "IS_LOCAL": self.get_remote(vm_disk.disk_name),
        "DISK_SIZE": vm_disk.disk_size,
        "DISK_NAME": vm_disk.disk_name,
        "STORAGE_DISK_ID": storage_disk.id,
    }
示例13: send_deployment_stats
def send_deployment_stats(self):
    """Submit a one-shot SendDeploymentStatsTask and return its task id.

    The task is bound to the first Entity of type_id 1 (the data center),
    scheduled for immediate execution, added to the session, and committed.

    :returns: the id of the submitted task
    """
    stats_task = SendDeploymentStatsTask(u'Send Deployment Stats', {'quiet':True}, [], dict(), None, u'admin')
    dc_entity = DBSession.query(Entity).filter(Entity.type_id==1).first()
    stats_task.set_entity_info(dc_entity)
    # interval=None -> run once, immediately
    stats_task.set_interval(TaskInterval(interval=None, next_execution=datetime.utcnow()))
    DBSession.add(stats_task)
    import transaction
    transaction.commit()
    logger.debug("SendDeploymentStatsTask Submitted")
    return stats_task.task_id
示例14: on_remove_group
def on_remove_group(self, site_id, groupId, auth, def_manager):
    """Clean up definition links when a server pool (group) is removed.

    Collects every definition linked to the group via SPDefLink, deletes
    the link rows, and additionally deletes the definitions themselves
    when their scope is server-pool-only (data-center-scoped definitions
    survive because only the pool is being removed).

    :param site_id: id of the site the group belongs to
    :param groupId: id of the group being removed
    :param auth: authentication context (loaded rows only; not re-checked here)
    :param def_manager: manager providing getType()/get_defn() for this
        definition kind
    """
    op = constants.DETACH
    defType = def_manager.getType()
    site = DBSession.query(Site).filter_by(id=site_id).first()
    group = DBSession.query(ServerGroup).filter_by(id = groupId).first()
    defn_list=[]
    # get all the definitions from the group
    # (pool-level definition links)
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
    if sp_defns:
        for eachdefn in sp_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
    for each_defn in defn_list:
        # NOTE(review): this lookup filters by def_id/def_type only, not by
        # group_id — if the same definition is linked to several groups this
        # may pick another group's link; confirm intent against callers.
        group_defn = DBSession.query(SPDefLink).filter_by(def_id = each_defn.id, def_type = defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        # delete only those definitions which have server-pool scope.
        # data-center-level definitions can not be deleted since we are
        # removing the server pool only.
        if each_defn.scope == constants.SCOPE_SP:
            DBSession.delete(each_defn)
示例15: check_if_hung
def check_if_hung(self):
    """Release entity_tasks rows if this task was previously marked hung.

    Reads self.mark_hung (an attribute that may not exist — the
    AttributeError handler makes its absence a no-op) and, when set, bulk
    updates all EntityTasks rows claimed by this task id: worker_id is
    cleared and the row is marked finished with the current UTC time.
    The commit is intentionally left to the caller (see the commented-out
    transaction.commit()).
    """
    WRK_LOGGER.debug("Check if Task, "+self.name+" is hung? ")
    marked_hung = False
    try:
        marked_hung = self.mark_hung
        if marked_hung :
            WRK_LOGGER.debug("Task, "+self.name+"("+str(self.task_id)+") was marked hung. updating entity_tasks")
            DBSession.query(EntityTasks).\
                filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
                update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
            # transaction.commit()
    except AttributeError, e:
        # mark_hung attribute absent -> nothing to clean up
        pass