本文整理汇总了Python中convirt.model.DBSession.query方法的典型用法代码示例。如果您正苦于以下问题:Python DBSession.query方法的具体用法?Python DBSession.query怎么用?Python DBSession.query使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类convirt.model.DBSession
的用法示例。
在下文中一共展示了DBSession.query方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: resume_work
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def resume_work(self,context):
    """
    Resume this worker after a restart: restore its state (start time,
    outstanding child worker ids, server-pool list) from the persisted task
    ``execution_context``, reconcile stale EntityTasks rows, then continue
    with do_work().
    """
    execution_context=context["execution_context"]
    WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker )
    if execution_context:
        # Restore the pre-interruption state; fall back to fresh defaults
        # for any key that was never persisted.
        self.start_time=execution_context.get("start_time",datetime.utcnow())
        self.worker_ids=execution_context.get("worker_ids",[])
        self.sp_list=execution_context.get("sp_list",[])
        ## validate all the worker ids are taken care of: any EntityTasks row
        ## owned by this worker whose worker_id is NOT in the restored list
        ## is stale and must be cleaned up before resuming.
        ets = DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
            filter(not_(EntityTasks.worker_id.in_(self.worker_ids))).all()
        if len(ets) > 0:
            xtra_work_ids = [et.worker_id for et in ets]
            WRK_LOGGER.error("GOT ENT Tasks different from execution_context :"+self.worker+\
                ": CONTEXT WORKERS : "+str(self.worker_ids) +": XTRA WORKERS :"+str(xtra_work_ids))
            # Bulk-mark the stale rows finished so they are not waited on.
            r = DBSession.query(EntityTasks.entity_id).\
                filter(EntityTasks.worker_id.in_(xtra_work_ids)).\
                filter(EntityTasks.worker==self.worker).\
                update(values=dict(worker_id=None,finished=True,end_time=datetime.utcnow()))
            transaction.commit()
            WRK_LOGGER.debug("Cleaned Up entity_tasks . worker:rows : "+self.worker+":"+str(r))
    WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker+":"+str(self.start_time)+":"+str(self.worker_ids) )
    self.do_work()
示例2: on_remove_group
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def on_remove_group(self, site_id, groupId, auth, def_manager):
    """
    Handle removal of a server group (server pool): detach every definition
    of ``def_manager``'s type linked to the group, and delete the
    definitions whose scope is limited to the server pool.

    :param site_id: id of the data center the group belongs to (kept for
        the handler signature; not used in the detach logic).
    :param groupId: id of the ServerGroup being removed.
    :param auth: caller's auth context (kept for the handler signature).
    :param def_manager: manager exposing getType()/get_defn() for the
        definition kind being processed.
    """
    # NOTE(review): the original assigned `op`, `site` and `group` but never
    # used them (two of those were needless DB queries); they were removed.
    defType = def_manager.getType()
    defn_list = []
    # Collect all pool-level definitions linked to this group.
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
    if sp_defns:
        for eachdefn in sp_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
    for each_defn in defn_list:
        # Drop the group<->definition link row.
        group_defn = DBSession.query(SPDefLink).filter_by(def_id=each_defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        # Delete only definitions scoped to the server pool; data-center
        # level definitions survive because only the pool is being removed.
        if each_defn.scope == constants.SCOPE_SP:
            DBSession.delete(each_defn)
示例3: associate_defns
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def associate_defns(self, site_id, group_id, def_type, def_ids, auth, op_level=None):
    """
    Associate one or more storage definitions (comma-separated ids in
    ``def_ids``) with a server group, then match existing VM disks against
    the newly discovered storage disks.  If adding/synchronizing a
    definition fails, the association is rolled back by removing the
    definition again; the first error is re-raised at the end so remaining
    ids still get processed.
    """
    error_desc=""
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate=True
            self.sync_manager.add_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.ATTACH, "ADD_STORAGE_DEF", self.storage_manager, self.manager, op_level, associate)
            # matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
        except Exception, ex:
            # Remember the failure but keep going; it is re-raised after the
            # loop so one bad definition does not block the others.
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            # if we get any exception while adding/sync definition then we are
            # removing the definition (compensating action).
            add_mode=True
            try:
                self.sync_manager.remove_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.DETACH, "REMOVE_STORAGE_DEF", self.storage_manager, self.manager, add_mode, group_list, op_level)
            except Exception, ex1:
                # Roll-back itself failed: surface this error immediately.
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'",""))
                raise Exception(to_str(ex1))
    if error_desc:
        raise Exception(error_desc)
示例4: get_storage_stats
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def get_storage_stats(self, vm_id=None):
    """
    Build a storage summary for a VM.

    Returns a dict with keys ``LOCAL_ALLOCATION`` and ``SHARED_ALLOCATION``
    (both 0 here) and ``DISK_STATS``: a mapping of disk name to a detail
    dict (DEV_TYPE, IS_LOCAL, DISK_SIZE, DISK_NAME, STORAGE_DISK_ID).

    :param vm_id: VM to inspect; defaults to ``self.vm_id`` when falsy.
    """
    per_disk = {}
    if not vm_id:
        vm_id = self.vm_id
    if vm_id:
        for disk in DBSession.query(VMDisks).filter_by(vm_id=vm_id):
            # Resolve the linked storage disk id, if any.
            link = DBSession.query(VMStorageLinks).filter_by(vm_disk_id=disk.id).first()
            per_disk[disk.disk_name] = {
                "DEV_TYPE": disk.dev_type,
                "IS_LOCAL": self.get_remote(disk.disk_name),
                "DISK_SIZE": disk.disk_size,
                "DISK_NAME": disk.disk_name,
                "STORAGE_DISK_ID": link.storage_disk_id if link else None,
            }
    summary = {}
    summary["LOCAL_ALLOCATION"] = 0
    summary["SHARED_ALLOCATION"] = 0
    summary["DISK_STATS"] = per_disk
    return summary
示例5: update_avail
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def update_avail(node, new_state, monit_state, timestamp, reason, logger, update=True, auth=None):
    """
    Record an availability-state change for ``node``: update its current
    state, close out the previous AvailHistory row, insert a new one and,
    when ``update`` is set, fire the node-down/node-up notification for
    managed nodes.  All work happens under a transaction savepoint that is
    rolled back on any error (the update is then deferred to the next run).
    """
    sv_point = transaction.savepoint()
    try:
        strt = p_task_timing_start(logger, "UpdateAvailability", node.id, log_level="DEBUG")
        # there is a status change, update and send event
        # update current availability,
        # we only update avail-state; monit_state is updated
        # only by user actions
        node.current_state.avail_state = new_state
        node.current_state.timestamp = timestamp
        node.current_state.description = reason
        # Close the most recent history row: stamp its end time and compute
        # the period it covered, in minutes.
        avh=DBSession.query(AvailHistory).filter(AvailHistory.entity_id==node.id).\
            order_by(AvailHistory.timestamp.desc()).first()
        if avh is not None:
            avh.endtime=timestamp
            time_diff=timestamp-avh.timestamp
            avh.period=time_diff.days*24*60+time_diff.seconds/60
            DBSession.add(avh)
        # insert availability history row for the new state
        ah = AvailHistory(node.id, new_state, monit_state, timestamp, reason)
        DBSession.add(ah)
        if update==True:
            ent = DBSession.query(Entity).filter(Entity.entity_id==node.id).first()
            # local import, presumably to avoid a circular dependency
            from convirt.model.ManagedNode import ManagedNode
            if ent.type.name == constants.MANAGED_NODE:
                if new_state == ManagedNode.DOWN:
                    notify_node_down(ent.name, reason)
                else:
                    node_up_action(auth, node.id)
    except Exception, e:
        # defer to next time: log and roll back to the savepoint
        import traceback
        traceback.print_exc()
        logger.error(e)
        sv_point.rollback()
示例6: get_vm_linked_with_storage
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def get_vm_linked_with_storage(self, storage_disk_id):
    """
    Resolve the VM that owns the given storage disk by walking
    StorageDisk -> VMStorageLinks -> VMDisks -> VM.

    :param storage_disk_id: id of the storage disk to look up.
    :return: the linked VM row, or None when any hop is missing.
    """
    if not storage_disk_id:
        return None
    link = DBSession.query(VMStorageLinks).filter_by(storage_disk_id=storage_disk_id).first()
    if link is None:
        return None
    disk = DBSession.query(VMDisks).filter_by(id=link.vm_disk_id).first()
    if disk is None:
        return None
    return DBSession.query(VM).filter_by(id=disk.vm_id).first()
示例7: wait_for_workers_to_finish
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def wait_for_workers_to_finish(self, task_ids):
    """
    Block until at least one child worker task finishes, cleaning up its
    EntityTasks rows.  Polls every 5 seconds; once the wait exceeds
    ``self.max_worker_wait_time`` it probes each task for a hung status and,
    for hung tasks, marks completed entities finished and resets pending
    entities' start_time so they get picked up again.
    """
    WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" max_worker_wait_time: "+str(self.max_worker_wait_time))
    task_completed = False
    self.wait_start_time=datetime.utcnow()
    ### this is an infinite loop until we find a completed task
    ### we need to add some wait time to check on the status of child tasks
    while task_completed == False:
        time.sleep(5)
        completed_tasks = self.check_tasks_completed(task_ids)
        WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" completed_tasks :"+str(completed_tasks))
        if len(completed_tasks) > 0:
            task_completed = True
            for task in completed_tasks:
                # Child finished normally: forget its worker id and mark its
                # EntityTasks rows done.
                self.worker_ids.remove(task['task_id'])
                WRK_LOGGER.debug("child task completed, update EntityTasks "+self.worker+" completed_tasks :"+str(task['task_id']))
                ets = DBSession.query(EntityTasks).\
                    filter(EntityTasks.worker_id==to_unicode(task['task_id'])).all()
                for et in ets:
                    et.worker_id=None
                    et.finished=True
                    et.end_time=datetime.utcnow()
                    DBSession.merge(et)
            transaction.commit()
            WRK_LOGGER.debug("child tasks completed, updated EntityTasks "+self.worker)
        else :
            wait_time_sec=(datetime.utcnow()-self.wait_start_time).seconds
            WRK_LOGGER.debug("No completed child tasks for "+self.worker+". waiting for "+str(wait_time_sec))
            if wait_time_sec > self.max_worker_wait_time:
                # Waited too long: check each child task for a hung state.
                task_service = self.svc_central.get_service(self.task_service_id)
                # past_time is used to backdate pending entities so they are
                # eligible for re-processing.
                past_time = self.start_time-timedelta(minutes=1)
                for task_id in task_ids:
                    task_obj = task_service.get_running_task_obj(task_id)
                    if task_obj:
                        (hung, completed, pending) = task_obj.get_running_status()
                        WRK_LOGGER.debug("HUNG STATUS for "+self.worker+":"+str(hung)+":"+str(task_id)+\
                            ":"+str(completed)+":"+str(pending))
                        if hung:
                            task_completed = True
                            self.worker_ids.remove(task_id)
                            WRK_LOGGER.debug("Hung task. Cleanup EntityTask for "+self.worker+". task id : "+str(task_id))
                            # Entities the hung task did finish: mark done.
                            DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                filter(EntityTasks.entity_id.in_(completed)).\
                                update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
                            # Entities still pending: release and backdate.
                            DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                filter(EntityTasks.entity_id.in_(pending)).\
                                update(dict(worker_id=None,finished=True, start_time=past_time))
                            transaction.commit()
                            WRK_LOGGER.debug("Hung task. Cleaned up EntityTask for "+self.worker+". task id : "+str(task_id))
示例8: getSiteDefListToAssociate
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def getSiteDefListToAssociate(self, site_id, group_id, defType):
    """
    List the data-center scoped network definitions of ``defType`` attached
    to ``site_id`` that are NOT yet linked to the given server group -- the
    candidates still available for association.  Each returned definition
    carries the status from its data-center link row.
    """
    if not site_id:
        return []
    candidates = []
    dc_links = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_type=defType)
    for link in dc_links:
        # Skip definitions already associated with this group.
        already_linked = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=link.def_id, def_type=defType).first()
        if already_linked:
            continue
        definition = DBSession.query(NwDef).filter_by(id=link.def_id, scope=constants.SCOPE_DC).first()
        if definition:
            definition.status = link.status
            candidates.append(definition)
    return candidates
示例9: check_if_hung
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def check_if_hung(self):
    """
    If this task was flagged hung (``self.mark_hung``), release its
    EntityTasks rows by clearing worker_id and marking them finished.
    Tasks that never define ``mark_hung`` are treated as not hung: the
    AttributeError raised on the attribute access is swallowed.
    """
    WRK_LOGGER.debug("Check if Task, "+self.name+" is hung? ")
    marked_hung = False
    try:
        # Raises AttributeError when the attribute was never set -- caught
        # below, which makes "no flag" equivalent to "not hung".
        marked_hung = self.mark_hung
        if marked_hung :
            WRK_LOGGER.debug("Task, "+self.name+"("+str(self.task_id)+") was marked hung. updating entity_tasks")
            DBSession.query(EntityTasks).\
                filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
                update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
            # NOTE(review): commit intentionally left out here; presumably the
            # caller owns the transaction -- confirm before adding one.
            # transaction.commit()
    except AttributeError, e:
        pass
示例10: get_disk_stat
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def get_disk_stat(self, vm_id, filename):
    """
    Detail dict for one VM disk identified by its file path.

    :param vm_id: owning VM id.
    :param filename: disk path; matched against StorageDisks.unique_path
        and VMDisks.disk_name.
    :return: dict with DEV_TYPE / IS_LOCAL / DISK_SIZE / DISK_NAME /
        STORAGE_DISK_ID, or an empty dict when either row is missing.
    """
    storage_disk = DBSession.query(StorageDisks).filter_by(unique_path=filename).first()
    if not storage_disk:
        return {}
    vm_disk = DBSession.query(VMDisks).filter_by(vm_id=vm_id, disk_name=filename).first()
    if not vm_disk:
        return {}
    return {
        "DEV_TYPE": vm_disk.dev_type,
        "IS_LOCAL": self.get_remote(vm_disk.disk_name),
        "DISK_SIZE": vm_disk.disk_size,
        "DISK_NAME": vm_disk.disk_name,
        "STORAGE_DISK_ID": storage_disk.id,
    }
示例11: get_defn_status
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def get_defn_status(self, defn, defType, site_id, group_id, node_id):
    """
    Status of a definition at the link level matching its scope: data
    center (DCDefLink), server pool (SPDefLink) or server (ServerDefLink).
    Returns None when the scope is unknown or no link row exists.
    """
    link = None
    scope = defn.scope
    if scope == constants.SCOPE_DC:
        link = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_id=defn.id, def_type=defType).first()
    elif scope == constants.SCOPE_SP:
        link = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=defn.id, def_type=defType).first()
    elif scope == constants.SCOPE_S:
        link = DBSession.query(ServerDefLink).filter_by(server_id=node_id, def_id=defn.id, def_type=defType).first()
    return link.status if link is not None else None
示例12: metric_cache
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def metric_cache(self, node_id, metric, metric_type, rollup_type, per_type, date1, date2, period):
    """
    Return the metric value for a node, served from ``self.cache`` when a
    fresh entry exists; otherwise query the chart service and cache the
    result with an expiry of CACHE_TIME minutes.

    The cache key is (node_id, entity type name, metric, period).
    """
    now = datetime.utcnow()
    refresh = False
    ent = DBSession.query(Entity).filter(Entity.entity_id == node_id).one()
    cache_key = (node_id, ent.type.name, metric, period)
    # `has_key` is deprecated (and gone in Python 3); use `in` instead.
    if cache_key in self.cache:
        # Entry exists: refresh only when it has expired.
        cached_time = self.cache[cache_key].get("cached_time")
        if now > cached_time:
            refresh = True
    else:
        # New entry: make room first if the cache is at its limit.
        self.check_cache_limit(self.cache)
        refresh = True
    if refresh:
        # querying the result and set it to cache
        result = self.chart_service.get_metrics_specific_value(
            [node_id], metric, metric_type, rollup_type, per_type, date1, date2
        )
        cache_time = now + timedelta(minutes=int(tg.config.get(constants.CACHE_TIME)))
        self.cache[cache_key] = {"cached_time": cache_time, "value": result}
        self.cache[cache_key]["last_accessed"] = now
    return self.cache[cache_key].get("value")
示例13: get_server_def_list
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def get_server_def_list(self,site_id, group_id, def_id):
try:
server_def_list=[]
node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.STORAGE))
if node_defns:
for eachdefn in node_defns:
temp_dic={}
if eachdefn:
node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first()
temp_dic['id']=eachdefn.server_id
if node:
temp_dic['name']=node.hostname
else:
temp_dic['name']=None
temp_dic['status']=eachdefn.status
if eachdefn.details:
temp_dic['details']=eachdefn.details
else:
temp_dic['details']=None
server_def_list.append(temp_dic)
except Exception, ex:
LOGGER.error(to_str(ex).replace("'",""))
return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
示例14: resume_task
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def resume_task(self, auth, ctx, appliance_entry, image_store, group_id, \
        image_name, platform, force):
    """
    Resume an interrupted appliance-image task.  The task can only be
    resumed if the Image row named ``image_name`` was already created;
    otherwise the task is reported as incomplete.
    """
    ### TODO: disk cleanup
    if DBSession.query(Image).filter(Image.name==image_name).first() is None:
        # Nothing to resume from -- the image was never recorded.
        raise Exception(constants.INCOMPLETE_TASK)
示例15: exec_task
# 需要导入模块: from convirt.model import DBSession [as 别名]
# 或者: from convirt.model.DBSession import query [as 别名]
def exec_task(self, auth, ctx,node_ids):
    """
    VMAvailability task body: iterate over the managed nodes in
    ``node_ids`` and refresh VM availability on each node that is up.
    Per-node failures are logged and do not stop the sweep; completed node
    ids are tracked in ``self.completed_nodes`` and a hung-task check runs
    when the sweep ends (normally or not).
    """
    LOGGER.debug('entered in exec task for VMAvailability task')
    strt = p_task_timing_start(AVL_LOGGER, "VMAvailability", node_ids)
    try:
        self.completed_nodes = []
        # Independent copies of the id list for progress tracking.
        self.pending_nodes = [node_id for node_id in node_ids]
        self.exc_node_ids = [node_id for node_id in node_ids]
        index = 0
        node_id = self.get_next_node_id(index)
        while node_id is not None:
            self.pending_nodes.remove(node_id)
            node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
            # Fetch the next id up front so the loop advances even when the
            # current node is skipped.
            index+=1
            node_id = self.get_next_node_id(index)
            if node and node.is_up():
                self.current_node = node
                self.start_time = datetime.utcnow()
                try:
                    try:
                        strt1 = p_task_timing_start(AVL_LOGGER, "RefreshVMAvail", node.id)
                        node.refresh_vm_avail()
                        p_task_timing_end(AVL_LOGGER, strt1)
                    except Exception, e:
                        # Log and continue with the remaining nodes.
                        LOGGER.error("Error updating VM availability . Server :"+node.hostname)
                        traceback.print_exc()
                finally:
                    # Count the node as processed even when the refresh failed.
                    self.completed_nodes.append(node.id)
    finally:
        self.check_if_hung()
    p_task_timing_end(AVL_LOGGER, strt)