This article collects typical usage examples of the Python commit function from nailgun.db.db. If you are unsure what commit does, how to call it, or how it is used in practice, the curated code samples below should help.
The following 15 code examples of the commit function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
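For orientation, every example below follows the same basic pattern around the nailgun scoped session: do some work, call db.commit() when it succeeds, db.rollback() when it fails, and db.remove() once the request or job is finished so the session is disposed of. Here is a minimal sketch of that pattern; the do_work function and the bare exception handling are illustrative assumptions, not code taken from nailgun itself.

from nailgun.db import db   # scoped SQLAlchemy session used throughout nailgun

def do_work():
    # placeholder for real ORM operations (adds, updates, deletes)
    pass

def run_unit_of_work():
    try:
        do_work()
        db.commit()      # persist changes only if the work succeeded
    except Exception:
        db.rollback()    # discard partial changes on any error
        raise
    finally:
        db.remove()      # dispose of the scoped session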
Example 1: consume_msg
def consume_msg(self, body, msg):
    callback = getattr(self.receiver, body["method"])
    try:
        callback(**body["args"])
    except errors.CannotFindTask as e:
        logger.warn(str(e))
        msg.ack()
    except OperationalError as e:
        if (
            'TransactionRollbackError' in e.message or
            'deadlock' in e.message
        ):
            logger.exception("Deadlock on message: %s", msg)
            msg.requeue()
        else:
            logger.exception("Operational error on message: %s", msg)
            msg.ack()
    except Exception:
        logger.exception("Message consume failed: %s", msg)
        msg.ack()
    except KeyboardInterrupt:
        logger.error("Receiverd interrupted.")
        msg.requeue()
        raise
    else:
        db.commit()
        msg.ack()
    finally:
        db.remove()
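Note that db.commit() sits in the else clause, so the transaction is committed only when the callback ran without raising, while db.remove() in the finally clause always disposes of the scoped session regardless of the outcome; deadlocked messages are requeued rather than acknowledged so they can be retried.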
Example 2: prepare
def prepare():
    meta = base.reflect_db_metadata()
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2015.1-8.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'networks_metadata': jsonutils.dumps({
                'neutron': {
                    'networks': [],
                    'config': {}
                }
            })
        }]
    )
    releaseid = result.inserted_primary_key[0]
    db.execute(
        meta.tables['clusters'].insert(),
        [{
            'name': 'test_env',
            'release_id': releaseid,
            'mode': 'ha_compact',
            'status': 'new',
            'net_provider': 'neutron',
            'grouping': 'roles',
            'fuel_version': '8.0',
            'deployment_tasks': '[]',
            'replaced_deployment_info': '{}'
        }])
    db.commit()
Example 3: prepare
def prepare():
    meta = base.reflect_db_metadata()
    releaseid = insert_table_row(
        meta.tables["releases"],
        {"name": "test_name", "version": "2014.2.2-6.1",
         "operating_system": "ubuntu", "state": "available"},
    )
    clusterid = insert_table_row(
        meta.tables["clusters"],
        {
            "name": "test_env",
            "release_id": releaseid,
            "mode": "ha_compact",
            "status": "new",
            "net_provider": "neutron",
            "grouping": "roles",
            "fuel_version": "6.1",
        },
    )
    db.execute(
        meta.tables["nodegroups"].insert(),
        [
            {"cluster_id": clusterid, "name": "test_nodegroup_a"},
            {"cluster_id": clusterid, "name": "test_nodegroup_a"},
            {"cluster_id": clusterid, "name": "test_nodegroup_b"},
            {"cluster_id": clusterid, "name": "test_nodegroup_b"},
        ],
    )
    db.commit()
Example 4: load_db_driver
def load_db_driver(handler):
    """Wrap all handler calls so that changes are committed if the
    handler succeeds and rolled back otherwise. Note that only
    HTTPError should be raised from this function; all other
    possible errors should be handled here.
    """
    try:
        # execute handler and commit changes if all is ok
        response = handler()
        db.commit()
        return response
    except web.HTTPError:
        # a special case: commit changes if the HTTP error carries
        # a 2xx status (200, 201, 202, etc.)
        if web.ctx.status.startswith('2'):
            db.commit()
        else:
            db.rollback()
        raise
    except (sa_exc.IntegrityError, sa_exc.DataError) as exc:
        # respond with "400 Bad Request" if database constraints were broken
        db.rollback()
        raise BaseHandler.http(400, exc.message)
    except Exception:
        db.rollback()
        raise
    finally:
        db.remove()
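For context, a wrapper like this is normally registered as a web.py request processor so that every handler call runs inside the commit/rollback/remove scope shown above. The wiring below is a hypothetical sketch; the urls tuple and the app object are assumptions for illustration and do not come from the example itself.

import web

urls = ('/clusters', 'ClusterCollectionHandler')   # assumed route table
app = web.application(urls, locals())
# run every request handler inside load_db_driver's session scope
app.add_processor(load_db_driver)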
Example 5: collect
def collect(resource_type):
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()
        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        ready_or_error_ids = set([c.id for c in operational_clusters] +
                                 [c.id for c in error_clusters])
        envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
            ready_or_error_ids
        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for clusters whose status is neither
        # 'operational' nor 'error', or when the cluster was removed. Data is
        # cleared for a cluster only if it was updated recently (today or
        # yesterday). As long as this collector runs at an interval much
        # smaller than one day, it should not miss any unavailable cluster.
        for id in envs_ids_to_clear:
            oswl_statistics_save(id, resource_type, [])
        # Collect current OSWL data and update data in DB
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)
                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                    oswl_statistics_save(cluster.id, resource_type, data,
                                         version_info=version_info)
            except errors.StatsException as e:
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type,
                                     cluster.id,
                                     six.text_type(e))
                             )
            except Exception as e:
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e))
                                 )
        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        db.remove()
Example 6: _insert_deployment_graph
def _insert_deployment_graph(self):
    result = db.execute(
        self.meta.tables['deployment_graphs'].insert(),
        [{'name': 'test_graph'}]
    )
    db.commit()
    deployment_graph_id = result.inserted_primary_key[0]
    return deployment_graph_id
Example 7: setup_module
def setup_module():
    dropdb()
    alembic.command.upgrade(ALEMBIC_CONFIG, _prepare_revision)
    prepare()
    db.commit()
    alembic.command.downgrade(ALEMBIC_CONFIG, _test_revision)
Example 8: prepare
def prepare():
    meta = base.reflect_db_metadata()
    db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2015.1-8.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'networks_metadata': jsonutils.dumps({
                'neutron': {
                    'networks': [],
                    'config': {}
                }
            }),
            'volumes_metadata': jsonutils.dumps({})
        }])
    db.execute(
        meta.tables['nodes'].insert(),
        [{
            'uuid': '26b508d0-0d76-4159-bce9-f67ec2765480',
            'cluster_id': None,
            'group_id': None,
            'status': 'discover',
            'meta': '{}',
            'mac': 'aa:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow(),
        }]
    )
    db.execute(
        meta.tables['tasks'].insert(),
        [
            {
                'id': 55,
                'uuid': '219eaafe-01a1-4f26-8edc-b9d9b0df06b3',
                'name': 'deployment',
                'status': 'running',
                'deployment_info': jsonutils.dumps({})
            },
        ]
    )
    db.execute(
        meta.tables['deployment_history'].insert(),
        [
            {
                'uuid': 'fake_uuid_0',
                'deployment_graph_task_name': 'fake',
                'node_id': 'fake_node_id',
                'task_id': 55,
                'status': 'pending',
                'summary': jsonutils.dumps({'fake': 'fake'}),
            }
        ]
    )
    db.commit()
Example 9: execute
def execute(self, force=False, **kwargs):
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            "Can't reset environment '{0}' when "
            "running deployment task exists.".format(
                self.cluster.id
            )
        )
    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)
    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt
    )
    for node in nodes:
        objects.Node.reset_vms_created_state(node)
    objects.ClusterPluginLinkCollection.delete_by_cluster_id(
        self.cluster.id)
    db().commit()
    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)
    reset_nodes = supertask.create_subtask(
        consts.TASK_NAMES.reset_nodes
    )
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap
    )
    db.commit()
    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(reset_nodes),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
Example 10: test_deployment_graph_creation
def test_deployment_graph_creation(self):
    result = db.execute(
        self.meta.tables['deployment_graphs'].insert(),
        [{'name': 'test_graph'}]
    )
    db.commit()
    graph_key = result.inserted_primary_key[0]
    result = db.execute(
        sa.select([
            self.meta.tables['deployment_graphs']
        ]))
    self.assertIn((graph_key, u'test_graph'), list(result))
Example 11: test_db_driver
def test_db_driver(handler):
    try:
        return handler()
    except web.HTTPError:
        if str(web.ctx.status).startswith(("4", "5")):
            db.rollback()
        raise
    except Exception:
        db.rollback()
        raise
    finally:
        db.commit()
Example 12: execute
def execute(self):
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(
                self.cluster.id
            )
        )
    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)
    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)
    db().commit()
    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )
    db.commit()
    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
Example 13: POST
def POST(self, cluster_id):
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    data = self.checked_data()
    node_id = data["node_id"]
    node = self.get_object_or_404(objects.Node, node_id)
    netgroups_mapping = self.get_netgroups_map(node.cluster, cluster)
    orig_roles = node.roles
    objects.Node.update_roles(node, [])  # flush
    objects.Node.update_pending_roles(node, [])  # flush
    node.replaced_deployment_info = []
    node.deployment_info = []
    node.kernel_params = None
    node.cluster_id = cluster.id
    node.group_id = None
    objects.Node.assign_group(node)  # flush
    objects.Node.update_pending_roles(node, orig_roles)  # flush
    for ip in node.ip_addrs:
        ip.network = netgroups_mapping[ip.network]
    nic_assignments = db.query(models.NetworkNICAssignment).\
        join(models.NodeNICInterface).\
        filter(models.NodeNICInterface.node_id == node.id).\
        all()
    for nic_assignment in nic_assignments:
        nic_assignment.network_id = \
            netgroups_mapping[nic_assignment.network_id]
    bond_assignments = db.query(models.NetworkBondAssignment).\
        join(models.NodeBondInterface).\
        filter(models.NodeBondInterface.node_id == node.id).\
        all()
    for bond_assignment in bond_assignments:
        bond_assignment.network_id = \
            netgroups_mapping[bond_assignment.network_id]
    objects.Node.add_pending_change(node,
                                    consts.CLUSTER_CHANGES.interfaces)
    node.pending_addition = True
    node.pending_deletion = False
    task = models.Task(name=consts.TASK_NAMES.node_deletion,
                       cluster=cluster)
    db.commit()
    self.delete_node_by_astute(task, node)
Example 14: execute
def execute(self):
    stop_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name='stop_deployment'
    ).first()
    if stop_running:
        if stop_running.status == 'running':
            raise errors.StopAlreadyRunning(
                "Stopping deployment task "
                "is already launched"
            )
        else:
            db().delete(stop_running)
            db().commit()
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name='deployment',
        status='running'
    ).first()
    if not deploy_running:
        provisioning_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name='provision',
            status='running'
        ).first()
        if provisioning_running:
            raise errors.DeploymentNotRunning(
                u"Provisioning interruption for environment "
                u"'{0}' is not implemented right now".format(
                    self.cluster.id
                )
            )
        raise errors.DeploymentNotRunning(
            u"Nothing to stop - deployment is "
            u"not running on environment '{0}'".format(
                self.cluster.id
            )
        )
    task = Task(
        name="stop_deployment",
        cluster=self.cluster
    )
    db().add(task)
    db.commit()
    self._call_silently(
        task,
        tasks.StopDeploymentTask,
        deploy_task=deploy_running
    )
    return task
Example 15: prepare
def prepare():
    meta = base.reflect_db_metadata()
    # Fill in migration table with data
    db.execute(
        meta.tables[extensions_migration_buffer_table_name].insert(),
        [{'extension_name': 'volume_manager',
          'data': jsonutils.dumps({'node_id': 1,
                                   'volumes': [{'volume': 1}]})},
         {'extension_name': 'volume_manager',
          'data': jsonutils.dumps({'node_id': 2,
                                   'volumes': [{'volume': 2}]})},
         {'extension_name': 'some_different_extension',
          'data': 'some_data'}])
    db.commit()
Author: ekorekin | Project: fuel-web | Lines: 14 | Source file: test_migration_volume_manager_extension_001_add_volumes_table.py