本文整理汇总了Python中ovs.extensions.generic.sshclient.SSHClient.file_delete方法的典型用法代码示例。如果您正苦于以下问题:Python SSHClient.file_delete方法的具体用法?Python SSHClient.file_delete怎么用?Python SSHClient.file_delete使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ovs.extensions.generic.sshclient.SSHClient
的用法示例。
在下文中一共展示了SSHClient.file_delete方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: delete_volume
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
def delete_volume(vdisk, vpool, loop_device=None, root_client=None, wait=True):
    """
    Delete a volume
    :param vdisk: Virtual disk to delete
    :param vpool: vPool which hosts the Virtual Disk
    :param loop_device: Loop device where volume is mounted on
    :param root_client: SSHClient object
    :param wait: Wait for the volume to be deleted from model
    :return: None
    """
    location = GeneralVDisk.get_filesystem_location(vpool=vpool,
                                                    vdisk_name=vdisk.name)
    if root_client is None:
        root_client = SSHClient('127.0.0.1', username='root')
    if loop_device is not None:
        # Unmount and release the loop device before removing its backing file
        root_client.run('umount /dev/{0}'.format(loop_device))
        root_client.run('losetup -d /dev/{0}'.format(loop_device))
        root_client.dir_delete('/mnt/{0}'.format(loop_device))
    root_client.file_delete(location)
    if wait is True:
        counter = 0
        timeout = 60
        # Model name is the file name without its '-flat.vmdk' / '.raw' suffix
        volume_name = os.path.basename(location).replace('-flat.vmdk', '').replace('.raw', '')
        # Fixed: original condition was 'True and counter < timeout'; the
        # 'True and' was a no-op and has been removed.
        while counter < timeout:
            time.sleep(1)
            vdisks = GeneralVDisk.get_vdisk_by_name(name=volume_name)
            if vdisks is None:
                break
            counter += 1
        if counter == timeout:
            raise RuntimeError('Disk {0} was not deleted from model after {1} seconds'.format(volume_name, timeout))
示例2: delete_volume
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
def delete_volume(vdisk, vpool, loop_device=None, root_client=None, wait=True):
    """
    Delete a volume
    :param vdisk: Virtual disk to delete
    :param vpool: vPool which hosts the Virtual Disk
    :param loop_device: Loop device where volume is mounted on
    :param root_client: SSHClient object
    :param wait: Wait for the volume to be deleted from model
    :return: None
    """
    location = GeneralVDisk.get_filesystem_location(vpool=vpool,
                                                    vdisk_name=vdisk.name)
    if root_client is None:
        root_client = SSHClient('127.0.0.1', username='root')
    if loop_device is not None:
        # Detach the loop device before removing the backing file
        GeneralVDisk.disconnect_volume(loop_device, root_client)
    root_client.file_delete(location)
    if wait is True:
        counter = 0
        timeout = 60
        # Model stores the device name with a leading slash
        volume_name = '/' + os.path.basename(location)
        # Fixed: original condition was 'True and counter < timeout'; the
        # 'True and' was a no-op and has been removed.
        while counter < timeout:
            time.sleep(1)
            vdisks = VDiskList.get_by_devicename_and_vpool(volume_name, vpool)
            if vdisks is None:
                break
            counter += 1
        if counter == timeout:
            raise RuntimeError('Disk {0} was not deleted from model after {1} seconds'.format(volume_name, timeout))
示例3: clean_leftover_arakoon_data
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
def clean_leftover_arakoon_data(ip, directories):
    """
    Delete existing arakoon data or copy to the side
    Directories should be a dict with key the absolute paths and value a boolean indicating archive or delete
    eg: {'/var/log/arakoon/ovsdb': True,                     --> Files under this directory will be archived
         '/opt/OpenvStorage/db/arakoon/ovsdb/tlogs': False}  --> Files under this directory will be deleted
    :param ip: IP on which to check for existing data
    :type ip: str
    :param directories: Directories to archive or delete
    :type directories: dict
    :return: None
    """
    root_client = SSHClient(ip, username='root')

    # Pass 1: collect the files per directory and refuse to continue while
    # any of them is still held open by a process.
    open_file_errors = []
    ArakoonInstaller._logger.debug('Cleanup old arakoon - Checking open files')
    dirs_with_files = {}
    for directory, archive in directories.iteritems():
        ArakoonInstaller._logger.debug('Cleaning old arakoon - Checking directory {0}'.format(directory))
        if not root_client.dir_exists(directory):
            continue
        ArakoonInstaller._logger.debug('Cleaning old arakoon - Directory {0} exists'.format(directory))
        file_names = root_client.file_list(directory, abs_path=True, recursive=True)
        if len(file_names) > 0:
            ArakoonInstaller._logger.debug('Cleaning old arakoon - Files found in directory {0}'.format(directory))
            dirs_with_files[directory] = {'files': file_names,
                                          'archive': archive}
        for file_name in file_names:
            try:
                open_files = root_client.run('lsof {0}'.format(file_name))
            except CalledProcessError:
                # lsof exits non-zero when no process holds the file open
                continue
            if open_files != '':
                open_file_errors.append('Open file {0} detected in directory {1}'.format(os.path.basename(file_name), directory))
    if len(open_file_errors) > 0:
        raise RuntimeError('\n - ' + '\n - '.join(open_file_errors))

    # Pass 2: archive (zipped tar) where requested, then remove the originals.
    for directory, info in dirs_with_files.iteritems():
        if info['archive'] is True:
            ArakoonInstaller._logger.debug('Cleanup old arakoon - Start archiving directory {0}'.format(directory))
            archive_dir = '{0}/archive'.format(directory)
            if not root_client.dir_exists(archive_dir):
                ArakoonInstaller._logger.debug('Cleanup old arakoon - Creating archive directory {0}'.format(archive_dir))
                root_client.dir_create(archive_dir)
            ArakoonInstaller._logger.debug('Cleanup old arakoon - Creating tar file')
            tar_name = '{0}/{1}.tgz'.format(archive_dir, int(time.time()))
            root_client.run('cd {0}; tar -cz -f {1} --exclude "archive" *'.format(directory, tar_name))
        ArakoonInstaller._logger.debug('Cleanup old arakoon - Removing old files from {0}'.format(directory))
        root_client.file_delete(info['files'])
示例4: get_update_metadata
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
def get_update_metadata(storagerouter_ip):
    """
    Returns metadata required for updating
      - Checks if 'at' is installed properly
      - Checks if ongoing updates are busy
      - Check if StorageRouter is reachable
    :param storagerouter_ip: IP of the Storage Router to check the metadata for
    :type storagerouter_ip: str
    :return: Update status for specified storage router
    :rtype: dict
    """
    at_ok = True
    reachable = True
    root_client = None
    update_ongoing = False
    try:
        root_client = SSHClient(endpoint=storagerouter_ip, username='root')
        update_ongoing = root_client.file_exists(filename='/etc/update_ongoing')
        # Schedule a trivial job through 'at' and poll for its side effect
        root_client.run(['which', 'at'])
        root_client.run('echo "echo test > /tmp/test_at_2" > /tmp/test_at_1', allow_insecure=True)
        root_client.run(['at', '-f', '/tmp/test_at_1', 'now'])
        for _ in range(10):  # poll up to ~1 second for the job output
            if root_client.file_exists('/tmp/test_at_2'):
                # Job ran; verify it produced the expected content
                at_ok = root_client.file_read('/tmp/test_at_2').strip() == 'test'
                break
            at_ok = False
            time.sleep(0.1)
    except UnableToConnectException:
        UpdateController._logger.warning('StorageRouter with IP {0} could not be checked'.format(storagerouter_ip))
        reachable = False
    except CalledProcessError:
        UpdateController._logger.exception('Verifying "at" dependency on StorageRouter with IP {0} failed'.format(storagerouter_ip))
        at_ok = False
    finally:
        # Best-effort cleanup of the probe files
        if root_client is not None:
            root_client.file_delete(['/tmp/test_at_2', '/tmp/test_at_1'])
    return {'at_ok': at_ok,
            'reachable': reachable,
            'update_ongoing': update_ongoing}
示例5: clean_leftover_arakoon_data
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
def clean_leftover_arakoon_data(ip, directories):
    """
    Delete existing arakoon data
    :param ip: IP on which to check for existing data
    :type ip: str
    :param directories: Directories to delete
    :type directories: list
    :return: None
    """
    # Unit tests have no real nodes to SSH into
    if os.environ.get('RUNNING_UNITTESTS') == 'True':
        return
    root_client = SSHClient(ip, username='root')

    # Pass 1: collect the files per directory and refuse to continue while
    # any of them is still held open by a process.
    open_file_errors = []
    ArakoonInstaller._logger.debug('Cleanup old arakoon - Checking open files')
    dirs_with_files = {}
    for directory in directories:
        ArakoonInstaller._logger.debug('Cleaning old arakoon - Checking directory {0}'.format(directory))
        if not root_client.dir_exists(directory):
            continue
        ArakoonInstaller._logger.debug('Cleaning old arakoon - Directory {0} exists'.format(directory))
        file_names = root_client.file_list(directory, abs_path=True, recursive=True)
        if len(file_names) > 0:
            ArakoonInstaller._logger.debug('Cleaning old arakoon - Files found in directory {0}'.format(directory))
            dirs_with_files[directory] = file_names
        for file_name in file_names:
            try:
                open_files = root_client.run(['lsof', file_name])
            except CalledProcessError:
                # lsof exits non-zero when no process holds the file open
                continue
            if open_files != '':
                open_file_errors.append('Open file {0} detected in directory {1}'.format(os.path.basename(file_name), directory))
    if len(open_file_errors) > 0:
        raise RuntimeError('\n - ' + '\n - '.join(open_file_errors))

    # Pass 2: actually remove the collected files
    for directory, info in dirs_with_files.iteritems():
        ArakoonInstaller._logger.debug('Cleanup old arakoon - Removing old files from {0}'.format(directory))
        root_client.file_delete(info)
示例6: OpenStackManagement
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
#.........这里部分代码省略.........
cfg.set("DEFAULT", "notification_topics", ",".join(notification_topics))
else:
changed = True
cfg.set("DEFAULT", "notification_topics", "notifications")
if config_file == self._NOVA_CONF:
for param, value in {'notify_on_any_change': 'True',
'notify_on_state_change': 'vm_and_task_state'}.iteritems():
if not cfg.has_option("DEFAULT", param):
changed = True
cfg.set("DEFAULT", param, value)
if changed is True:
with rem.open(config_file, "w") as fp:
cfg.write(fp)
# 5. Enable events consumer
self._logger.info(' - Enabling events consumer service')
service_name = 'openstack-events-consumer'
if not ServiceManager.has_service(service_name, self.client):
ServiceManager.add_service(service_name, self.client)
ServiceManager.enable_service(service_name, self.client)
ServiceManager.start_service(service_name, self.client)
def unconfigure_host(self, ip):
if self._is_devstack is False and self._is_openstack is False or self._cinder_installed is False or self._nova_installed is False:
self._logger.warning('Unconfigure host: No OpenStack nor DevStack installation detected or Cinder and Nova plugins are not installed')
return
# 1. Remove driver code
self._logger.info('*** Unconfiguring host with IP {0} ***'.format(ip))
self._logger.info(' Removing driver code')
if self._is_devstack is True:
self.client.file_delete(self._devstack_driver)
else:
self.client.file_delete('{0}/cinder/volume/drivers/openvstorage.py'.format(self._driver_location))
# 2. Removing users from group
self._logger.info(' Removing users from group ovs')
for user in ['libvirt-qemu', 'stack'] if self._is_devstack is True else self._openstack_users:
self.client.run('deluser {0} ovs'.format(user))
# 3. Revert patches
self._logger.info(' Reverting patches')
nova_base_path = self._get_base_path('nova')
cinder_base_path = self._get_base_path('cinder')
if self._is_devstack is True:
nova_volume_file = '{0}/virt/libvirt/volume.py'.format(nova_base_path)
nova_driver_file = '{0}/virt/libvirt/driver.py'.format(nova_base_path)
cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(cinder_base_path)
else:
nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(self._driver_location)
nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(self._driver_location)
cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(self._driver_location)
self._logger.info(' Reverting patched file: {0}'.format(nova_volume_file))
new_contents = []
skip_class = False
for line in self.client.file_read(nova_volume_file).splitlines():
if line.startswith('class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):'):
skip_class = True
continue
if line.startswith('class'):
skip_class = False
if skip_class is False:
示例7: remove_node
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
#.........这里部分代码省略.........
cluster_ip=storage_router_to_remove.ip,
master_ip=master_ip,
ip_client_map=ip_client_map,
unique_id=storage_router_to_remove.machine_id,
unconfigure_memcached=internal_memcached,
unconfigure_rabbitmq=internal_rabbit_mq,
offline_nodes=storage_routers_offline,
)
# Stop / remove services
Toolbox.log(logger=NodeRemovalController._logger, messages="Stopping and removing services")
config_store = Configuration.get_store()
if storage_router_to_remove_online is True:
client = SSHClient(endpoint=storage_router_to_remove, username="root")
NodeRemovalController.remove_services(
client=client,
node_type=storage_router_to_remove.node_type.lower(),
logger=NodeRemovalController._logger,
)
service = "watcher-config"
if ServiceManager.has_service(service, client=client):
Toolbox.log(logger=NodeRemovalController._logger, messages="Removing service {0}".format(service))
ServiceManager.stop_service(service, client=client)
ServiceManager.remove_service(service, client=client)
if config_store == "etcd":
from ovs.extensions.db.etcd.installer import EtcdInstaller
if Configuration.get(key="/ovs/framework/external_config") is None:
Toolbox.log(logger=NodeRemovalController._logger, messages=" Removing Etcd cluster")
try:
EtcdInstaller.stop("config", client)
EtcdInstaller.remove("config", client)
except Exception as ex:
Toolbox.log(
logger=NodeRemovalController._logger,
messages=["\nFailed to unconfigure Etcd", ex],
loglevel="exception",
)
Toolbox.log(logger=NodeRemovalController._logger, messages="Removing Etcd proxy")
EtcdInstaller.remove_proxy("config", client.ip)
Toolbox.run_hooks(
component="noderemoval",
sub_component="remove",
logger=NodeRemovalController._logger,
cluster_ip=storage_router_to_remove.ip,
complete_removal=remove_asd_manager,
)
# Clean up model
Toolbox.log(logger=NodeRemovalController._logger, messages="Removing node from model")
for service in storage_router_to_remove.services:
service.delete()
for disk in storage_router_to_remove.disks:
for partition in disk.partitions:
partition.delete()
disk.delete()
for j_domain in storage_router_to_remove.domains:
j_domain.delete()
Configuration.delete("/ovs/framework/hosts/{0}".format(storage_router_to_remove.machine_id))
NodeTypeController.restart_framework_and_memcache_services(
clients=ip_client_map,
offline_node_ips=[node.ip for node in storage_routers_offline],
logger=NodeRemovalController._logger,
)
if storage_router_to_remove_online is True:
client = SSHClient(endpoint=storage_router_to_remove, username="root")
if config_store == "arakoon":
client.file_delete(filenames=[ArakoonConfiguration.CACC_LOCATION])
client.file_delete(filenames=[Configuration.BOOTSTRAP_CONFIG_LOCATION])
storage_router_to_remove.delete()
Toolbox.log(logger=NodeRemovalController._logger, messages="Successfully removed node\n")
except Exception as exception:
Toolbox.log(logger=NodeRemovalController._logger, messages="\n")
Toolbox.log(
logger=NodeRemovalController._logger,
messages=["An unexpected error occurred:", str(exception)],
boxed=True,
loglevel="exception",
)
sys.exit(1)
except KeyboardInterrupt:
Toolbox.log(logger=NodeRemovalController._logger, messages="\n")
Toolbox.log(
logger=NodeRemovalController._logger,
messages="This setup was aborted. Open vStorage may be in an inconsistent state, make sure to validate the installation.",
boxed=True,
loglevel="error",
)
sys.exit(1)
if remove_asd_manager is True:
Toolbox.log(logger=NodeRemovalController._logger, messages="\nRemoving ASD Manager")
with remote(storage_router_to_remove.ip, [os]) as rem:
rem.os.system("asd-manager remove --force-yes")
Toolbox.log(logger=NodeRemovalController._logger, messages="Remove nodes finished", title=True)
示例8: Sdk
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
#.........这里部分代码省略.........
get vm based on filename: vmachines/template/template.xml
"""
vmid = filename.split("/")[-1].replace(".xml", "")
return self.get_vm_object(vmid)
@authenticated
def get_vms(self):
    """
    Return a list of virDomain objects, representing virtual machines
    :return: result of listAllDomains() on the libvirt connection
    """
    return self._conn.listAllDomains()
def shutdown(self, vmid):
    """
    Request a shutdown of the domain identified by vmid
    :param vmid: Identifier of the vm
    :return: power state of the vm after the shutdown request
    """
    vm_object = self.get_vm_object(vmid)
    vm_object.shutdown()
    return self.get_power_state(vmid)
@authenticated
def delete_vm(self, vmid, devicename, disks_info):
    """
    Delete domain from libvirt
    Try to delete all files from vpool (xml, .raw)
    :param vmid: Identifier of the domain to remove
    :param devicename: Device name used to locate leftover files on the vpool
    :param disks_info: Iterable of (path, devicename) pairs to roll back when the VM was never created
    :return: True
    """
    if self.ssh_client is None:
        self.ssh_client = SSHClient(self.host, username="root")
    vm_object = None
    try:
        vm_object = self.get_vm_object(vmid)
    except Exception as ex:
        # Domain lookup is best-effort: file cleanup below must happen regardless
        logger.error("SDK domain retrieve failed: {}".format(ex))
    found_files = self.find_devicename(devicename)
    if found_files is not None:
        for found_file in found_files:
            self.ssh_client.file_delete(found_file)
            logger.info("File on vpool deleted: {0}".format(found_file))
    if vm_object:
        found_file = ""
        # VM partially created, most likely we have disks
        for disk in self._get_disks(vm_object):
            if disk["device"] == "cdrom":
                continue
            if "file" in disk["source"]:
                found_file = disk["source"]["file"]
            elif "dev" in disk["source"]:
                found_file = disk["source"]["dev"]
            if found_file and os.path.exists(found_file) and os.path.isfile(found_file):
                self.ssh_client.file_delete(found_file)
                logger.info("File on vpool deleted: {0}".format(found_file))
        vm_object.undefine()
    elif disks_info:
        # VM not created, we have disks to rollback.
        # Fixed: the loop variable was named 'devicename', shadowing the
        # parameter of the same name; renamed to 'disk_name'.
        for path, disk_name in disks_info:
            found_file = "{}/{}".format(path, disk_name)
            if os.path.exists(found_file) and os.path.isfile(found_file):
                self.ssh_client.file_delete(found_file)
                logger.info("File on vpool deleted: {0}".format(found_file))
    return True
def power_on(self, vmid):
    """
    Power on the domain identified by vmid
    :param vmid: Identifier of the vm
    :return: power state of the vm after the start request
    """
    vm_object = self.get_vm_object(vmid)
    vm_object.create()  # libvirt starts an already-defined domain via create()
    return self.get_power_state(vmid)
def find_devicename(self, devicename):
"""
Searched for a given devicename
示例9: test_basic_logrotate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
#.........这里部分代码省略.........
assert_raises(excClass=CalledProcessError,
callableObj=root_client.run,
command='logrotate {0}'.format(custom_logrotate_cfg_file))
##########################################
# Test 1st logrotate configuration entry #
##########################################
root_client.dir_create(directories=custom_logrotate_dir)
root_client.dir_chown(directories=custom_logrotate_dir,
user='ovs',
group='ovs',
recursive=True)
root_client.run(command='touch {0}'.format(custom_logrotate_file1))
root_client.run(command='touch {0}'.format(custom_logrotate_file2))
root_client.file_chmod(filename=custom_logrotate_file1, mode=666)
root_client.file_chmod(filename=custom_logrotate_file2, mode=666)
# Write data to the file less than size for rotation and verify rotation
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=15,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=2,
msg='More files than expected present in {0}'.format(custom_logrotate_dir))
# Write data to file larger than size in configuration and verify amount of rotations
files_to_delete = []
for counter in range(7):
expected_file = '{0}.{1}.gz'.format(custom_logrotate_file1, counter + 1 if counter < 5 else 5)
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=30,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=counter + 3 if counter < 5 else 7,
msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
assert_true(expr=root_client.file_exists(filename=expected_file),
msg='Logrotate did not create the expected file {0}'.format(expected_file))
user_info = General.get_owner_group_for_path(path=expected_file,
root_client=root_client)
assert_equal(first='root',
second=user_info['user']['name'],
msg='Expected file to be owned by user "root", but instead its owned by "{0}"'.format(user_info['user']['name']))
assert_equal(first='root',
second=user_info['group']['name'],
msg='Expected file to be owned by group "root", but instead its owned by "{0}"'.format(user_info['group']['name']))
files_to_delete.append(expected_file)
root_client.file_delete(filenames=files_to_delete)
##########################################
# Test 2nd logrotate configuration entry #
##########################################
root_client.file_chown(filenames=custom_logrotate_file2,
user='ovs',
group='ovs')
# Write data to the file less than size for rotation and verify rotation
GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
count=15,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=2,
msg='More files than expected present in {0}'.format(custom_logrotate_dir))
# Write data to file larger than size in configuration and verify amount of rotations
for counter in range(12):
if counter == 0: # Delaycompress --> file is not compressed during initial cycle
expected_file = '{0}.1'.format(custom_logrotate_file2)
else:
expected_file = '{0}.{1}.gz'.format(custom_logrotate_file2, counter + 1 if counter < 10 else 10)
GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
count=30,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=counter + 3 if counter < 10 else 12,
msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
assert_true(expr=root_client.file_exists(filename=expected_file),
msg='Logrotate did not create the expected file {0}'.format(expected_file))
user_info = General.get_owner_group_for_path(path=expected_file,
root_client=root_client)
assert_equal(first='ovs',
second=user_info['user']['name'],
msg='Expected file to be owned by user "root", but instead its owned by "{0}"'.format(user_info['user']['name']))
assert_equal(first='ovs',
second=user_info['group']['name'],
msg='Expected file to be owned by group "root", but instead its owned by "{0}"'.format(user_info['group']['name']))
root_client.dir_delete(directories=custom_logrotate_dir)
root_client.file_delete(filenames=custom_logrotate_cfg_file)
示例10: Sdk
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import file_delete [as 别名]
#.........这里部分代码省略.........
@authenticated
def get_vms(self):
    """
    Return a list of virDomain objects, representing virtual machines
    :return: result of listAllDomains() on the libvirt connection
    """
    return self._conn.listAllDomains()
def shutdown(self, vmid):
    """
    Shutdown vm
    :param vmid: ID of vm
    :return: power state of the vm after the shutdown request
    """
    vm_object = self.get_vm_object(vmid)
    vm_object.shutdown()
    return self.get_power_state(vmid)
@authenticated
def delete_vm(self, vmid, devicename, disks_info):
    """
    Delete domain from libvirt
    Try to delete all files from vpool (xml, .raw)
    :param vmid: Identifier of the domain to remove
    :param devicename: Device name used to locate leftover files on the vpool
    :param disks_info: Iterable of (path, devicename) pairs to roll back when the VM was never created
    :return: True
    """
    if self.ssh_client is None:
        self.ssh_client = SSHClient(self.host, username='root')
    vm_object = None
    try:
        vm_object = self.get_vm_object(vmid)
    except Exception as ex:
        # Domain lookup is best-effort: file cleanup below must happen regardless
        self._logger.error('SDK domain retrieve failed: {}'.format(ex))
    found_files = self.find_devicename(devicename)
    if found_files is not None:
        for found_file in found_files:
            self.ssh_client.file_delete(found_file)
            self._logger.info('File on vpool deleted: {0}'.format(found_file))
    if vm_object:
        found_file = ''
        # VM partially created, most likely we have disks
        for disk in self._get_disks(vm_object):
            if disk['device'] == 'cdrom':
                continue
            if 'file' in disk['source']:
                found_file = disk['source']['file']
            elif 'dev' in disk['source']:
                found_file = disk['source']['dev']
            if found_file and os.path.exists(found_file) and os.path.isfile(found_file):
                self.ssh_client.file_delete(found_file)
                self._logger.info('File on vpool deleted: {0}'.format(found_file))
        vm_object.undefine()
    elif disks_info:
        # VM not created, we have disks to rollback.
        # Fixed: the loop variable was named 'devicename', shadowing the
        # parameter of the same name; renamed to 'disk_name'.
        for path, disk_name in disks_info:
            found_file = '{}/{}'.format(path, disk_name)
            if os.path.exists(found_file) and os.path.isfile(found_file):
                self.ssh_client.file_delete(found_file)
                self._logger.info('File on vpool deleted: {0}'.format(found_file))
    return True
def power_on(self, vmid):
    """
    Power on vm
    :param vmid: ID of vm
    :return: power state of the vm after the start request
    """
    vm_object = self.get_vm_object(vmid)
    vm_object.create()  # libvirt starts an already-defined domain via create()
    return self.get_power_state(vmid)