This article collects typical usage examples of the Python method pysphere.vi_task.VITask.get_error_message. If you are unsure how to use VITask.get_error_message, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples of its containing class, pysphere.vi_task.VITask.
The 15 code examples below show VITask.get_error_message in use, drawn from open-source projects and ordered roughly by popularity.
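Before the examples, here is a minimal sketch of the pattern they all share: build a task request against a managed object reference, submit it through the server proxy, wrap the returned task MOR in a VITask, wait for a terminal state, and call get_error_message() when the task fails. This sketch is illustrative only; server and vm are hypothetical names for an already-connected pysphere VIServer and a VM object obtained from it.
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask

def destroy_with_error_reporting(server, vm):
    # Build a Destroy_Task request against the VM's managed object reference.
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    # Submit the task and wrap the returned task MOR in a VITask helper.
    task_mor = server._proxy.Destroy_Task(request)._returnval
    task = VITask(task_mor, server)

    # Block until the task reaches a terminal state.
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_ERROR:
        # get_error_message() returns the fault message reported for the task.
        raise RuntimeError("Task failed: %s" % task.get_error_message())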
Example 1: delete_vm
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def delete_vm(vsphere_client, module, guest, vm, force):
    try:
        if vm.is_powered_on():
            if force:
                try:
                    vm.power_off(sync_run=True)
                    vm.get_status()
                except Exception, e:
                    module.fail_json(
                        msg='Failed to shutdown vm %s: %s' % (guest, e))
            else:
                module.fail_json(
                    msg='You must either shut the vm down first or '
                        'use force')

        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vsphere_client._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, vsphere_client)

        # Wait for the task to finish
        status = task.wait_for_state(
            [task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_ERROR:
            vsphere_client.disconnect()
            module.fail_json(msg="Error removing vm: %s" %
                             task.get_error_message())

        module.exit_json(changed=True, changes="VM %s deleted" % guest)
Example 2: destroy
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def destroy(self, sync_run=True):
    """
    Destroys this object, deleting its contents and removing it from its
    parent folder (if any).
    * sync_run: (default True). If False, does not wait for the task to
      finish and returns an instance of a VITask for the user to monitor
      its progress.
    """
    try:
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(self._mor)
        _this.set_attribute_type(self._mor.get_attribute_type())
        request.set_element__this(_this)

        task = self._server._proxy.Destroy_Task(request)._returnval
        vi_task = VITask(task, self._server)
        if sync_run:
            status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                             vi_task.STATE_ERROR])
            if status == vi_task.STATE_ERROR:
                raise VIException(vi_task.get_error_message(),
                                  FaultTypes.TASK_ERROR)
            return

        return vi_task

    except (VI.ZSI.FaultException) as e:
        raise VIApiException(e)
Example 3: add_existence_vmdk
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def add_existence_vmdk(self, vm_name, path):
    """
    Add an existing hard drive (.vmdk) to the virtual machine
    :param vm_name: virtual machine name
    :param path: hard drive path
    :raise: ExistenceException, CreatorException
    """
    self._connect_to_esx()
    try:
        vm = self.esx_server.get_vm_by_name(vm_name)
    except Exception:
        raise ExistenceException("Couldn't find the virtual machine %s" % vm_name)

    unit_number = -1
    for disk in vm._disks:
        unit_number = max(unit_number, disk["device"]["unitNumber"])
    unit_number += 1

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    spec = request.new_spec()
    dc = spec.new_deviceChange()
    dc.Operation = "add"

    hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
    hd.Key = -100
    hd.UnitNumber = unit_number
    hd.CapacityInKB = 0
    hd.ControllerKey = 1000

    backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
    backing.FileName = path
    backing.DiskMode = "persistent"
    backing.ThinProvisioned = False
    hd.Backing = backing

    connectable = hd.new_connectable()
    connectable.StartConnected = True
    connectable.AllowGuestControl = False
    connectable.Connected = True
    hd.Connectable = connectable

    dc.Device = hd
    spec.DeviceChange = [dc]
    request.Spec = spec

    task = self.esx_server._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, self.esx_server)

    # Wait for the task to finish
    status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    if status == vi_task.STATE_ERROR:
        self._disconnect_from_esx()
        raise CreatorException("ERROR CONFIGURING VM: %s" % vi_task.get_error_message())
Example 4: destroy_vm
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def destroy_vm(self, vmname):
    """
    Destroys a virtual machine by name
    :param vmname: virtual machine name
    :raise: ExistenceException, CreatorException
    """
    self._connect_to_esx()
    try:
        vm = self.esx_server.get_vm_by_name(vmname)
    except Exception as error:
        self._disconnect_from_esx()
        raise ExistenceException("Couldn't find VM '%s' - %s" % (vmname, error.message))

    try:
        if vm.is_powered_on() or vm.is_powering_off() or vm.is_reverting():
            vm.power_off()

        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = self.esx_server._proxy.Destroy_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, self.esx_server)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status != task.STATE_SUCCESS:
            raise CreatorException("Couldn't destroy vm - " + task.get_error_message())
    except Exception:
        self._disconnect_from_esx()
        raise CreatorException("Couldn't destroy the virtual machine %s" % vmname)
Example 5: rename
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def rename(self, new_name, sync_run=True):
    """
    Renames this managed entity.
    * new_name: Any / (slash) or \ (backslash) character used in this name
      element will be escaped. Similarly, any % (percent) character used
      in this name element will be escaped, unless it is used to start an
      escape sequence. A slash is escaped as %2F or %2f. A backslash is
      escaped as %5C or %5c, and a percent is escaped as %25.
    * sync_run: (default True). If False, does not wait for the task to
      finish and returns an instance of a VITask for the user to monitor
      its progress.
    """
    try:
        request = VI.Rename_TaskRequestMsg()
        _this = request.new__this(self._mor)
        _this.set_attribute_type(self._mor.get_attribute_type())
        request.set_element__this(_this)
        request.set_element_newName(new_name)

        task = self._server._proxy.Rename_Task(request)._returnval
        vi_task = VITask(task, self._server)
        if sync_run:
            status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                             vi_task.STATE_ERROR])
            if status == vi_task.STATE_ERROR:
                raise VIException(vi_task.get_error_message(),
                                  FaultTypes.TASK_ERROR)
            return

        return vi_task

    except (VI.ZSI.FaultException) as e:
        raise VIApiException(e)
Example 6: main
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def main():
    opts = options()

    # CONNECTION PARAMETERS
    server = opts.esx_host
    user = opts.user
    password = opts.passwd

    # REQUIRED PARAMETERS
    vmname = opts.name

    # CONNECT TO THE SERVER
    s = VIServer()
    s.connect(server, user, password)

    try:
        vm = s.get_vm_by_name(opts.name)
        vm.shutdown_guest()
        count = 1
        wait_for = 60
        try:
            while count < wait_for and vm.is_powered_off() == False:
                count += 1
                time.sleep(1)
                print "Elapsed %s seconds ..." % str(count)
        except Exception as e:
            if count >= wait_for:
                print "Failed to shutdown the VM (%s) even after %s seconds." % (vmname, str(wait_for))
                print "Please login to the ESXi server and fix the issue. Exception: %s" % str(e)
                sys.exit(1)
        check_count(count, wait_for)
    except Exception as e:
        print "Failed to locate and shutdown the new VM using:", opts.name
        print "VM could not be deleted."
        print "Exception:", str(e)

    # Invoke Destroy_Task
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = s._proxy.Destroy_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully deleted from disk"
    elif status == task.STATE_ERROR:
        print "Error removing vm:", task.get_error_message()

    # disconnect from the server
    s.disconnect()
Example 7: destroyGuest
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def destroyGuest(host_con, guest_name):
    powerOffGuest(host_con, guest_name)
    try:
        vm = host_con.get_vm_by_name(guest_name)
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = host_con._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, host_con)
        print 'Waiting for VM to be deleted'
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            result = 'Successfully removed guest: %s' % guest_name
        elif status == task.STATE_ERROR:
            result = 'Failed to remove VM: %s\n%s' % (guest_name, task.get_error_message())
    except Exception as e:
        result = 'Failed to remove VM: %s\n%s' % (guest_name, str(e))
    return result
Example 8: handler_revert_to_snapshot
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def handler_revert_to_snapshot(self, task_id, parameters):
    vm_id = parameters['vm_id']
    snapshot_id = parameters['snapshot_id']

    vm_mor = VIMor(vm_id, MORTypes.VirtualMachine)
    snapshot_mor = VIMor(snapshot_id, MORTypes.VirtualMachineSnapshot)

    vm_properties_future = self.application.executor.submit(
        self.server._get_object_properties, vm_mor, ['name', 'snapshot'])

    request = VI.RevertToSnapshot_TaskRequestMsg()
    mor_snap = request.new__this(snapshot_mor)
    mor_snap.set_attribute_type(snapshot_mor.get_attribute_type())
    request.set_element__this(mor_snap)

    vm_name = None
    snapshot_name = None
    vm_properties = yield vm_properties_future
    for prop in vm_properties.PropSet:
        if prop.Name == 'name':
            vm_name = prop.Val
        elif prop.Name == 'snapshot':
            snapshot_dict = ActionHandler.build_snapshot_dict(prop.Val.RootSnapshotList)
            snapshot_name = snapshot_dict[snapshot_mor].Name

    TaskStatusHandler.update_task(task_id, 'Reverting {0} to {1}...'.format(vm_name, snapshot_name))

    vi_task = self.server._proxy.RevertToSnapshot_Task(request)._returnval
    vi_task = VITask(vi_task, self.server)
    status = yield self.application.executor.submit(
        vi_task.wait_for_state, [vi_task.STATE_SUCCESS,
                                 vi_task.STATE_ERROR])

    if status == vi_task.STATE_ERROR:
        raise VIException(vi_task.get_error_message(),
                          FaultTypes.TASK_ERROR)

    TaskStatusHandler.update_task(task_id, 'Successfully reverted {0} to {1}'.format(vm_name, snapshot_name))
    TaskStatusHandler.delete_task(task_id)

    self.send_vm_update(vm_id)
Example 9: destroy_node
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def destroy_node(self, node, ex_remove_files=True):
    """
    :param ex_remove_files: Remove all the files from the datastore.
    :type ex_remove_files: ``bool``
    """
    ex_remove_files = False
    vm = self._get_vm_for_node(node=node)
    server = self.connection.client

    # Based on code from
    # https://pypi.python.org/pypi/pyxenter
    if ex_remove_files:
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = server._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, server)

        # Wait for the task to finish
        status = task.wait_for_state([task.STATE_SUCCESS,
                                      task.STATE_ERROR])
        if status == task.STATE_ERROR:
            raise LibcloudError('Error destroying node: %s' %
                                (task.get_error_message()))
    else:
        request = VI.UnregisterVMRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = server._proxy.UnregisterVM(request)
        task = VITask(ret, server)

    return True
Example 10: reconfigure_vm
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if vm_hardware['memory_mb']:
        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                                "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                                "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config Memory )====#
    if vm_hardware['num_cpus']:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                                "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                    "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))
            changes['cpu'] = vm_hardware['num_cpus']

    if len(changes):
        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()
            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off():
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                # ......... (some code omitted here) .........
Example 11: VITask
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM
taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, vsphere_client)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    vsphere_client.disconnect()
    module.fail_json(msg="Error creating vm: %s" %
                     task.get_error_message())
else:
    # We always need to get the vm because we are going to gather facts
    vm = vsphere_client.get_vm_by_name(guest)

    # VM was created. If any extra config options were specified, set
    # them here, disconnect from vCenter, then exit.
    if vm_extra_config:
        vm.set_extra_config(vm_extra_config)

    # Power on the VM if it was requested
    power_state(vm, state, True)

    vsphere_client.disconnect()
    module.exit_json(
        ansible_facts=gather_facts(vm),
Example 12: create_vm
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
# ......... (some code omitted here) .........
if not ds:
    raise CreatorException("Datastore is not available")
volume_name = "[%s]" % ds_name

# add parameters to the create vm task
create_vm_request = VI.CreateVM_TaskRequestMsg()
config = create_vm_request.new_config()
vmfiles = config.new_files()
vmfiles.set_element_vmPathName(volume_name)
config.set_element_files(vmfiles)
config.set_element_name(vm_name)
config.set_element_annotation(description)
config.set_element_memoryMB(memory_size)
config.set_element_numCPUs(cpu_count)
config.set_element_guestId(guestosid)
devices = []

# add a scsi controller
disk_ctrl_key = 1
scsi_ctrl_spec = config.new_deviceChange()
scsi_ctrl_spec.set_element_operation("add")
scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
scsi_ctrl.set_element_busNumber(0)
scsi_ctrl.set_element_key(disk_ctrl_key)
scsi_ctrl.set_element_sharedBus("noSharing")
scsi_ctrl_spec.set_element_device(scsi_ctrl)
devices.append(scsi_ctrl_spec)

# find ide controller
if iso:
    ide_ctlr = None
    for dev in defaul_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev

    # add a cdrom based on a physical device
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation("add")
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds)
        ds_ref.set_attribute_type(ds.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (volume_name, iso))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)

# create a new disk - file based - for the vm
if disk_size != 0:
    disk_spec = config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(volume_name)
    disk_backing.set_element_diskMode("persistent")
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    disk_ctlr.set_element_capacityInKB(disk_size)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)

# add a NIC. the network Name must be set as the device name to create the NIC.
for network_name in networks:
    nic_spec = config.new_deviceChange()
    nic_spec.set_element_operation("add")
    nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
    nic_backing.set_element_deviceName(network_name)
    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    nic_spec.set_element_device(nic_ctlr)
    devices.append(nic_spec)

config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM - add option "wait"
taskmor = self.esx_server._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, self.esx_server)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    self._disconnect_from_esx()
    raise CreatorException("Error creating vm: %s" % task.get_error_message())
Example 13: getMac
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
new_rp_mor = create_vm_request.new_pool(rp_mor)
new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
new_host_mor = create_vm_request.new_host(host_mor)
new_host_mor.set_attribute_type(host_mor.get_attribute_type())
create_vm_request.set_element__this(new_vmf_mor)
create_vm_request.set_element_pool(new_rp_mor)
create_vm_request.set_element_host(new_host_mor)
# finally, actually create the guest :)
task_mor = host_con._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(task_mor, host_con)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    return "Cannot create guest: " + task.get_error_message()
else:
    return "Successfully created guest: " + guest_name

def getMac(host_con, guest_name):
    vm = host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac
    #for v in vm.get_property("devices").values():
    #    if v.get('macAddress'):
    #        return v.get('macAddress')
Example 14: create_vm
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
# ......... (some code omitted here) .........
scsi_ctrl_spec = config.new_deviceChange()
scsi_ctrl_spec.set_element_operation('add')
scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
scsi_ctrl.set_element_busNumber(0)
scsi_ctrl.set_element_key(disk_ctrl_key)
scsi_ctrl.set_element_sharedBus("noSharing")
scsi_ctrl_spec.set_element_device(scsi_ctrl)
devices.append(scsi_ctrl_spec)

# find ide controller
ide_ctlr = None
for dev in defaul_devs:
    if dev.typecode.type[1] == "VirtualIDEController":
        ide_ctlr = dev

# add a cdrom based on a physical device
if ide_ctlr:
    cd_spec = config.new_deviceChange()
    cd_spec.set_element_operation('add')
    cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
    cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
    ds_ref = cd_device_backing.new_datastore(ds)
    ds_ref.set_attribute_type(ds.get_attribute_type())
    cd_device_backing.set_element_datastore(ds_ref)
    cd_device_backing.set_element_fileName("%s %s" % (volume_name,
                                                      cd_iso_location))
    cd_ctrl.set_element_backing(cd_device_backing)
    cd_ctrl.set_element_key(20)
    cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
    cd_ctrl.set_element_unitNumber(0)
    cd_spec.set_element_device(cd_ctrl)
    devices.append(cd_spec)

# create a new disk - file based - for the vm
disk_spec = config.new_deviceChange()
disk_spec.set_element_fileOperation("create")
disk_spec.set_element_operation("add")
disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
disk_backing.set_element_fileName(volume_name)
disk_backing.set_element_diskMode("persistent")
disk_backing.ThinProvisioned = True
disk_ctlr.set_element_key(0)
disk_ctlr.set_element_controllerKey(disk_ctrl_key)
disk_ctlr.set_element_unitNumber(0)
disk_ctlr.set_element_backing(disk_backing)
disk_ctlr.set_element_capacityInKB(disksize)
disk_spec.set_element_device(disk_ctlr)
devices.append(disk_spec)

# add a NIC. the network Name must be set as the device name to create the NIC.
nic_spec = config.new_deviceChange()
if network_name:
    nic_spec.set_element_operation("add")
    nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
    nic_backing.set_element_deviceName(network_name)
    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    nic_spec.set_element_device(nic_ctlr)
    devices.append(nic_spec)

config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM
taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, s)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    raise Exception("Error creating vm: %s" %
                    task.get_error_message())

# Here you should power on your VM (refer to the pysphere documentation)
# so it boots from the specified ISO location
try:
    new_vm = s.get_vm_by_name(opts.name)
    connect_vm_cdroms(new_vm, s)
    try:
        new_vm.power_on()
    except Exception as e:
        print "Failed to power-on the new VM using:", opts.name
        print "Exception:", str(e)
except Exception as e:
    print "Failed to locate the new VM using:", opts.name
    print "Exception:", str(e)

# disconnect from the server
s.disconnect()
Example 15: VITask
# Required import: from pysphere.vi_task import VITask [as alias]
# Or: from pysphere.vi_task.VITask import get_error_message [as alias]
        module.exit_json(changed=False)
    except Exception as e:
        viserver.disconnect()
        module.fail_json(msg=str(e))

    try:
        req = VI.MoveIntoFolder_TaskRequestMsg()
        req.set_element__this(folder_mor)
        req.set_element_list(vm_mors)
        task = VITask(viserver._proxy.MoveIntoFolder_Task(req).Returnval, viserver)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if task.get_state() == task.STATE_ERROR:
            viserver.disconnect()
            module.fail_json(msg="Error moving vm: %s to folder %s. Error: %s" %
                             (found_vms, json.dumps(folder_structure), task.get_error_message()))
        else:
            changed = True
    except Exception as e:
        viserver.disconnect()
        module.fail_json(msg="Error requesting VM move: %s to folder %s. Error: %s" %
                         (found_vms, json.dumps(folder_structure), str(e)))

    viserver.disconnect()
    module.exit_json(
        changed=changed,
        changes=found_vms)

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

if __name__ == '__main__':