本文整理汇总了Python中sfa.util.sfalogging.logger.log_exc函数的典型用法代码示例。如果您正苦于以下问题:Python log_exc函数的具体用法?Python log_exc怎么用?Python log_exc使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log_exc函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: convert_public_key
def convert_public_key(key):
    """Convert an SSH public key into an SSL public key via keyconvert.

    :param key: SSH public key text (e.g. "ssh-rsa AAAA... user@host").
    :returns: a Keypair loaded with the converted public key, or None when
        the key is a DSA key or the conversion produced no output file.
    :raises IOError: if the keyconvert helper script is not installed.
    """
    # local import so the module-level import block stays untouched
    import subprocess
    keyconvert_path = "/usr/bin/keyconvert.py"
    if not os.path.isfile(keyconvert_path):
        # parenthesized raise: the old "raise IOError, msg" form is py2-only
        raise IOError("Could not find keyconvert in %s" % keyconvert_path)

    # we can only convert rsa keys
    if "ssh-dss" in key:
        return None

    (ssh_f, ssh_fn) = tempfile.mkstemp()
    ssl_fn = tempfile.mktemp()
    os.write(ssh_f, key)
    os.close(ssh_f)
    # list-form call: no shell involved, so the file names are passed
    # verbatim (the original built a shell command string for os.system)
    subprocess.call([keyconvert_path, ssh_fn, ssl_fn])

    # this check leaves the temporary file containing the public key so
    # that it can be inspected to see why it failed.
    # TODO: for production, cleanup the temporary files
    if not os.path.exists(ssl_fn):
        return None

    k = Keypair()
    try:
        k.load_pubkey_from_file(ssl_fn)
    except Exception:
        # narrowed from a bare except; log_exc records the traceback
        logger.log_exc("convert_public_key caught exception")
        k = None

    # remove the temporary files
    os.remove(ssh_fn)
    os.remove(ssl_fn)

    return k
示例2: GETRequestToOARRestAPI
def GETRequestToOARRestAPI(self, request, strval=None ,next_page=None, username = None ):
    """Send a GET request to the OAR REST API.

    :param request: key into OARGETParser.OARrequests_uri_dict selecting the
        URI template to use.
    :param strval: optional id value substituted for the literal "id" in the
        URI template.
    :param next_page: optional pagination suffix appended to the URI.
    :param username: optional user name; selects the owner-filtered URI
        variant and is also sent in the X-REMOTE_IDENT header.

    NOTE(review): on HTTPException the error is only logged, so the method
    falls off the end and implicitly returns None. The response body is read
    into ``resp`` but never returned in this excerpt -- confirm against the
    full source whether a return statement follows.
    """
    self.oarserver['uri'] = \
        OARGETParser.OARrequests_uri_dict[request]['uri']
    #Get job details with username
    if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
        self.oarserver['uri'] += OARGETParser.OARrequests_uri_dict[request]['owner'] + username
    headers = {}
    data = json.dumps({})
    logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" %(request))
    if strval:
        # substitute the placeholder "id" in the URI template
        self.oarserver['uri'] = self.oarserver['uri'].\
            replace("id",str(strval))
    if next_page:
        self.oarserver['uri'] += next_page
    if username:
        headers['X-REMOTE_IDENT'] = username
    print>>sys.stderr, " \r\n \t OARrestapi \tGETRequestToOARRestAPI %s" %( self.oarserver['uri'])
    logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
        self.oarserver['uri'] %s strval %s" \
        %(self.oarserver['uri'], strval))
    try :
        #seems that it does not work if we don't add this
        headers['content-length'] = '0'
        conn = HTTPConnection(self.oarserver['ip'], \
            self.oarserver['port'])
        conn.request("GET", self.oarserver['uri'], data, headers)
        resp = ( conn.getresponse()).read()
        conn.close()
    except HTTPException, error :
        # best effort: the caller gets None when the OAR server is down
        logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s " \
            %(error))
示例3: get_slice_and_slivers
def get_slice_and_slivers(self, slice_xrn, login=None):
    """Look up a slice and build its sliver dictionary.

    :param slice_xrn: slice identifier (hrn or urn); when falsy nothing is
        looked up.
    :param login: optional LDAP login forwarded to GetSlices.
    :returns: tuple (slices, slivers) where slivers is keyed on each
        sliver's node id, plus an optional 'default_sliver' entry carrying
        senslab ssh connection information. When the slice cannot be found,
        the first element is None and slivers is empty.
    """
    slivers = {}
    sfa_slice = None
    if not slice_xrn:
        return (sfa_slice, slivers)
    slice_urn = hrn_to_urn(slice_xrn, 'slice')
    slice_hrn, _ = urn_to_hrn(slice_xrn)
    slice_name = slice_hrn
    slices = self.driver.GetSlices(slice_filter= str(slice_name), \
        slice_filter_type = 'slice_hrn', login=login)
    logger.debug("Slabaggregate api \tget_slice_and_slivers \
        sfa_slice %s \r\n slices %s self.driver.hrn %s" \
        %(sfa_slice, slices, self.driver.hrn))
    if not slices:
        return (sfa_slice, slivers)
    #if isinstance(sfa_slice, list):
        #sfa_slice = slices[0]
    #else:
        #sfa_slice = slices

    # sort slivers by node id, if there is a job
    # and therefore a node allocated to this slice
    for sfa_slice in slices:
        try:
            node_ids_list = sfa_slice['node_ids']
        except KeyError:
            # slice without an OAR job: no nodes allocated yet
            logger.log_exc("SLABAGGREGATE \t \
                get_slice_and_slivers KeyError ")
            continue

        for node in node_ids_list:
            sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
            sliver_xrn.set_authority(self.driver.hrn)
            #node_id = self.driver.root_auth + '.' + node_id
            sliver = Sliver({'sliver_id':sliver_xrn.urn,
                            'name': sfa_slice['hrn'],
                            'type': 'slab-node',
                            'tags': []})
            slivers[node] = sliver

    #Add default sliver attribute :
    #connection information for senslab
    # NOTE(review): uses sfa_slice as left by the loop above -- assumes the
    # slices in the list share the relevant hrn; confirm against callers.
    if get_authority (sfa_slice['hrn']) == self.driver.root_auth:
        tmp = sfa_slice['hrn'].split('.')
        ldap_username = tmp[1].split('_')[0]
        vmaddr = 'ssh ' + ldap_username + '@grenoble.senslab.info'
        slivers['default_sliver'] = {'vm': vmaddr , 'login': ldap_username}

    #TODO get_slice_and_slivers Find the login of the external user
    logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
        %(slivers))
    return (slices, slivers)
示例4: ParseJobsIds
def ParseJobsIds(self):
    """Extract the short-form fields of one OAR job from the raw JSON.

    Reads self.raw_json (a single job record) and returns a dict holding
    only the summary fields; when a field is missing the KeyError is logged
    and None is returned implicitly.
    """
    summary_fields = ['wanted_resources', 'name', 'id', 'start_time', \
                      'state','owner','walltime','message']
    # full field list of an OAR job record, kept here for reference
    all_fields = ['launching_directory', 'links', \
                  'resubmit_job_id', 'owner', 'events', 'message', \
                  'scheduled_start', 'id', 'array_id', 'exit_code', \
                  'properties', 'state','array_index', 'walltime', \
                  'type', 'initial_request', 'stop_time', 'project',\
                  'start_time', 'dependencies','api_timestamp','submission_time', \
                  'reservation', 'stdout_file', 'types', 'cpuset_name', \
                  'name', 'wanted_resources','queue','stderr_file','command']

    job_record = self.raw_json
    #logger.debug("OARESTAPI ParseJobsIds %s" %(self.raw_json))
    try:
        # pull every summary field; raises KeyError on the first miss,
        # exactly like the original append-then-zip loop
        return dict((field, job_record[field]) for field in summary_fields)
    except KeyError:
        logger.log_exc("ParseJobsIds KeyError ")
示例5: _process_walltime
def _process_walltime(duration):
""" Calculates the walltime in seconds from the duration in H:M:S
specified in the RSpec.
"""
if duration:
# Fixing the walltime by adding a few delays.
# First put the walltime in seconds oarAdditionalDelay = 20;
# additional delay for /bin/sleep command to
# take in account prologue and epilogue scripts execution
# int walltimeAdditionalDelay = 240; additional delay
#for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
#in oar.conf
# Put the duration in seconds first
#desired_walltime = duration * 60
desired_walltime = duration
total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
walltime = []
#Put the walltime back in str form
#First get the hours
walltime.append(str(total_walltime / 3600))
total_walltime = total_walltime - 3600 * int(walltime[0])
#Get the remaining minutes
walltime.append(str(total_walltime / 60))
total_walltime = total_walltime - 60 * int(walltime[1])
#Get the seconds
walltime.append(str(total_walltime))
else:
logger.log_exc(" __process_walltime duration null")
return walltime, sleep_walltime
示例6: nuke
def nuke(self):
    """Drop every table and remove migrate's version tracking.

    Afterwards the database looks as if it had never been initialized.
    """
    model.drop_tables(self.engine)
    # drop the version-control bookkeeping as well; if the db was never
    # placed under version control this is a no-op worth logging
    try:
        migrate.drop_version_control(self.url, self.repository)
    except migrate.exceptions.DatabaseNotControlledError:
        logger.log_exc("Failed to drop version control")
示例7: delete_security_group
def delete_security_group(self, name):
    """Delete the named security group, unless it is the 'default' group.

    Any failure (group not found, still in use, API error) is logged and
    otherwise ignored -- best-effort cleanup semantics.

    :param name: name of the security group to remove.
    """
    try:
        security_group = self.client.security_groups.find(name=name)
        # the 'default' group is managed by the cloud itself; never delete it
        if security_group.name != 'default':
            self.client.security_groups.delete(security_group.id)
    except Exception:
        # py3-compatible handler (was "except Exception, ex" with an
        # unused binding); log_exc captures the traceback anyway
        logger.log_exc("Failed to delete security group")
示例8: LdapModify
def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
    """ Modifies a LDAP entry, replaces user's old attributes with
    the new ones given.

    :param dn: user's absolute name in the LDAP hierarchy.
    :param old_attributes_dict: old user's attributes. Keys must match
        the ones used in the LDAP model.
    :param new_attributes_dict: new user's attributes. Keys must match
        the ones used in the LDAP model.
    :type dn: string
    :type old_attributes_dict: dict
    :type new_attributes_dict: dict
    :returns: dict bool True if Successful, bool False if not.
        NOTE(review): returns None implicitly when the initial
        connect/bind fails -- confirm callers handle that.
    :rtype: dict
    """
    # build the modification list from the diff of the two attribute dicts
    ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
    # Connect and bind/authenticate
    result = self.conn.connect()
    if (result['bool']):
        try:
            self.conn.ldapserv.modify_s(dn, ldif)
            self.conn.close()
            return {'bool': True}
        except ldap.LDAPError as error:
            # 'as' binding: valid on python 2.6+ and python 3,
            # unlike the removed "except X, e" form
            logger.log_exc("LDAP LdapModify Error %s" % error)
            return {'bool': False}
示例9: do_POST
def do_POST(self):
    """Handles the HTTPS POST request.

    It was copied out from SimpleXMLRPCServer.py and modified to shutdown
    the socket cleanly. Authenticates the peer via its SSL certificate,
    builds a flavour-specific API object bound to this server's
    credentials, and dispatches the raw XML-RPC request body to it.

    NOTE(review): if the exception fires before self.api is assigned
    (e.g. while loading the peer certificate), the handler's call to
    self.api.prepare_response will itself raise AttributeError --
    confirm against the full source.
    """
    try:
        peer_cert = Certificate()
        peer_cert.load_from_pyopenssl_x509(self.connection.get_peer_certificate())
        # build an API instance for this request, bound to the server's
        # interface, key and certificate
        generic=Generic.the_flavour()
        self.api = generic.make_api (peer_cert = peer_cert,
                                     interface = self.server.interface,
                                     key_file = self.server.key_file,
                                     cert_file = self.server.cert_file,
                                     cache = self.cache)
        #logger.info("SecureXMLRpcRequestHandler.do_POST:")
        #logger.info("interface=%s"%self.server.interface)
        #logger.info("key_file=%s"%self.server.key_file)
        #logger.info("api=%s"%self.api)
        #logger.info("server=%s"%self.server)
        #logger.info("handler=%s"%self)
        # get arguments
        request = self.rfile.read(int(self.headers["content-length"]))
        remote_addr = (remote_ip, remote_port) = self.connection.getpeername()
        self.api.remote_addr = remote_addr
        response = self.api.handle(remote_addr, request, self.server.method_map)
    except Exception, fault:
        # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        logger.log_exc("server.do_POST")
        response = self.api.prepare_response(fault)
示例10: save
def save(self):
    """Pickle the url->version cache into self.filename.

    Failures (unwritable path, unpicklable content, ...) are logged and
    swallowed so that a broken cache never takes down the caller.
    """
    try:
        # binary mode is required by pickle; the original used the
        # py2-only file() builtin in text mode and never guaranteed close
        with open(self.filename, 'wb') as outfile:
            pickle.dump(self.url2version, outfile)
    except Exception:
        # narrowed from a bare except; keep the best-effort semantics
        logger.log_exc("Cannot save version cache into %s" % self.filename)
示例11: verify_slice_nodes
def verify_slice_nodes(self, slice_urn, slice, rspec_nodes):
    """Synchronize the nodes attached to a slice with a request rspec.

    Adds the slice to nodes newly requested in the rspec, removes it from
    nodes no longer requested, then records a SliverAllocation (state
    'geni_allocated') for every node the slice ends up on.

    :param slice_urn: urn of the slice being provisioned.
    :param slice: slice record dict; must carry 'slice_id' and
        'slice_name'; 'node_ids' is defaulted to [] when missing.
    :param rspec_nodes: node elements from the request rspec; each must
        resolve to a hostname via 'component_name' or 'component_id'.
    :returns: the node records attached to the slice after the update.
    """
    slivers = {}
    # map each requested hostname to the rspec ids it was requested with
    for node in rspec_nodes:
        hostname = node.get('component_name')
        client_id = node.get('client_id')
        #client_id = node.get('component_id')
        component_id = node.get('component_id').strip()
        if hostname:
            hostname = hostname.strip()
        elif component_id:
            hostname = xrn_to_hostname(component_id)
        if hostname:
            slivers[hostname] = {'client_id': client_id, 'component_id': component_id}

    # translate requested hostnames into testbed node ids
    all_nodes = self.driver.shell.GetNodes()
    requested_slivers = []
    for node in all_nodes:
        if node['hostname'] in slivers.keys():
            requested_slivers.append(node['node_id'])

    if 'node_ids' not in slice.keys():
        slice['node_ids']=[]
    nodes = self.driver.shell.GetNodes({'node_ids': slice['node_ids']})
    current_slivers = [node['node_id'] for node in nodes]

    # remove nodes not in rspec
    deleted_nodes = list(set(current_slivers).difference(requested_slivers))

    # add nodes from rspec
    added_nodes = list(set(requested_slivers).difference(current_slivers))

    try:
        self.driver.shell.AddSliceToNodes({'slice_id': slice['slice_id'], 'node_ids': added_nodes})
        self.driver.shell.DeleteSliceFromNodes({'slice_id': slice['slice_id'], 'node_ids': deleted_nodes})
    except:
        # best effort: failures updating the testbed are logged, not raised
        logger.log_exc('Failed to add/remove slice from nodes')

    slices = self.driver.shell.GetSlices({'slice_name': slice['slice_name']})
    resulting_nodes = self.driver.shell.GetNodes({'node_ids': slices[0]['node_ids']})

    # update sliver allocations
    # NOTE(review): assumes every resulting node's hostname was requested in
    # the rspec; a pre-existing node outside it would raise KeyError here --
    # confirm against callers.
    for node in resulting_nodes:
        client_id = slivers[node['hostname']]['client_id']
        component_id = slivers[node['hostname']]['component_id']
        sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
        sliver_id = Xrn(sliver_hrn, type='sliver').urn
        record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
                                  component_id=component_id,
                                  slice_urn = slice_urn,
                                  allocation_state='geni_allocated')
        record.sync(self.driver.api.dbsession())
    return resulting_nodes
示例12: merge_rspecs
def merge_rspecs(rspecs):
    """
    Merge a list of RSpecs into 1 RSpec, and return the result.
    rspecs must be a valid RSpec string or list of RSpec strings.

    Non-list (or empty) input is returned unchanged. Only inputs whose
    root element has type="SFA" are merged; others are skipped with an
    error log. Duplicate <network> elements (same name) are dropped.
    :raises InvalidRSpec: when one of the inputs cannot be parsed as XML.
    """
    if not rspecs or not isinstance(rspecs, list):
        return rspecs

    # ugly hack to avoid sending the same info twice, when the call graph has dags
    known_networks={}

    def register_network (network):
        # remember a network by name so later duplicates can be detected
        try:
            known_networks[network.get('name')]=True
        except:
            logger.error("merge_rspecs: cannot register network with no name in rspec")
            pass

    def is_registered_network (network):
        try:
            return network.get('name') in known_networks
        except:
            logger.error("merge_rspecs: cannot retrieve network with no name in rspec")
            return False

    # the resulting tree
    rspec = None
    for input_rspec in rspecs:
        # ignore empty strings as returned with used call_ids
        if not input_rspec: continue

        try:
            tree = etree.parse(StringIO(input_rspec))
        except etree.XMLSyntaxError:
            # consider failing silently here
            logger.log_exc("merge_rspecs, parse error")
            message = str(sys.exc_info()[1]) + ' with ' + input_rspec
            raise InvalidRSpec(message)

        root = tree.getroot()
        if not root.get("type") in ["SFA"]:
            logger.error("merge_rspecs: unexpected type for rspec root, %s"%root.get('type'))
            continue

        if rspec == None:
            # we scan the first input, register all networks
            # in addition we remove duplicates - needed until everyone runs 1.0-10
            rspec = root
            for network in root.iterfind("./network"):
                if not is_registered_network(network):
                    register_network(network)
                else:
                    # duplicate in the first input - trash it
                    root.remove(network)
        else:
            # subsequent inputs: graft their new networks and all requests
            # onto the first tree
            for network in root.iterfind("./network"):
                if not is_registered_network(network):
                    rspec.append(deepcopy(network))
                    register_network(network)
            for request in root.iterfind("./request"):
                rspec.append(deepcopy(request))
    return etree.tostring(rspec, xml_declaration=True, pretty_print=True)
示例13: run_instances
def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
    """Create nova server instances for every sliver requested in the rspec.

    :param instance_name: name given to each created server.
    :param tenant_name: tenant (slice) the instances belong to.
    :param rspec: request RSpec string describing nodes and slivers.
    :param key_name: nova keypair name injected into the instances.
    :param pubkeys: ssh public keys written to root's authorized_keys.

    NOTE(review): creation errors for individual instances are logged and
    that instance is skipped; this excerpt ends without a return statement,
    so created_instances is not handed back -- confirm against the full
    source.
    """
    #logger.debug('Reserving an instance: image: %s, flavor: ' \
    #            '%s, key: %s, name: %s' % \
    #            (image_id, flavor_id, key_name, slicename))

    # make sure a tenant exists for this slice
    tenant = self.create_tenant(tenant_name)

    # add the sfa admin user to this tenant and update our nova client connection
    # to use these credentials for the rest of this session. This ensures that the
    # instances we create will be assigned to the correct tenant.
    sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
    user_role = self.driver.shell.auth_manager.roles.find(name='user')
    admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
    self.driver.shell.nova_manager.connect(tenant=tenant.name)

    authorized_keys = "\n".join(pubkeys)
    files = {'/root/.ssh/authorized_keys': authorized_keys}

    rspec = RSpec(rspec)
    requested_instances = defaultdict(list)

    # iterate over clouds/zones/nodes
    created_instances = []
    for node in rspec.version.get_nodes_with_slivers():
        instances = node.get('slivers', [])
        if not instances:
            continue
        for instance in instances:
            try:
                metadata = {}
                flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
                # a disk_image list is required; anything else is rejected
                image = instance.get('disk_image')
                if image and isinstance(image, list):
                    image = image[0]
                else:
                    raise InvalidRSpec("Must specify a disk_image for each VM")
                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                fw_rules = instance.get('fw_rules', [])
                group_name = self.create_security_group(instance_name, fw_rules)
                metadata['security_groups'] = group_name
                if node.get('component_id'):
                    metadata['component_id'] = node['component_id']
                if node.get('client_id'):
                    metadata['client_id'] = node['client_id']
                server = self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                                                                      image=image_id,
                                                                      key_name = key_name,
                                                                      security_groups = [group_name],
                                                                      files=files,
                                                                      meta=metadata,
                                                                      name=instance_name)
                created_instances.append(server)
            except Exception, err:
                # one bad sliver must not abort the rest of the request
                logger.log_exc(err)
示例14: LdapSearch
def LdapSearch(self, req_ldap=None, expected_fields=None):
    """
    Used to search directly in LDAP, by using ldap filters and return
    fields. When req_ldap is None, returns all the entries in the LDAP.

    :param req_ldap: ldap style request, with appropriate filters,
        example: (cn=*).
    :param expected_fields: Fields in the user ldap entry that has to be
        returned. If None is provided, will return 'mail', 'givenName',
        'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
    :type req_ldap: string
    :type expected_fields: list
    :returns: the list of result tuples from the LDAP server, [] on an
        LDAP error, or None when the connection itself failed.
    .. seealso:: make_ldap_filters_from_record for req_ldap format.
    """
    result = self.conn.connect(bind=False)
    if (result['bool']):
        return_fields_list = []
        if expected_fields is None:
            return_fields_list = ['mail', 'givenName', 'sn', 'uid',
                                  'sshPublicKey', 'shadowExpire']
        else:
            return_fields_list = expected_fields
        #No specific request specified, get the whole LDAP
        if req_ldap is None:
            req_ldap = '(cn=*)'

        logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
            return_fields_list %s" \
            %(req_ldap, return_fields_list))

        try:
            msg_id = self.conn.ldapserv.search(
                self.baseDN, ldap.SCOPE_SUBTREE,
                req_ldap, return_fields_list)
            #Get all the results matching the search from ldap in one
            #shot (1 value)
            result_type, result_data = \
                self.conn.ldapserv.result(msg_id, 1)

            self.conn.close()

            logger.debug("LDAP.PY \t LdapSearch result_data %s"
                % (result_data))

            return result_data

        except ldap.LDAPError, error:
            logger.log_exc("LDAP LdapSearch Error %s" % error)
            return []
    else:
        logger.error("LDAP.PY \t Connection Failed")
        return
示例15: LaunchExperimentOnOAR
def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
        lease_start_time, lease_duration, slice_user=None):
    """
    Create a job request structure based on the information provided
    and post the job on OAR.

    :param added_nodes: list of nodes that belong to the described lease.
    :param slice_name: the slice hrn associated to the lease.
    :param lease_start_time: timestamp of the lease starting time.
    :param lease_duration: lease duration in minutes.
    :param slice_user: login of the user the job is posted for.
    :returns: the OAR job id, or None when OAR's answer carries no 'id'
        (job creation refused).
    """
    # lease characteristics forwarded to the OAR job request builder
    lease_dict = {}
    lease_dict['lease_start_time'] = lease_start_time
    lease_dict['lease_duration'] = lease_duration
    lease_dict['added_nodes'] = added_nodes
    lease_dict['slice_name'] = slice_name
    lease_dict['slice_user'] = slice_user
    lease_dict['grain'] = self.GetLeaseGranularity()
    # I don't know why the SFATIME_FORMAT has changed...
    # from sfa.util.sfatime import SFATIME_FORMAT
    # Let's use a fixed format %Y-%m-%d %H:%M:%S
    #lease_dict['time_format'] = self.time_format
    lease_dict['time_format'] = '%Y-%m-%d %H:%M:%S'

    logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR slice_user %s\
        \r\n " %(slice_user))
    #Create the request for OAR
    reqdict = self._create_job_structure_request_for_OAR(lease_dict)
    # first step : start the OAR job and update the job
    logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR reqdict %s\
        \r\n " %(reqdict))

    answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
        reqdict, slice_user)
    logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s " %(answer))
    try:
        jobid = answer['id']
    except KeyError:
        # OAR refused the job: the answer carries the error payload
        logger.log_exc("IOTLAB_API \tLaunchExperimentOnOAR \
            Impossible to create job %s " %(answer))
        return None

    if jobid :
        logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s \
            added_nodes %s slice_user %s" %(jobid, added_nodes, \
            slice_user))
    return jobid