This article collects typical usage examples of the servicelogger.log function in Python. If you are unsure how servicelogger.log is used in practice, or simply want to see real calls to it, the hand-picked code samples here may help.
The section below presents 15 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
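Before the examples, here is a minimal usage sketch of servicelogger.log itself. Only servicelogger.log and servicelogger.log_last_exception appear in the examples below; the servicelogger.init call, the log name "nodemanager", and the do_some_work helper are assumptions added only to make the sketch self-contained.

import servicelogger

# Assumed initialization step: the Seattle node manager initializes the logger
# once with a log name before calling servicelogger.log. The exact init
# signature here is an assumption, not taken from the examples below.
servicelogger.init("nodemanager")

try:
  servicelogger.log("[INFO]: Node manager starting up")
  do_some_work()  # hypothetical helper that may raise
except Exception, e:
  # log_last_exception records the current traceback (see Example 15 below).
  servicelogger.log_last_exception()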
Example 1: advertise_to_DNS
def advertise_to_DNS(unique_id):
  """
  Advertise unique_id to the zenodotus DNS server. We strip away whatever
  follows the NAME_SERVER part of the unique_id. For instance, if our unique_id
  is abc.NAME_SERVER:[email protected], then we only advertise abc.NAME_SERVER.
  """
  # IP that maps to the unique_id
  myip = emulcomm.getmyip()

  # Extract the part of unique_id up to the name server,
  # i.e. xyz.zenodotus.washington.edu, and discard whatever follows
  name_server_pos = unique_id.find(NAME_SERVER)
  if name_server_pos > -1:
    unique_id = unique_id[0 : name_server_pos + len(NAME_SERVER)]
  else:
    raise Exception("Invalid unique_id format: '" + str(unique_id) + "'")

  advertise_success = False

  # We keep trying until successful advertisement (Fix for Ticket #956)
  while not advertise_success:
    try:
      advertise_announce(unique_id, myip, DNS_CACHE_TTL)
      servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip)
      advertise_success = True
    except Exception, error:
      if 'announce error' in str(error):
        # We can confidently drop the exception here. The advertisement service
        # can sometimes be flaky, yet it can guarantee advertisement of our
        # key-value pair on at least one of the three components. Thus, we
        # simply drop the error and treat the advertisement as successful.
        advertise_success = True
      else:
        advertise_success = False
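The trimming step above can be hard to follow from the docstring alone. The following standalone sketch traces the same slicing with hypothetical values for NAME_SERVER and unique_id (the real constant is defined elsewhere in the node manager):

# Hypothetical values, for illustration only.
NAME_SERVER = "zenodotus.washington.edu"
unique_id = "abc.zenodotus.washington.edu:1234@somekey"

name_server_pos = unique_id.find(NAME_SERVER)
trimmed = unique_id[0 : name_server_pos + len(NAME_SERVER)]
# trimmed == "abc.zenodotus.washington.edu", which is what gets advertised.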
Example 2: update_restrictions
def update_restrictions():
  # Create an internal handler function, takes a resource line and returns the new number of threads
  def _internal_func(lineContents):
    try:
      threads = float(lineContents[2])
      threads = threads * EVENT_SCALAR
      threads = int(threads)
      threads = max(threads, HARD_MIN)  # Set a hard minimum
      return threads
    except:
      # On failure, return the minimum
      return HARD_MIN

  # Create a task that uses our internal function
  task = ("resource", "events", _internal_func, True)
  taskList = [task]

  # Process all the resource files
  errors = nmrestrictionsprocessor.process_all_files(taskList)

  # Log any errors we encounter
  if errors != []:
    for e in errors:
      print e
      servicelogger.log(
        "[ERROR]:Unable to patch events limit in resource file " + e[0] + ", exception " + str(e[1])
      )
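To make the scaling rule in _internal_func concrete, here is a small standalone sketch; EVENT_SCALAR and HARD_MIN are hypothetical values, since the real constants are defined elsewhere in the node manager:

# Hypothetical constants, for illustration only.
EVENT_SCALAR = 0.5
HARD_MIN = 10

# A parsed resource line of the form ["resource", "events", "<count>"]:
line_contents = ["resource", "events", "50"]
threads = max(int(float(line_contents[2]) * EVENT_SCALAR), HARD_MIN)
# threads == 25; a smaller original count would be clamped up to HARD_MIN.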
Example 3: uninstall_nokia
def uninstall_nokia():
  """
  <Purpose>
    Remove the startup script and symlink to it in the /etc/init.d and
    /etc/rc2.d directories, and kill all seattle processes by using
    stop_all_seattle_processes. This requires the user to be currently on root
    access.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    Removes the startup script and the symlink to it, and stops seattle from
    running.

  <Returns>
    True if succeeded in uninstalling,
    False otherwise.
  """
  # Note to developers: If you need to change the path of the startup script or
  # the path of the symlink, make sure you keep it consistent with those in
  # seattleinstaller.py.

  startup_script_name = "nokia_seattle_startup.sh"
  # The directory where the startup script resides.
  startup_script_dir = "/etc/init.d/"
  # The full path to the startup script.
  startup_script_path = startup_script_dir + startup_script_name

  # The name of the symlink that links to the startup script.
  symlink_name = "S99startseattle"
  # The directory where the symlink to the startup script resides.
  symlink_dir = "/etc/rc2.d/"
  # The full path to the symlink.
  symlink_path = symlink_dir + symlink_name

  # Check if the startup script and the symlink exist.
  if not os.path.exists(startup_script_path) and \
      not os.path.lexists(symlink_path):
    _output("Neither the startup script nor the symlink exists.")
    return True

  # Remove the startup script.
  try:
    os.remove(startup_script_path)
  # Cannot remove the startup script due to some reason.
  except OSError, e:
    # The startup script does not exist - that is fine, we will continue
    # and try to remove the symlink.
    if e.errno == errno.ENOENT:
      pass
    else:
      # The startup script cannot be removed.
      _output("The startup script cannot be removed. Make sure you have the " \
              + "permission to do so.")
      servicelogger.log("Seattle cannot be uninstalled because " \
                        + startup_script_path + " cannot be removed.")
      return False
Example 4: set_accepter
def set_accepter(accepter):
  global accepter_thread
  accepter_state['lock'].acquire(True)
  accepter_thread = accepter

  if DEBUG_MODE:
    servicelogger.log("[DEBUG] Accepter Thread has been set...")

  accepter_state['lock'].release()
Example 5: process_API_call
def process_API_call(fullrequest):
  callname = fullrequest.split('|')[0]

  if DEBUG_MODE:
    servicelogger.log("Now handling call: " + callname)

  if callname not in API_dict:
    raise nmAPI.BadRequest("Unknown Call")

  # find the entry that describes this call...
  numberofargs, permissiontype, APIfunction = API_dict[callname]

  # we'll do the signature checks first... (the signature needs to be stripped
  # off to get the args anyways)...
  if permissiontype == 'Public':
    # There should be no signature, so this is the raw request...
    if len(fullrequest.split('|')) < numberofargs - 1:
      raise nmAPI.BadRequest("Not Enough Arguments")

    # If there are 3 args, we want to split at most 3 times (the first item is
    # the callname)
    callargs = fullrequest.split('|', numberofargs)
    # return any output for the user...
    return APIfunction(*callargs[1:])

  else:
    # strip off the signature and get the requestdata
    requestdata, requestsignature = fastsigneddata.signeddata_split_signature(fullrequest)

    # NOTE: the first argument *must* be the vessel name!!!!!!!!!!!
    vesselname = requestdata.split('|', 2)[1]

    if vesselname not in nmAPI.vesseldict:
      raise nmAPI.BadRequest('Unknown Vessel')

    # I must have something to check...
    if permissiontype == 'Owner':
      # only the owner is allowed, so the list of keys is merely that key
      allowedkeys = [nmAPI.vesseldict[vesselname]['ownerkey']]
    else:
      # the user keys are also allowed
      allowedkeys = [nmAPI.vesseldict[vesselname]['ownerkey']] + nmAPI.vesseldict[vesselname]['userkeys']

    # I need to pass the fullrequest in here...
    ensure_is_correctly_signed(fullrequest, allowedkeys, nmAPI.vesseldict[vesselname]['oldmetadata'])

    # If there are 3 args, we want to split at most 3 times (the first item is
    # the callname)
    callargs = requestdata.split('|', numberofargs)

    # store the request signature as old metadata
    nmAPI.vesseldict[vesselname]['oldmetadata'] = requestsignature

    # return any output for the user...
    return APIfunction(*callargs[1:])
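The dispatch above relies on a pipe-delimited wire format: the call name first, then the arguments, with a signature appended for non-public calls. The following sketch shows how a public request string is split; the call name and arguments are hypothetical and not taken from the real API table:

# Hypothetical public request, for illustration of the wire format only.
fullrequest = "SomePublicCall|first_argument|second_argument"

callname = fullrequest.split('|')[0]   # "SomePublicCall"
callargs = fullrequest.split('|', 2)   # in the example above, 2 is numberofargs
print callargs[1:]                     # ['first_argument', 'second_argument']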
Example 6: check_and_create_affix_object
def check_and_create_affix_object(virtual_host_name):
  """
  <Purpose>
    The purpose of this function is to check if Affix has been enabled.
    If it is enabled, we create an Affix object with the advertised
    Affix string and return the Affix object as well as whether Affix
    is enabled.

  <Arguments>
    virtual_host_name - the zenodotus name we want to set for this
      node.

  <Exceptions>
    None

  <Return>
    Returns a tuple in the form:
      (Boolean, AffixStackInterface, String)
    The first item in the tuple is whether Affix has been enabled.
    The second item is an AffixStackInterface object if Affix
    has been enabled. Otherwise the second item is None.
    The third item is the Affix string that is being used
    for the Affix object.
  """
  global affix_stack_string
  global affix_enabled

  # Check to see if AFFIX is enabled.
  try:
    affix_enabled_lookup = advertise_lookup(enable_affix_key)[-1]
    # Now we check if the last entry is True or False.
    if affix_enabled_lookup == 'True':
      affix_stack_string = advertise_lookup(affix_service_key)[-1]
      affix_enabled = True
      servicelogger.log("[INFO]: Current advertised Affix string: " + str(affix_stack_string))

      # If Affix is enabled, we can go ahead and create the Affix object
      # right away so we don't have to repeatedly create it in the
      # loop below.
      affix_legacy_string = "(CoordinationAffix)" + affix_stack_string
      affix_object = AffixStackInterface(affix_legacy_string, virtual_host_name)

      # Return the results.
      return (affix_enabled, affix_object, affix_legacy_string)
    else:
      affix_enabled = False
      # Affix is not enabled, so we return (False, None, None).
      return (affix_enabled, None, None)
  except (AdvertiseError, TimeoutError, ValueError, IndexError), e:
    servicelogger.log("Trying to look up Affix enabled threw " + str(type(e)) + " " + str(e))
    affix_enabled = False
    # Raise error on debug mode.
    if DEBUG_MODE:
      raise
    # Affix is not enabled, so we return (False, None, None).
    return (affix_enabled, None, None)
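A caller would typically unpack the returned tuple before deciding whether to build its listener through Affix. This is a hedged usage sketch; the zenodotus host name is hypothetical:

# Hypothetical virtual host name, for illustration only.
affix_enabled, affix_object, affix_string = \
    check_and_create_affix_object("abc.zenodotus.washington.edu")

if affix_enabled:
  servicelogger.log("[INFO]: Listening through Affix stack " + affix_string)
else:
  servicelogger.log("[INFO]: Affix is disabled; using a plain listener")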
Example 7: start_accepter
def start_accepter():
  if AUTO_USE_NAT == False:
    # check to see if we should use the nat layer
    try:
      # see if we can currently have a bi-directional connection
      use_nat = nat_check_bi_directional(getmyip(), configuration['ports'][0])
    except Exception, e:
      servicelogger.log("Exception occurred trying to contact forwarder to detect nat " + str(e))
      use_nat = False
Example 8: parse_arguments
def parse_arguments():
  """
  Parse all the arguments passed in through the command
  line for the nodemanager. This way in the future it
  will be easy to add and remove options from the
  nodemanager.
  """
  # Create the option parser
  parser = optparse.OptionParser(version="Seattle " + version)

  # Add the --foreground option.
  parser.add_option('--foreground', dest='foreground',
                    action='store_true', default=False,
                    help="Run the nodemanager in foreground " +
                         "instead of daemonizing it.")

  # Add the --test-mode option.
  parser.add_option('--test-mode', dest='test_mode',
                    action='store_true', default=False,
                    help="Run the nodemanager in test mode.")

  # Add the using shim capability.
  # --shims [shim name]: Forces use of the specified shims. The shim name must
  # conform to the format as specified in:
  # https://seattle.cs.washington.edu/wiki/UsingShims.
  parser.add_option('--shims', type="string", dest="shim_name",
                    help="Use a user specified shim instead of the" +
                         " default (NatDeciderShim)")

  # Parse the arguments.
  options, args = parser.parse_args()

  # Set some global variables.
  global FOREGROUND
  global TEST_NM
  global default_shim

  # Analyze the options
  if options.foreground:
    FOREGROUND = True

  if options.test_mode:
    TEST_NM = True

  if options.shim_name:
    servicelogger.log("[INFO]: Using user-specified shims " + options.shim_name)
    default_shim = options.shim_name
Example 9: safe_log
def safe_log(message):
  """
  Log a message in a way that cannot throw an exception. First try to log using
  the servicelogger, then just try to print the message.
  """
  try:
    #f = open('/tmp/log.txt', 'a')
    #f.write(message + '\n')
    #f.close()
    servicelogger.log(message)
  except:
    pass
Example 10: log
def log(*args):
  chunks = []
  for arg in args:
    chunks.append(str(arg))

  logstring = " ".join(chunks)

  # servicelogger.log will append a trailing newline to the string,
  # so remove the existing one (if any).
  if logstring.endswith("\n"):
    servicelogger.log(logstring[:-1])
  else:
    servicelogger.log(logstring)
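For instance, a call such as the one below would join all arguments with spaces and strip the trailing newline before handing the string to servicelogger.log:

log("[INFO]", "vessel", 3, "started\n")   # logged as "[INFO] vessel 3 started"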
Example 11: new_affix_listenforconnection
def new_affix_listenforconnection(localip, localport, timeout=10):
  global affix_enabled
  global affix_stack_string
  global zenodotus_advertise_handle

  # Similarly, stop advertising my old Zenodotus name (if any),
  # ignoring potential errors. If any error is raised, it will
  # be recorded.
  try:
    advertisepipe.remove_from_pipe(zenodotus_advertise_handle)
  except Exception, err:
    servicelogger.log("Unexpected error when attempting to " +
                      "remove old zenodotus_advertise_handle. " + str(type(err)) +
                      ". " + str(err))
Example 12: safe_log
def safe_log(message):
  """
  Log a message in a way that cannot throw an exception. First try to log using
  the servicelogger, then just try to print the message.
  """
  try:
    servicelogger.log(message)
  except:
    try:
      print message
    except:
      # As the standard output streams aren't closed, it would seem that this
      # should never happen. If it does, though, what can we do to log the
      # message, other than directly write to a file?
      pass
Example 13: run
def run(self):
  # Run indefinitely.
  # This is on the assumption that getconnection() blocks, and so this won't
  # consume an inordinate amount of resources.
  while True:
    try:
      ip, port, client_socket = self.serversocket.getconnection()
      connection_handler(ip, port, client_socket)
    except SocketWouldBlockError:
      sleep(0.5)
    except SocketTimeoutError:
      sleep(0.5)
    except Exception, e:
      servicelogger.log("FATAL error in AccepterThread: " +
                        traceback.format_exc())
      return
Example 14: main
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()

  # ensure that only one instance is running at a time...
  gotlock = runonce.getprocesslock("seattlenodemanager")
  if gotlock == True:
    # I got the lock. All is well...
    pass
  else:
    if gotlock:
      servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) +
                        ") is running")
    else:
      servicelogger.log("[ERROR]:Another node manager process is running")
    return

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better? Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")

  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)

  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
        configuration['crontab_updated_for_2009_installer'] == False:
      try:
        import update_crontab_entry
        modified_crontab_entry = \
            update_crontab_entry.modify_seattle_crontab_entry()
        # If updating the seattle crontab entry succeeded, then update the
        # 'crontab_updated_for_2009_installer' so the nodemanager no longer
        # tries to update the crontab entry when it starts up.
        if modified_crontab_entry:
          configuration['crontab_updated_for_2009_installer'] = True
          persist.commit_object(configuration, "nodeman.cfg")
      except Exception, e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occurred when " \
                          + "modifying the crontab for the new 2009 " \
                          + "seattle crontab entry: " \
                          + exception_traceback_string)
Example 15: handle_request
def handle_request(socketobj):
  # always close the socketobj
  try:
    try:
      # let's get the request...
      # BUG: Should prevent endless data / slow retrieval attacks
      fullrequest = session.session_recvmessage(socketobj)

    # Armon: Catch a vanilla exception because repy emulated_sockets
    # will raise Exception when the socket has been closed.
    # This is changed from just passing through socket.error,
    # which we were catching previously.
    except Exception, e:
      # JAC: Fix for the exception logging observed in #992
      if 'Socket closed' in str(e) or 'timed out!' in str(e):
        servicelogger.log('Connection abruptly closed during recv')
        return
      elif 'Bad message size' in str(e):
        servicelogger.log('Received bad message size')
        return
      else:
        # I can't handle this, let's exit
        # BUG: REMOVE LOGGING IN PRODUCTION VERSION (?)
        servicelogger.log_last_exception()
        return

    # handle the request as appropriate
    try:
      retstring = process_API_call(fullrequest)

    # Bad parameters, signatures, etc.
    except nmAPI.BadRequest, e:
      session.session_sendmessage(socketobj, str(e) + "\nError")
      return