This page collects typical usage examples of the Python function shared.base.client_id_dir. If you are wondering how to call client_id_dir, what it is used for, or what it looks like in real code, the curated examples below should help.
A total of 15 code examples of the client_id_dir function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
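Before the examples, a quick note on the role client_id_dir plays: it maps a user's client ID (typically an X.509 distinguished name) to a filesystem-safe directory name, which each example below then joins onto a configured base path. The snippet that follows is only a minimal illustration of that pattern; the helper client_id_dir_sketch, its substitution rules, and the paths used are assumptions for demonstration, not the actual shared.base implementation.

# Illustrative sketch only: the real client_id_dir lives in shared.base and
# its exact substitution rules may differ from the assumptions made here.
import os

def client_id_dir_sketch(client_id):
    """Map a client ID (an X.509-style DN) to a filesystem-safe directory
    name. Assumed rule for illustration: '/' becomes '+', spaces become '_'.
    """
    return client_id.replace('/', '+').replace(' ', '_')

# Hypothetical DN and state directory, used only for demonstration
client_id = '/C=DK/ST=NA/O=Example/CN=Jane Doe/emailAddress=jane@example.org'
user_home = '/home/mig/state/user_home'

client_dir = client_id_dir_sketch(client_id)
# The examples below all follow this join-and-terminate pattern; the trailing
# os.sep prevents a user name that is a prefix of another from matching both
base_dir = os.path.abspath(os.path.join(user_home, client_dir)) + os.sep
print(base_dir)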
Example 1: refresh_user_map
def refresh_user_map(configuration):
    """Refresh map of users and their configuration. Uses a pickled
    dictionary for efficiency.
    User IDs are stored in their raw (non-anonymized form).
    Only update map for users that updated conf after last map save.
    """
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "user.map")
    lock_path = os.path.join(configuration.mig_system_files, "user.lock")
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    user_map, map_stamp = load_user_map(configuration, do_lock=False)

    # Find all users and their configurations

    all_users = list_users(configuration.user_home)
    real_map = real_to_anon_user_map(configuration.user_home)
    for user in all_users:
        settings_path = os.path.join(configuration.user_settings,
                                     client_id_dir(user), settings_filename)
        profile_path = os.path.join(configuration.user_settings,
                                    client_id_dir(user), profile_filename)
        settings_mtime, profile_mtime = 0, 0
        if os.path.isfile(settings_path):
            settings_mtime = os.path.getmtime(settings_path)
        if os.path.isfile(profile_path):
            profile_mtime = os.path.getmtime(profile_path)
        if settings_mtime + profile_mtime > 0:
            conf_mtime = max(settings_mtime, profile_mtime)
        else:
            conf_mtime = -1
        # init first time
        user_map[user] = user_map.get(user, {})
        if not user_map[user].has_key(CONF) or conf_mtime >= map_stamp:
            user_conf = get_user_conf(user, configuration, True)
            if not user_conf:
                user_conf = {}
            user_map[user][CONF] = user_conf
            public_id = user
            if user_conf.get('ANONYMOUS', True):
                public_id = real_map[user]
            user_map[user][USERID] = public_id
            user_map[user][MODTIME] = map_stamp
            dirty += [user]

    # Remove any missing users from map

    missing_user = [user for user in user_map.keys() \
                    if not user in all_users]
    for user in missing_user:
        del user_map[user]
        dirty += [user]

    if dirty:
        try:
            dump(user_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save user map: %s" % exc)
Example 2: get_job_ids_with_specified_project_name
def get_job_ids_with_specified_project_name(
    client_id,
    project_name,
    mrsl_files_dir,
    logger,
    ):
    """Helper for finding a job with a given project field"""

    client_dir = client_id_dir(client_id)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(mrsl_files_dir, client_dir)) \
        + os.sep

    # this is heavy :-/ we must loop all the mrsl files submitted by the user
    # to find the job ids belonging to the specified project

    matching_job_ids = []
    all_files = os.listdir(base_dir)
    for mrsl_file in all_files:
        job_dict = unpickle(base_dir + os.sep + mrsl_file, logger)
        if not job_dict:
            continue
        if job_dict.has_key('PROJECT'):
            if job_dict['PROJECT'] == project_name:
                matching_job_ids.append(job_dict['JOB_ID'])
    return matching_job_ids
Example 3: main
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    status = returnvalues.OK

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Job Manager'
    title_entry['style'] = css_tmpl(configuration)
    title_entry['javascript'] = js_tmpl()

    output_objects.append({'object_type': 'header', 'text': 'Job Manager'})
    output_objects.append({'object_type': 'html_form', 'text': html_pre()})
    output_objects.append({'object_type': 'table_pager', 'entry_name': 'jobs',
                           'default_entries': default_pager_entries,
                           'form_append': pager_append()})
    output_objects.append({'object_type': 'html_form', 'text': html_post()})
    return (output_objects, status)
Example 4: get_allowed_path
def get_allowed_path(configuration, client_id, path):
    """Check certificate data and path for either a valid user/server
    or a resource using a valid session id. If the check succeeds, the
    real path to the file is returned.
    """
    client_dir = client_id_dir(client_id)

    # Check cert and decide if it is a user, resource or server

    if not client_id:
        path_slash_stripped = path.lstrip("/")
        sessionid = path_slash_stripped[:path_slash_stripped.find("/")]

        # check that the sessionid is ok (does symlink exist?)

        if not os.path.islink(configuration.webserver_home + sessionid):
            raise Exception("Invalid session id!")

        target_dir = configuration.webserver_home \
            + path_slash_stripped[:path_slash_stripped.rfind("/")]
        target_file = path_slash_stripped[path_slash_stripped.rfind("/") + 1:]
    elif is_user(client_id, configuration.mig_server_home):
        real_path = os.path.normpath(os.path.join(configuration.user_home,
                                                  client_dir, path))
        target_dir = os.path.dirname(real_path)
        target_file = os.path.basename(real_path)
    elif is_server(client_id, configuration.server_home):
        real_path = os.path.normpath(os.path.join(configuration.server_home,
                                                  client_dir, path))
        target_dir = os.path.dirname(real_path)
        target_file = os.path.basename(real_path)
    else:
        raise Exception("Invalid credentials %s: no such user or server"
                        % client_id)

    target_path = target_dir + "/" + target_file
    return target_path
Example 5: lookup_full_user
def lookup_full_user(username):
    """Look up the full user identity for username consisting of e.g. just an
    email address.
    The method to extract the full identity depends on the back end database.
    If username matches either the openid link, the full ID or the dir version
    from it, a tuple with the expanded username and the full user dictionary
    is returned.
    On no match a tuple with the unchanged username and an empty dictionary
    is returned.
    """
    # print "DEBUG: lookup full user for %s" % username

    db_path = os.path.join(configuration.mig_code_base, 'server',
                           'MiG-users.db')

    # print "DEBUG: Loading user DB"

    id_map = load_user_db(db_path)
    login_url = os.path.join(configuration.user_openid_providers[0], username)
    distinguished_name = get_openid_user_dn(configuration, login_url)

    # print "DEBUG: compare against %s" % full_id

    if distinguished_name in id_map:
        url_friendly = client_id_dir(distinguished_name)
        return (url_friendly, id_map[distinguished_name])
    return (username, {})
Example 6: handle_proxy
def handle_proxy(proxy_string, client_id, config):
    """If ARC-enabled server: store a proxy certificate.
    Arguments: proxy_string - text extracted from given upload
               client_id - DN for user just being created
               config - global configuration
    """
    output = []
    client_dir = client_id_dir(client_id)
    proxy_dir = os.path.join(config.user_home, client_dir)
    proxy_path = os.path.join(config.user_home, client_dir, arc.Ui.proxy_name)

    if not config.arc_clusters:
        output.append({'object_type': 'error_text', 'text':
                       'No ARC support!'})
        return output

    # store the file

    try:
        write_file(proxy_string, proxy_path, config.logger)
        os.chmod(proxy_path, 0600)
    except Exception, exc:
        output.append({'object_type': 'error_text', 'text'
                       : 'Proxy file could not be written (%s)!'
                       % str(exc).replace(proxy_dir, '')})
        return output
Example 7: edit_vm
def edit_vm(client_id, configuration, machine_name, machine_specs):
    """Updates the vm configuration for vm with given machine_name"""

    # Grab the base directory of the user

    client_dir = client_id_dir(client_id)
    user_home = os.path.abspath(os.path.join(configuration.user_home,
                                             client_dir))
    vms_conf_paths = glob(os.path.join(user_home, vm_base, machine_name,
                                       '*.cfg'))

    # Grab the configuration file defining the machine

    for conf_path in vms_conf_paths:
        vm_config = ConfigParser.ConfigParser()
        vm_config.read([conf_path])
        for (key, val) in machine_specs.items():
            if not isinstance(val, basestring) and isinstance(val, list):
                string_val = ''
                for entry in val:
                    string_val += '%s ' % entry
            else:
                string_val = val
            vm_config.set('MiG', key, string_val)
        conf_fd = open(conf_path, 'w')
        vm_config.write(conf_fd)
        conf_fd.close()
    return (True, '')
Example 8: _parse_and_save_auth_pw_keys
def _parse_and_save_auth_pw_keys(publickeys, password, client_id,
                                 configuration, proto, proto_conf_dir):
    """Validate and write publickey and password settings for proto
    (ssh/davs/ftps) in proto_conf_dir.
    """
    client_dir = client_id_dir(client_id)
    proto_conf_path = os.path.join(configuration.user_home, client_dir,
                                   proto_conf_dir)
    # Create proto conf dir for any old users
    try:
        os.mkdir(proto_conf_path)
    except:
        pass
    keys_path = os.path.join(proto_conf_path, authkeys_filename)
    key_status = parse_and_save_publickeys(keys_path, publickeys, client_id,
                                           configuration)
    pw_path = os.path.join(proto_conf_path, authpasswords_filename)
    pw_status = parse_and_save_passwords(pw_path, password, client_id,
                                         configuration)
    digest_path = os.path.join(proto_conf_path, authdigests_filename)
    if proto == 'davs':
        digest_status = parse_and_save_digests(digest_path, password,
                                               client_id, configuration)
    else:
        digest_status = (True, '')
    status = (key_status[0] and pw_status[0] and digest_status[0],
              key_status[1] + pw_status[1] + digest_status[1])
    if status[0]:
        mark_user_modified(configuration, client_id)
    return status
Example 9: main
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not correct_handler('POST'):
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : 'Only accepting POST requests to prevent unintended updates'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]
    resconfig = accepted['resconfig'][-1]

    output_objects.append({'object_type': 'header', 'text'
                           : 'Trying to Update resource configuration'})

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        logger.error(client_id + ' is not an owner of '
                     + unique_resource_name + ': update rejected!')
        output_objects.append({'object_type': 'error_text', 'text'
                               : 'You must be an owner of '
                               + unique_resource_name
                               + ' to update the configuration!'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # TODO: race if two confs are uploaded concurrently!

    host_url, host_identifier = unique_resource_name.rsplit('.', 1)
    pending_file = os.path.join(configuration.resource_home,
                                unique_resource_name, 'config.tmp')

    # write new proposed config file to disk

    try:
        logger.info('write to file: %s' % pending_file)
        if not write_file(resconfig, pending_file, logger):
            output_objects.append({'object_type': 'error_text',
                                   'text': 'Could not write: %s'
                                   % pending_file})
            return (output_objects, returnvalues.SYSTEM_ERROR)
    except Exception, err:
        logger.error('Resource conf %s could not be written: %s' % \
                     (pending_file, err))
        output_objects.append({'object_type': 'error_text', 'text':
                               'Could not write configuration!'})
        return (output_objects, returnvalues.SYSTEM_ERROR)
Example 10: refresh_job_stats
def refresh_job_stats(configuration, client_id):
    """Refresh job stats for specified user"""
    dirty = False
    client_dir = client_id_dir(client_id)
    job_base = os.path.join(configuration.mrsl_files_dir, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "job-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except:
        pass

    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    job_stats = {PARSE: 0, QUEUED: 0, EXECUTING: 0, FINISHED: 0, RETRY: 0,
                 CANCELED: 0, EXPIRED: 0, FAILED: 0, FROZEN: 0}
    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
        # Backwards compatible update
        job_stats.update(stats[JOBS])
        stats[JOBS] = job_stats
    except IOError:
        configuration.logger.warn("No job stats to load - ok first time")
        stats = {JOBS: job_stats}
        stats_stamp = -1

    now = time.time()
    if now < stats_stamp + JOB_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Inspect all jobs in user job dir and update the ones that changed
    # since last stats run

    for name in os.listdir(job_base):
        if stats.has_key(name) and stats[name]["STATUS"] in FINAL_STATES:
            continue
        job_path = os.path.join(job_base, name)
        job_stamp = os.path.getmtime(job_path)
        if stats.has_key(name) and job_stamp < stats_stamp:
            continue
        dirty = True
        job = load(job_path)
        update_job_stats(stats, name, job)

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            configuration.logger.error("Could not save stats cache: %s" % exc)
Example 11: migrate_job
def migrate_job(config, job, peer):
    protocol = 'https'
    port = ''
    server = peer['fqdn']

    # Remove schedule hint from job before migration

    del job['SCHEDULE_HINT']

    # Make sure legacy jobs don't fail

    if not job.has_key('MIGRATE_COUNT'):
        job['MIGRATE_COUNT'] = str(0)

    # Add or increment migration counter

    migrate_count = int(job['MIGRATE_COUNT']) + 1
    job['MIGRATE_COUNT'] = str(migrate_count)

    # TODO: only upload if job is not already replicated at
    # remote server

    # TMP!
    steal_job = False

    if not steal_job:

        # upload pickled job to server

        client_dir = client_id_dir(job['USER_CERT'])
        mrsl_filename = config.mrsl_files_dir + client_dir + '/'\
            + job['JOB_ID'] + '.mRSL'
        result = pickle(job, mrsl_filename, config.logger)
        if not result:
            config.logger.error('Aborting migration of job %s (%s)',
                                job['JOB_ID'], result)
            return False

        dest = mrsl_filename

        # TMP!
        # upload_reply = put_data(config, mrsl_filename, protocol, server, port, dest)
        config.logger.warning('Actual migration disabled until fully supported')
        upload_reply = (-1,
                        'Actual migration disabled until fully supported')
        if upload_reply[0] != http_success:
            return False

    # migration_msg = ""
    # migration_reply = put_data(config, protocol, server, port, migration_msg)

    return True
Example 12: main
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not correct_handler('POST'):
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : 'Only accepting POST requests to prevent unintended updates'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    save_as_default = (accepted['save_as_default'][-1] != 'False')
    external_dict = get_keywords_dict(configuration)
    mrsl = fields_to_mrsl(configuration, user_arguments_dict, external_dict)

    tmpfile = None

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    # save to temporary file

    try:
        (filehandle, real_path) = tempfile.mkstemp(text=True)
        relative_path = os.path.basename(real_path)
        os.write(filehandle, mrsl)
        os.close(filehandle)
    except Exception, err:
        output_objects.append({'object_type': 'error_text',
                               'text':
                               'Failed to write temporary mRSL file: %s' % \
                               err})
        return (output_objects, returnvalues.SYSTEM_ERROR)
Example 13: main
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_title=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    job_id_list = accepted['job_id']
    external_dict = mrslkeywords.get_keywords_dict(configuration)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                                     client_dir)) + os.sep

    status = returnvalues.OK
    for job_id in job_id_list:

        # job = Job()

        filepath = os.path.join(base_dir, job_id)
        filepath += '.mRSL'

        (new_job_obj_status, new_job_obj) = \
            create_job_object_from_pickled_mrsl(filepath, logger,
                                                external_dict)
        if not new_job_obj_status:
            output_objects.append({'object_type': 'error_text', 'text'
                                   : new_job_obj})
            status = returnvalues.CLIENT_ERROR
        else:

            # return new_job_obj

            output_objects.append({'object_type': 'jobobj', 'jobobj'
                                   : new_job_obj})
    return (output_objects, status)
Example 14: delete_vm
def delete_vm(client_id, configuration, machine_name):
    """Deletes the vm dir with configuration and images for vm with given
    machine_name"""

    # Grab the base directory of the user

    client_dir = client_id_dir(client_id)
    user_home = os.path.abspath(os.path.join(configuration.user_home,
                                             client_dir))
    vms_machine_path = os.path.join(user_home, vm_base, machine_name)
    msg = ''
    success = remove_rec(vms_machine_path, configuration)
    if not success:
        msg = "Error while removing %s" % machine_name
    return (success, msg)
Example 15: parse_and_save_pickle
def parse_and_save_pickle(source, destination, keywords, client_id,
                          configuration, strip_space, strip_comments):
    """Use conf parser to parse settings in mRSL file and save resulting
    dictionary in a pickled file in user_settings.
    """
    client_dir = client_id_dir(client_id)
    result = parser.parse(source, strip_space, strip_comments)
    (status, parsemsg) = parser.check_types(result, keywords,
                                            configuration)
    try:
        os.remove(source)
    except Exception, err:
        msg = 'Exception removing temporary file %s, %s'\
            % (source, err)