This article collects and summarizes typical usage examples of the Python settings.get_settings function. If you are wondering what get_settings does and how to call it, the 15 code examples below, drawn from real projects and ordered by popularity by default, should help.
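The projects quoted below expose get_settings with slightly different signatures: some call it with no arguments and read attributes off the result (Examples 1, 8 and 14), most pass a run-environment name such as "dev" or "live" and use fields like domain or aws_access_key_id, Example 2 indexes the result like a dictionary, and Example 7 passes a credentials object. As a rough orientation only, a minimal environment-keyed loader in the spirit of these examples might look like the following sketch; the module layout, class name and field values are hypothetical and are not taken from any of the quoted projects.

# settings.py - hypothetical minimal loader, for illustration only
class Settings(object):
    """Simple attribute container for one run environment."""
    def __init__(self, **values):
        for key, value in values.items():
            setattr(self, key, value)

# Example values only; a real project would keep credentials out of source control.
_ENVIRONMENTS = {
    "dev": Settings(domain="myapp-dev", default_task_list="DefaultTaskList-dev",
                    aws_access_key_id="...", aws_secret_access_key="...",
                    setLevel="INFO"),
    "live": Settings(domain="myapp-live", default_task_list="DefaultTaskList-live",
                     aws_access_key_id="...", aws_secret_access_key="...",
                     setLevel="INFO"),
}

def get_settings(env="dev"):
    """Return the settings object for the named run environment."""
    return _ENVIRONMENTS[env]

Called as settings = get_settings("live"), a loader like this supports the attribute-style access (settings.domain, settings.default_task_list) seen in most of the examples; a project that indexes the result, as Example 2 does, would return a nested dictionary instead.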
Example 1: get_userheader
def get_userheader():
    options = settings.get_settings()
    templates = []
    for fn in settings.get_settings().user_headers:
        filename = utils.which(fn, options.user_data_dirs)
        if filename:
            templates.append(USERHEADER_INFO % fn)
            templates.append(open(filename).read())
    return string.join(templates, '\n')
Example 2: validate_settings
def validate_settings(l=True, v=True):
    log_info("", l, v)
    log_info("Validate GFW-sync settings", l, v)
    #print ""
    #print "Validate GFW-sync settings"

    sets = settings.get_settings()
    errors = 0
    warnings = 0

    bucket = sets['folders']['default_bucket']
    bucket_drives = sets['bucket_drives']
    bucket = validate_bucket(bucket, bucket_drives, l, v)
    if not bucket:
        errors += 1

    default_srs = sets['spatial_references']['default_srs']
    default_srs = validate_srs(default_srs, l, v)
    if not default_srs:
        errors += 1

    gdb_srs = sets['spatial_references']['gdb_srs']
    gdb_srs = validate_srs(gdb_srs, l, v)
    if not gdb_srs:
        errors += 1

    return errors, warnings
Example 3: get_docs_from_SimpleDB
def get_docs_from_SimpleDB(self, ENV="dev", last_updated_since=None):
    """
    Get the array of docs from the SimpleDB provider
    """
    docs = []

    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    db = dblib.SimpleDB(settings)
    db.connect()

    if last_updated_since is not None:
        xml_item_list = db.elife_get_POA_delivery_S3_file_items(last_updated_since=last_updated_since)
    else:
        # Get all - not implemented for now to avoid mistakes running too many workflows
        xml_item_list = []

    for x in xml_item_list:
        tmp = {}
        name = x['name']
        tmp['document'] = name
        docs.append(tmp)

    return docs
Example 4: main
def main():
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    global settings
    settings = settings_lib.get_settings(env)

    log_file = "process_dashboard_queue.log"
    global logger
    logger = log.logger(log_file, settings.log_level)

    # Simple connect
    queue = get_queue()
    pool = Pool(settings.event_queue_pool_size)

    while True:
        messages = queue.get_messages(num_messages=settings.event_queue_message_count,
                                      visibility_timeout=60, wait_time_seconds=20)
        if messages is not None:
            logger.info(str(len(messages)) + " message received")
            pool.map(process_message, messages)
        else:
            logger.info("No messages received")
Example 5: decide
def decide(ENV="dev"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    # Decider event history length requested
    maximum_page_size = 100

    # Log
    identity = "decider_%s" % int(random.random() * 1000)
    logFile = "decider.log"
    #logFile = None
    logger = log.logger(logFile, settings.setLevel, identity)

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

    token = None

    # Poll for a decision task
    while True:
        if token is None:
            logger.info('polling for decision...')

            decision = conn.poll_for_decision_task(settings.domain, settings.default_task_list,
                                                   identity, maximum_page_size)

            # Check for a nextPageToken and keep polling until all events are pulled
            decision = get_all_paged_events(decision, conn, settings.domain, settings.default_task_list,
                                            identity, maximum_page_size)

            token = get_taskToken(decision)

            logger.info('got decision: [json omitted], token %s' % token)
            #logger.info('got decision: \n%s' % json.dumps(decision, sort_keys=True, indent=4))

            if token is not None:
                # Get the workflowType and attempt to do the work
                workflowType = get_workflowType(decision)
                if workflowType is not None:
                    logger.info('workflowType: %s' % workflowType)

                    # Instantiate an object for the workflow using eval
                    # Build a string for the object name
                    workflow_name = get_workflow_name(workflowType)

                    # Attempt to import the module for the workflow
                    if import_workflow_class(workflow_name):
                        # Instantiate the workflow object
                        workflow_object = get_workflow_object(workflow_name, settings, logger, conn,
                                                              token, decision, maximum_page_size)
                        # Process the workflow
                        success = workflow_object.do_workflow()

                        # Print the result to the log
                        logger.info('%s success %s' % (workflow_name, success))
                    else:
                        logger.info('error: could not load object %s\n' % workflow_name)

        # Reset and loop
        token = None
Example 6: start
def start(self, ENV="dev", workflow="S3Monitor"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    # Log
    identity = "starter_%s" % int(random.random() * 1000)
    logFile = "starter.log"
    #logFile = None
    logger = log.logger(logFile, settings.setLevel, identity)

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

    if workflow:
        (workflow_id, workflow_name, workflow_version, child_policy,
         execution_start_to_close_timeout, input) = self.get_workflow_params(workflow, settings)

        logger.info('Starting workflow: %s' % workflow_id)
        try:
            response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name,
                                                     workflow_version, settings.default_task_list,
                                                     child_policy, execution_start_to_close_timeout,
                                                     input)
            logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
        except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
            # There is already a running workflow with that ID, cannot start another
            message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
            print message
            logger.info(message)
Example 7: post
def post(self, bot_name):
    creds = twitter.get_twitter_creds(bot_name)
    if not self.authenticate_user(creds):
        self.render_notloggedin()
    else:
        bot_settings = settings.get_settings(creds)
        bot_settings.learning_style = self.request.get('learnfrom')
        bot_settings.learning_guru = self.request.get('guru_name')
        bot_settings.locquacity_onschedule = self.request.get('locquacity_onschedule') == "true"
        bot_settings.locquacity_reply = self.request.get('locquacity_reply') == "true"
        bot_settings.locquacity_speakonnew = self.request.get('locquacity_speakonnew') == "true"
        bot_settings.learn_retrospectively = self.request.get('learn_retrospectively') == "true"

        gn = self.request.get('locquacity_greetnew') == "true"
        logging.debug('SettingsHandler.post(): locquacity_greetnew=%s, bot_settings.locquacity_greetnew=%s'
                      % (gn, bot_settings.locquacity_greetnew))
        if gn and not bot_settings.locquacity_greetnew:
            logging.debug('-> fetch follower ids')
            api = twitter.get_api(creds)
            follower_ids = api.followers_ids()
            creds.follower_ids = follower_ids
            creds.put()
        bot_settings.locquacity_greetnew = gn

        tweet_frequency = self.request.get('tweet_frequency')
        if tweet_frequency is not None and len(tweet_frequency) > 0:
            bot_settings.tweet_frequency = float(tweet_frequency)

        tweet_chance = self.request.get('tweet_chance')
        if tweet_chance is not None and len(tweet_chance) > 0:
            bot_settings.tweet_chance = float(tweet_chance)

        self.render_template(creds, bot_settings, {"saved": True})

        bot_settings.creds = creds
        bot_settings.put()
Example 8: __init__
def __init__(self, settings):
    gtk.ScrolledWindow.__init__(self)
    self.set_border_width(4)
    self.settings = settings.get_settings()
    self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
    self.vbox = gtk.VBox(False, 5)
    self.hbox = gtk.HBox(False, 8)
    self.vbox.pack_start(self.hbox, True)
    self.left = gtk.VBox()
    self.right = gtk.VBox()
    self.hbox.pack_start(self.left, True)
    self.hbox.pack_start(self.right, True)
    self.create_form()
    viewport = gtk.Viewport()
    viewport.set_shadow_type(gtk.SHADOW_NONE)
    viewport.add(self.vbox)
    self.add(viewport)
    self.show_all()
Example 9: main
def main(flag):
    global settings
    global env
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env
    settings = settings_lib.get_settings(env)

    log_file = "queue_workflow_starter.log"
    global logger
    logger = log.logger(log_file, settings.setLevel)

    # Simple connect
    queue = get_queue()

    while flag.green():
        messages = queue.get_messages(1, visibility_timeout=60, wait_time_seconds=20)
        if messages:
            logger.info(str(len(messages)) + " message received")
            logger.info('message contents: %s', messages[0])
            process_message(messages[0])
        else:
            logger.debug("No messages received")

    logger.info("graceful shutdown")
Example 10: update_metadata
def update_metadata(in_fc, tech_title, gfw_env):
    api_url = settings.get_settings(gfw_env)['metadata']['api_url']
    layer_url = api_url + r'/' + tech_title

    response = requests.get(layer_url)
    api_data = json.loads(response.text)

    md = arcpy_metadata.MetadataEditor(in_fc)

    md.title = escape_html(api_data['title'])
    md.purpose = escape_html(api_data['function'])
    md.abstract = escape_html(api_data['overview'])
    md.tags = api_data['tags'].split(",")
    md.extent_description = escape_html(api_data['geographic_coverage'])
    md.last_update = escape_html(api_data['date_of_content'])
    md.update_frequency = escape_html(api_data['frequency_of_updates'])
    md.citation = escape_html(api_data['citation'])
    md.limitation = escape_html(api_data['cautions'])
    md.source = escape_html(api_data['source'])
    md.scale_resolution = escape_html(api_data['resolution'])
    md.supplemental_information = escape_html(api_data['other'])

    md.finish()
Example 11: start
def start(self, ENV="dev", limit=None):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    # Log
    identity = "starter_%s" % int(random.random() * 1000)
    logFile = "starter.log"
    #logFile = None
    logger = log.logger(logFile, settings.setLevel, identity)

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

    # Start a workflow execution
    workflow_id = "SendQueuedEmail"
    workflow_name = "SendQueuedEmail"
    workflow_version = "1"
    child_policy = None
    execution_start_to_close_timeout = None

    if limit:
        input = '{"data": {"limit": "' + limit + '"}}'
    else:
        input = None

    try:
        response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name,
                                                 workflow_version, settings.default_task_list,
                                                 child_policy, execution_start_to_close_timeout,
                                                 input)
        logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
    except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
        # There is already a running workflow with that ID, cannot start another
        message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
        print message
        logger.info(message)
Example 12: start
def start(self, ENV="dev"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    ping_marker_id = "cron_NewS3FiguresPDF"

    # Log
    logFile = "starter.log"
    logger = log.logger(logFile, settings.setLevel, ping_marker_id)

    # Data provider
    db = dblib.SimpleDB(settings)
    db.connect()

    # SWF meta data provider
    swfmeta = swfmetalib.SWFMeta(settings)
    swfmeta.connect()

    # Default, if cron never run before
    last_startTimestamp = 0

    # Get the last time this cron was run
    last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id=ping_marker_id)

    # Start a ping workflow as a marker
    self.start_ping_marker(ping_marker_id, ENV)

    # Check for S3 PDF files that were updated since the last run
    date_format = "%Y-%m-%dT%H:%M:%S.000Z"

    # Quick hack - subtract 30 minutes to not ignore the top of the hour,
    # the time between S3Monitor running and this cron starter
    last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
    if last_startTimestamp_minus_30 < 0:
        last_startTimestamp_minus_30 = 0
    time_tuple = time.gmtime(last_startTimestamp_minus_30)
    last_startDate = time.strftime(date_format, time_tuple)

    logger.info('last run %s' % (last_startDate))

    S3_item_list = db.elife_get_article_S3_file_items(file_data_type="figures", latest=True,
                                                      last_updated_since=last_startDate)

    logger.info('Figures PDF files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))

    if len(S3_item_list) <= 0:
        # No new PDF
        pass
    else:
        # Found new PDF files
        # Start a PublishPDF starter
        try:
            starter_name = "starter_PublishFiguresPDF"
            self.import_starter_module(starter_name, logger)
            s = self.get_starter_module(starter_name, logger)
            s.start(ENV=ENV, last_updated_since=last_startDate)
        except:
            logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
            logger.exception('')
Example 13: get_docs_from_SimpleDB
def get_docs_from_SimpleDB(self, ENV="dev", last_updated_since=None, doi_id=None):
    """
    Get the array of docs from the SimpleDB provider
    """
    docs = []

    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    db = dblib.SimpleDB(settings)
    db.connect()

    if last_updated_since is not None:
        xml_item_list = db.elife_get_article_S3_file_items(file_data_type="suppl", latest=True,
                                                           last_updated_since=last_updated_since)
    elif doi_id is not None:
        xml_item_list = db.elife_get_article_S3_file_items(file_data_type="suppl", latest=True,
                                                           doi_id=doi_id)
    else:
        # Get all
        xml_item_list = db.elife_get_article_S3_file_items(file_data_type="suppl", latest=True)

    for x in xml_item_list:
        tmp = {}
        elife_id = str(x['name']).split("/")[0]
        document = 'https://s3.amazonaws.com/' + x['item_name']
        tmp['elife_id'] = elife_id
        tmp['document'] = document
        docs.append(tmp)

    return docs
Example 14: get_systemheader
def get_systemheader():
    options = settings.get_settings()
    fn = utils.which(
        "header.ps", list(options.user_data_dirs) + [SYSTEM_DATA_DIR])
    if fn:
        return open(fn).read()
    return "%%\n%% System header %s not found!\n%%" % fn
Example 15: start
def start(self, ENV="dev"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    ping_marker_id = "cron_NewS3FullArticle"

    # Log
    logFile = "starter.log"
    logger = log.logger(logFile, settings.setLevel, ping_marker_id)

    # Data provider
    db = dblib.SimpleDB(settings)
    db.connect()

    # SWF meta data provider
    swfmeta = swfmetalib.SWFMeta(settings)
    swfmeta.connect()

    # Get the last time this cron was run
    last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id=ping_marker_id)

    # Start a ping workflow as a marker
    self.start_ping_marker(ping_marker_id, ENV)

    # Check for S3 XML files that were updated since the last run
    date_format = "%Y-%m-%dT%H:%M:%S.000Z"

    # Quick hack - subtract 15 minutes,
    # the time between S3Monitor running and this cron starter
    if last_startTimestamp is not None:
        last_startTimestamp_minus_15 = last_startTimestamp - (60*15)
    else:
        # On the first run ever the last start timestamp will be unavailable
        last_startTimestamp_minus_15 = time.time() - (60*15)

    time_tuple = time.gmtime(last_startTimestamp_minus_15)
    last_startDate = time.strftime(date_format, time_tuple)

    logger.info('last run %s' % (last_startDate))

    file_list = db.elife_get_production_final_delivery_S3_file_items(last_updated_since=last_startDate)

    logger.info('Full Article files updated since %s: %s' % (last_startDate, str(len(file_list))))

    if len(file_list) <= 0:
        # No new XML
        pass
    else:
        # Found new XML files
        # Start a PublishFullArticleZip starter
        try:
            starter_name = "starter_PublishFullArticleZip"
            self.import_starter_module(starter_name, logger)
            s = self.get_starter_module(starter_name, logger)
            s.start(ENV=ENV, last_updated_since=last_startDate)
        except:
            logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
            logger.exception('')