本文整理汇总了Python中utils.save_data函数的典型用法代码示例。如果您正苦于以下问题:Python save_data函数的具体用法?Python save_data怎么用?Python save_data使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了save_data函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run():
    """Fetch contact-form steps for current legislators and store each
    one's entry URL on the legislator's latest term, then save the file.

    With positional CLI args, only those bioguide IDs are fetched;
    otherwise every current member (minus SKIP_BIOGUIDES) is processed.
    """
    flags = utils.flags()
    verbose = flags.get('debug', False)
    data_file = "legislators-current.yaml"
    requested = utils.args()
    members = load_data(data_file)

    if requested:
        wanted = requested
        print("Fetching contact forms for %s..." % ', '.join(wanted))
    else:
        wanted = [entry['id']['bioguide'] for entry in members]
        print("Fetching contact forms for all current members...")

    for member in members:
        bid = member['id']['bioguide']
        if bid not in wanted or bid in SKIP_BIOGUIDES:
            continue
        if verbose:
            print("Downloading form for %s" % bid, flush=True)
        try:
            steps = contact_steps_for(bid)
        except LegislatorNotFoundError as err:
            if verbose:
                print("skipping, %s..." % err, flush=True)
            continue
        # Record the form's entry-point URL on the most recent term.
        member['terms'][-1]['contact_form'] = steps['contact_form']['steps'][0]['visit']

    print("Saving data to %s..." % data_file)
    save_data(members, data_file)
示例2: main
def main(args):
    '''Run NSGA generations for the EV3 robot problem, saving each one.

    args: parsed CLI options; must provide `iterations` plus whatever
    utils.initialize_database consumes.
    NOTE(review): the database `properties` schema (highest_population,
    population_size) is assumed from usage here -- confirm upstream.
    '''
    # These are read by other module functions (e.g. fitness evaluation),
    # hence module-level globals rather than locals.
    global database
    global genetic_algorithm
    global joint_positions
    global goal_positions
    pygame.init()
    random.seed()
    database = utils.initialize_database(args, 'RobotTrainingData')
    # Objective labels (Spanish): time, per-joint angle errors, energy.
    database.set_objective_names(['Tiempo', r'Error en $\theta_1$', r'Error en $\theta_2$', r'Error en $\theta_3$', 'Energía'])
    problem = EV3Problem()
    generation = database.properties['highest_population']
    population_size = database.properties['population_size']
    genetic_algorithm = evolution.NSGA(problem, population_size)
    # Training goals ship as package resources next to the code.
    x_path = os.path.abspath(pkg_resources.resource_filename('resources.ev3', 'x_train.txt'))
    y_path = os.path.abspath(pkg_resources.resource_filename('resources.ev3', 'y_train.txt'))
    # Rotate through the training set in batches of N_GOALS,
    # cycling back every 10 generations.
    batch_start = (generation % 10) * N_GOALS
    joint_positions = np.loadtxt(x_path)[batch_start : batch_start + N_GOALS, :]
    goal_positions = np.loadtxt(y_path)[batch_start : batch_start + N_GOALS, :]
    if generation > 0:
        # Resuming a previous run: restore saved parents and children.
        parents, children = utils.load_data(database)
        genetic_algorithm.set_population(parents)
        genetic_algorithm.set_children(children)
    for _ in range(args.iterations):
        generation += 1
        print('Starting generation ' + str(generation))
        genetic_algorithm.iterate()
        database.create_population()
        # Persist after every generation so progress survives interruption.
        utils.save_data(genetic_algorithm, database)
        print('=' * (SCREEN_WIDTH - 1))
示例3: update_coordinates
def update_coordinates(matchings, filename, geocode_serv, all_=False):
    """Geocode school addresses in place, saving progress periodically.

    matchings: dict of school records (mutated in place).
    filename: destination passed to utils.save_data every 10 geocodes.
    geocode_serv: callable taking an address string, returning coords or falsy.
    all_: when True, re-geocode even records that already have coordinates.

    Counters printed per school: changed/geocoded/attempted/seen/total.
    """
    from datetime import datetime
    from utils.geocode import distance
    from utils import save_data
    # Oldest-modified first, so stale records are refreshed before recent ones.
    schools_ = sorted(matchings.values(), key=lambda elem: datetime.strptime(elem['last_modified_at'], '%Y-%m-%d %H:%M:%S.%f'))
    counter1, counter2, counter3, counter4 = 0, 0, 0, 0
    for school in schools_:
        counter1 += 1
        if ('address' in school
                and 'number' in school['address']
                and ('geocoded' not in school['address'] or school['address']['geocoded'] == False)
                and (all_ or not all_ and 'coordinates' not in school['address'])):
            counter2 += 1
            # BUG FIX: the original assigned the result to the name
            # `full_address`, shadowing the helper function itself, so the
            # second eligible school raised "TypeError: 'str' object is not
            # callable".  Use a distinct local name instead.
            address_text = full_address(school['address'])
            coord = geocode_serv(address_text)
            if coord:
                counter3 += 1
                # Count only meaningful moves (> 0.1 distance units) or brand-new coords.
                if 'coordinates' not in school['address'] or distance(coord, school['address']['coordinates']) > 0.1:
                    counter4 += 1
                school['address']['geocoded'] = True
                school['address']['coordinates'] = coord
                school['last_modified_at'] = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
            # Checkpoint every 10 attempted geocodes so progress survives crashes.
            if (counter2 % 10 == 0):
                save_data(matchings, filename)
            str_counter = str(counter4) + '/' + str(counter3) + '/' + str(counter2) + '/' + str(counter1) + '/' + str(len(schools_))
            print(str_counter + ': ' + address_text + ' >> ' + str(coord))
示例4: main
def main(state_num):
matches_filename = 'matches_%d' % state_num
print 'Loading %s ...' % matches_filename
matches = utils.load_data(matches_filename)
matches_reduced_filename = 'matches_reduced'
try:
print "Loading matches_reduced ..."
matches_reduced = utils.load_data(matches_reduced_filename)
except:
print "Matches_reduced doesn't exists, creating new."
matches_reduced = {}
num_matches = len(matches.keys())
for keyIdx, matchId in enumerate(matches.keys()):
print "\rMatch %d out of %d [%0.1f%%]" % (keyIdx + 1, num_matches, (keyIdx + 1) / float(num_matches) * 100),
summoners = []
num_summoners = len(matches[matchId]['participants'])
for i in range(num_summoners):
champLevel = matches[matchId]['participants'][i]['stats']['champLevel']
summonerId = matches[matchId]['participantIdentities'][i]['player']['summonerId']
winner = matches[matchId]['participants'][i]['stats']['winner']
summoners += [{'champLevel': champLevel, 'summonerId': summonerId, 'winner': winner}]
matches_reduced[matchId] = {'summoners': summoners}
print "Saving %s ..." % matches_reduced_filename
utils.save_data(matches_reduced, matches_reduced_filename)
print "Done!"
示例5: update
def update():
for rec in csv.DictReader(open("cache/social_media/%s_candidates.csv" % service)):
bioguide = rec["bioguide"]
candidate = rec["candidate"]
if media_bioguide.has_key(bioguide):
media_bioguide[bioguide]['social'][service] = candidate
else:
new_media = {'id': {}, 'social': {}}
new_media['id']['bioguide'] = bioguide
thomas_id = current_bioguide[bioguide]['id'].get("thomas", None)
govtrack_id = current_bioguide[bioguide]['id'].get("govtrack", None)
if thomas_id:
new_media['id']['thomas'] = thomas_id
if govtrack_id:
new_media['id']['govtrack'] = govtrack_id
new_media['social'][service] = candidate
media.append(new_media)
print "Saving social media..."
save_data(media, "legislators-social-media.yaml")
# if it's a youtube update, always do the resolve
if service == "youtube":
resolveyt()
示例6: refresh_categories
def refresh_categories(self):
    """Re-download the venue category tree from the API and cache it.

    Rebuilds the by-id and by-name lookup tables, then persists the raw
    category payload to self._filename.
    """
    from utils import save_data
    print('Refreshing FS categories...')
    fetched = self._api_venues.venues.categories()['categories']
    self.categories_by_id, self.categories_by_name = _prepare_categories(fetched)
    save_data(fetched, self._filename)
    print('Done.')
示例7: resolvefb
def resolvefb():
    """Fill in missing facebook_id fields via the Facebook Graph API.

    Numeric handles are copied straight into facebook_id and swapped for
    the graph username when one exists; non-numeric handles are looked up
    for their graph ID.  Saves the updated YAML when done.  Relies on the
    module-level `media` list.
    """
    updated_media = []
    for m in media:
        social = m['social']
        if ('facebook' in social and social['facebook']) and ('facebook_id' not in social):
            graph_url = "https://graph.facebook.com/%s" % social['facebook']
            # FIX: raw string for the regex (avoids the invalid-escape
            # DeprecationWarning).  NOTE(review): re.match only anchors at
            # the start, so "123abc" counts as numeric -- preserved as-is.
            if re.match(r'\d+', social['facebook']):
                social['facebook_id'] = social['facebook']
                print("Looking up graph username for %s" % social['facebook'])
                fbobj = requests.get(graph_url).json()
                if 'username' in fbobj:
                    print("\tGot graph username of %s" % fbobj['username'])
                    social['facebook'] = fbobj['username']
                else:
                    print("\tUnable to get graph username")
            else:
                try:
                    print("Looking up graph ID for %s" % social['facebook'])
                    fbobj = requests.get(graph_url).json()
                    if 'id' in fbobj:
                        print("\tGot graph ID of %s" % fbobj['id'])
                        social['facebook_id'] = fbobj['id']
                    else:
                        print("\tUnable to get graph ID")
                except Exception:
                    # FIX: was a bare `except:` (also caught KeyboardInterrupt);
                    # the best-effort fallback behavior is kept.
                    print("\tUnable to get graph ID for: %s" % social['facebook'])
                    social['facebook_id'] = None
        updated_media.append(m)
    print("Saving social media...")
    save_data(updated_media, "legislators-social-media.yaml")
示例8: resolveig
def resolveig():
    """Resolve Instagram handles in `media` to numeric instagram_id values
    via the Instagram user-search API, then save the YAML.

    With a --bioguide flag, only that legislator is resolved; everyone
    else is passed through unchanged.
    """
    # in order to preserve the comment block at the top of the file,
    # copy it over into a new RtYamlList instance. We do this because
    # Python list instances can't hold other random attributes.
    import rtyaml
    updated_media = rtyaml.RtYamlList()
    if hasattr(media, '__initial_comment_block'):
        updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
    # NOTE(review): file handle is never closed -- acceptable for a
    # one-shot script, but a `with` block would be tidier.
    client_id_file = open('cache/instagram_client_id','r')
    client_id = client_id_file.read()
    bioguide = utils.flags().get('bioguide', None)
    for m in media:
        # When filtering by bioguide, pass everyone else through untouched.
        if bioguide and (m['id']['bioguide'] != bioguide):
            updated_media.append(m)
            continue
        social = m['social']
        if 'instagram' not in social and 'instagram_id' not in social:
            updated_media.append(m)
            continue
        # NOTE(review): a record with instagram_id but no instagram handle
        # slips past the guard above and raises KeyError here -- confirm
        # whether such records can occur.
        instagram_handle = social['instagram']
        query_url = "https://api.instagram.com/v1/users/search?q={query}&client_id={client_id}".format(query=instagram_handle,client_id=client_id)
        instagram_user_search = requests.get(query_url).json()
        for user in instagram_user_search['data']:
            # Throttle to stay under the API rate limit.
            time.sleep(0.5)
            if user['username'] == instagram_handle:
                m['social']['instagram_id'] = int(user['id'])
                print("matched instagram_id {instagram_id} to {instagram_handle}".format(instagram_id=social['instagram_id'],instagram_handle=instagram_handle))
        updated_media.append(m)
    save_data(updated_media, "legislators-social-media.yaml")
示例9: resolvefb
def resolvefb():
updated_media = []
for m in media:
social = m['social']
if 'facebook' in social and social['facebook']:
graph_url = "https://graph.facebook.com/%s" % social['facebook']
if re.match('\d+', social['facebook']):
social['facebook_id'] = social['facebook']
fbobj = requests.get(graph_url).json()
if 'username' in fbobj:
social['facebook'] = fbobj['username']
else:
try:
social['facebook_id'] = requests.get(graph_url).json()['id']
except:
print "Unable to get graph ID for: %s" % social['facebook']
social['facebook_id'] = None
updated_media.append(m)
print "Saving social media..."
save_data(updated_media, "legislators-social-media.yaml")
示例10: run
def run():
    """Remove out-of-office legislators from the committee-membership and
    social-media YAML files, using legislators-current.yaml as the roster.
    """
    # load in members, orient by bioguide ID
    print("Loading current legislators...")
    current = load_data("legislators-current.yaml")
    current_bioguide = { }
    for m in current:
        if "bioguide" in m["id"]:
            current_bioguide[m["id"]["bioguide"]] = m
    # remove out-of-office people from current committee membership
    print("Sweeping committee membership...")
    membership_current = load_data("committee-membership-current.yaml")
    for committee_id in list(membership_current.keys()):
        # BUG FIX: iterate over a copy of the member list.  The original
        # removed from the list while iterating it, which silently skips
        # the element immediately after each removal (the social-media
        # sweep below already did this correctly).
        for member in list(membership_current[committee_id]):
            if member["bioguide"] not in current_bioguide:
                print("\t[%s] Ding ding ding! (%s)" % (member["bioguide"], member["name"]))
                membership_current[committee_id].remove(member)
    save_data(membership_current, "committee-membership-current.yaml")
    # remove out-of-office people from social media info
    print("Sweeping social media accounts...")
    socialmedia_current = load_data("legislators-social-media.yaml")
    for member in list(socialmedia_current):
        if member["id"]["bioguide"] not in current_bioguide:
            print("\t[%s] Ding ding ding! (%s)" % (member["id"]["bioguide"], member["social"]))
            socialmedia_current.remove(member)
    save_data(socialmedia_current, "legislators-social-media.yaml")
示例11: put
def put(self, name, content, type=None):
    """Store *content* under a sharded path derived from *name*.

    When *type* is given it is appended as an extension.  Returns the
    path relative to self.path where the content was written.
    """
    full_name = name if type is None else '%s.%s' % (name, type)
    # Shard into nested directories keyed on the name's first three characters
    # to keep any single directory from growing too large.
    prefix = '/'.join(full_name[:3])
    path = '%s/%s' % (prefix, full_name)
    save_data(self.path + '/' + path, content)
    return path
示例12: main
def main():
    """Build link slopes from matched GPS points, regenerating caches if missing."""
    # Rebuild the pickle caches only when either one is absent.
    if not os.path.exists('../data/matched_points.pkl') or not os.path.exists('../data/links.pkl'):
        print "Saving Data"
        save_data()
    print "Loading Data"
    matched_points, links = load_data()
    print "Data Loaded"
    # 10: processing parameter passed through to process();
    # NOTE(review): its meaning isn't visible from this file -- confirm.
    link_to_slopes = process(matched_points, links, 10)
    save_link_to_slopes(link_to_slopes, links)
示例13: measure_tips
def measure_tips(out_fname='results/experiment_run'):
    """Measure spiral-tip density for every file in the 'data' directory.

    Each file is handled in its own worker process (one per file), and the
    collected results are written to *out_fname* via save_data.
    """
    source_dir = 'data'
    paths = [os.path.join(source_dir, entry) for entry in os.listdir(source_dir)]
    with Pool(len(paths)) as workers:
        results = workers.map(handle_measure_tips, paths)
    save_data(out_fname, results)
示例14: run
def run(legislator_ids=None):
    """Geocode district offices for legislators, then save the YAML.

    legislator_ids: optional collection of bioguide IDs; when given, only
    those legislators are geocoded.  The file is saved in a `finally`
    block so partial progress survives interruption.
    """
    records = utils.load_data('legislators-district-offices.yaml')
    try:
        for record in records:
            if legislator_ids and record['id']['bioguide'] not in legislator_ids:
                continue
            geocode_offices(record)
    finally:
        # Save in-progress geocodes in case of keyboard interrupt
        print("Saving data...")
        utils.save_data(records, 'legislators-district-offices.yaml')
示例15: run
def run():
    """Attach C-SPAN IDs from the NYT Congress API to current legislators
    and save legislators-current.yaml.
    """
    # load in current members
    members = load_data("legislators-current.yaml")
    for member in members:
        # retrieve C-SPAN id, if available, from NYT API
        # TODO: use utils.download here
        url = "http://politics.nytimes.com/congress/svc/politics/v3/us/legislative/congress/members/%s.json" % member['id']['bioguide']
        raw = urllib.request.urlopen(url).read()
        payload = json.loads(raw.decode("utf8"))
        cspan = payload['results'][0]['cspan_id']
        if cspan != '':
            member['id']['cspan'] = int(cspan)
    save_data(members, "legislators-current.yaml")