本文整理汇总了Python中parse_rest.connection.ParseBatcher类的典型用法代码示例。如果您正苦于以下问题:Python ParseBatcher类的具体用法?Python ParseBatcher怎么用?Python ParseBatcher使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ParseBatcher类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setup_ghosts
def setup_ghosts(g):
    """
    Create *g* Ghost objects in Parse with a single ParseBatcher batch_save.

    Each Ghost gets a placeholder username/name, sex "G", a sequential
    ghostNum, and a fixed ten-event registration array. Prints a summary
    line with the elapsed upload time when done.
    """
    # Time the whole upload so the summary line can report the duration.
    started_at = time.time()

    # Parse class names come from parse_rest Object subclasses, so declare
    # a local subclass to target the "Ghost" class on the server.
    class Ghost(Object):
        pass

    pending_ghosts = [
        Ghost(
            username="Ghost Partner",
            ghostNum=number,
            firstName="Ghost",
            sex="G",
            array_eventsRegistered=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        )
        for number in range(1, g + 1)
    ]

    ParseBatcher().batch_save(pending_ghosts)
    print ("\n{} Ghost objects uploaded to Parse in {} seconds.\n".format(g, time.time() - started_at))
示例2: main
def main():
    """Scrape Mechanical Turk HIT listings and mirror them into Parse.

    Deletes every existing "HIT" object in Parse, then walks the mturk.com
    search-result pages, saving one object per publicly linked HIT until
    200 listings have been examined. Finally prints error/private counts.
    """
    # First request is only used to read the total result count off page 4.
    soup = BeautifulSoup(requests.get('https://www.mturk.com/mturk/viewhits?searchWords=&pageNumber=4&searchSpec=HITGroupSearch%23T%231%2310%23-1%23T%23%21%23%21NumHITs%211%21%23%21&sortType=NumHITs%3A1&selectedSearchType=hitgroups').text, "html.parser")
    titles = soup.findAll('a', {"class" : "capsulelink"})
    # The total is embedded in the orange title cell's text; the [8:-7]
    # slice strips the surrounding words — fragile if mturk changes markup.
    num_results = int(soup.findAll('td', {"class" : "title_orange_text"})[0].text.strip()[8:-7])
    print("\nTotal number of HITs: " + str(num_results))
    count = 0           # HIT listings examined so far (loop stops at 200)
    page = 1            # current search-results page
    requestErrors = 0   # detail pages whose hitForm could not be parsed
    privateCount = 0    # HITs without a public preview link
    register("DKJjvfvhnCGRK0cAdOpJN9MwR7zhIpuYya5xvbuF", "d8hIYrBrcW4r2ujEkL79vE03FmLxE2QCJgSwuXYv")
    # Wipe the existing HIT class in Parse before re-populating it.
    HITClass = ParseObject.factory("HIT")
    all_hits = HITClass.Query.all()
    batcher = ParseBatcher()
    batcher.batch_delete(all_hits)
    while (count < 200):
        soup = BeautifulSoup(requests.get('https://www.mturk.com/mturk/viewhits?searchWords=&pageNumber=' + str(page) + '&searchSpec=HITGroupSearch%23T%231%2310%23-1%23T%23%21%23%21NumHITs%211%21%23%21&sortType=NumHITs%3A1&selectedSearchType=hitgroups').text, "html.parser")
        titles = soup.findAll('a', {"class" : "capsulelink"})
        for t in titles:
            time.sleep(.3)  # throttle requests to mturk.com
            count = count + 1
            print("\n" + str(count) + "\nTitle: " + t.text.strip())
            linkA = t.parent.parent.findAll('span')[1].a
            # check if the link is public
            if linkA.has_attr('href'):
                link = linkA['href']
                hitPage = BeautifulSoup(requests.get('https://www.mturk.com' + link).text, "html.parser")
                form = hitPage.findAll('form', {'name' : 'hitForm'})
                # Check for error: the metadata lives in the third hitForm,
                # so fewer than three forms means an unexpected page layout.
                if len(form) >= 3:
                    form = form[2]
                    requester = form.find("input", {'name' : 'prevRequester'})['value']
                    print('Requester: ' + requester)
                    reward = form.find("input", {'name' : 'prevReward'})['value']
                    print('Reward: ' + reward)
                    groupID = form.find("input", {'name' : 'groupId'})['value']
                    print('Group id: ' + groupID)
                    # reward[3:] presumably strips a 3-char currency prefix
                    # (e.g. "USD") before the float conversion — TODO confirm.
                    # NOTE(review): this constructs HIT, not HITClass; HIT must
                    # be defined elsewhere in the file or this raises NameError.
                    anyObject = HIT(requester=requester, reward=float(reward[3:]),
                        title=t.text.strip(), groupID=groupID)
                    anyObject.save()
                else:
                    requestErrors = requestErrors + 1
                    print(link)
                    print(form)
            else:
                link = linkA['id']
                print(link)
                privateCount = privateCount + 1
        page = page + 1
    print("\n\nErrors: " + str(requestErrors))
    print("Private HITs: " + str(privateCount))
示例3: accept_post
def accept_post(self, post):
    """Accept *post* into this story and archive all snippet posts.

    Every post referenced by ``self.snippets`` is fetched, marked as
    archived, pointed back at this story, and persisted in one batch;
    the snippet list is then cleared and *post* is recorded as accepted.
    """
    archived = []
    for snip in self.snippets:
        snippet_post = Post.get(snip['objectId'])
        snippet_post.archived = True
        snippet_post.original_story = self
        archived.append(snippet_post)
    ParseBatcher().batch_save(archived)
    self.snippets = []
    self.accepted_posts.append(post)
示例4: testCanBatchUpdate
def testCanBatchUpdate(self):
    """Batch-saving an edited user must persist the change and bump updatedAt."""
    user = self._get_logged_user()
    phone_number = "555-0134"
    previous_updatedAt = user.updatedAt
    # Mutate one field, then push the update through the batch endpoint.
    user.phone = phone_number
    ParseBatcher().batch_save([user])
    self.assertTrue(User.Query.filter(phone=phone_number).exists(),
        'Failed to batch update user data. New info not on Parse')
    self.assertNotEqual(user.updatedAt, previous_updatedAt,
        'Failed to batch update user data: updatedAt not changed')
示例5: batchSaveList
def batchSaveList(self, listOfParseObjects):
if len(listOfParseObjects)==0:
return;
print 'batch saving objects'
self.appendToLog('batch saving %d objects' % len(listOfParseObjects))
#batch save a list of parseobjects. the batch limit is 50!
batcher = ParseBatcher()
batchLimit = 50
while len(listOfParseObjects)> 0:
#save the first @batchLimit amount of objects
batcher.batch_save(listOfParseObjects[0:batchLimit])
#clear the list of those saved objects
listOfParseObjects = listOfParseObjects[batchLimit:]
示例6: load_spell_data
def load_spell_data(db):
conn = get_db_connection(db)
curs = conn.cursor()
find_central_index(curs, **{"type": "spell"})
index_lines = curs.fetchall()
batch = []
batcher = ParseBatcher()
count = 0
for line in index_lines:
spell = get_parse_spell(line['url'])
if spell:
batch.append(make_spell(conn, line, spell))
else:
batch.append(make_spell(conn, line))
if len(batch) >= 50:
batcher.batch_save(batch)
batch = []
count += 50
print "Saving through %s" % count
batcher.batch_save(batch)
示例7: update_answers_in_Parse
def update_answers_in_Parse():
    """Synchronise the local _Answer.LIA list with the Answer class in Parse.

    Existing Parse Answers are compared attribute-by-attribute against
    their local counterparts (matched by ``num``): changed ones are
    replaced (delete + save), identical ones are dropped from the local
    list, and whatever remains in _Answer.LIA is uploaded in 50-object
    batches with a retry-on-failure loop.
    """
    # Get a list of all Answers in Parse.
    ct_a = Answer.Query.all().count()
    queryset = []
    batcher = ParseBatcher()
    print("{} Answers exist in Parse.".format(ct_a))
    if ct_a == 0: # None exist; upload whole list
        pass
    elif ct_a > 0: # There's at least 1 to get
        # Parse queries return at most 1000 rows, so page through them.
        for i in range(0, ct_a, min(ct_a,1000)): # for each chunk of <= 1000 answers
            queryset += list(Answer.Query.all().skip(i).limit(1000)) # get the chunk, add to queryset
        queryset.sort(key = attrgetter("num"))
        # Pair each Parse Answer with the local answer of the same num.
        # NOTE(review): queryset[a.num-1] assumes nums are 1..N with no
        # gaps after sorting — verify against how nums are assigned.
        for A, a in zip(queryset, [a for a in _Answer.LIA if queryset[a.num-1].num == a.num]): # for each answer with the same num
            # compare all attributes of the _Answer class.
            # if different, set Parse object's attribute to _Answer object's attribute;
            # if all are same, keep in Parse and delete from LIA
            for key in _Answer.LI_ATTR: # for all attributes of the _Answer class
                if getattr(A, key) != getattr(a, key): # if different
                    print(key, getattr(A,key), getattr(a,key))
                    # Replace the stale Parse object with the local one.
                    batcher.batch_delete([A])
                    batcher.batch_save([a])
                    # print("{} updated in Parse".format(a.ID))
                    break
                elif _Answer.LI_ATTR[-1] == key:
                    # Reached the last attribute with no differences:
                    # Parse copy is current, so drop the local duplicate.
                    _Answer.LIA.remove(a)
    print("{} Answers updated in Parse.".format(len(queryset)-len(_Answer.LIA)))
    print("{} Answers must be created in Parse.".format(len(_Answer.LIA)))
    # Now, upload those remaining in _Answer.LIA to Parse
    # (should put batch_upload_with_sleep in a separate function)
    # batch_save in chunks of no more than 50
    len_lia = len(_Answer.LIA)
    batcher = ParseBatcher()
    lili_chunks = [_Answer.LIA[i:i+50] for i in range(0, len_lia, 50)]
    for index, chunk in enumerate(lili_chunks):
        # Retry each chunk until it succeeds; presumably the bare except
        # is meant to absorb Parse rate-limit errors — TODO narrow it.
        while True:
            try:
                batcher.batch_save(chunk)
                print "\r{} of {} Answers uploaded to Parse".format(50*(index+1), len_lia),
                sys.stdout.flush()
                break
            except:
                print("Locked. Sleeping for 5 seconds.")
                time.sleep(5)
    print
示例8: ParseService
class ParseService(Base, LogMixin):
    """Thin service layer over the parse_rest client for ContentItem storage."""

    def __init__(self, settings):
        self.settings = settings
        self.fileRestClient = ParseFileRestClient(settings)
        self.galleryService = GalleryService(settings)
        # Register the app credentials once so Query/save calls work.
        parse_cfg = self.settings["parse"]
        register(parse_cfg["application_id"], parse_cfg["rest_api_key"])
        self.batcher = ParseBatcher()

    def getByFilePath(self, filePath):
        """Return the ContentItem stored under *filePath*."""
        return ContentItem.Query.get(filePath=filePath)

    def post(self, item):
        """Persist *item* to Parse and return the save result."""
        return item.save()

    def drop(self):
        """Delete every ContentItem, but only when the "drop" setting is on."""
        if not self.settings["drop"]:
            return
        # There is no truncate on parse, so we iterate and delete all...
        items = ContentItem.Query.all()
        self.logger.info("Truncating items... %s" % items.count())
        if items.count() > 0:
            self.batcher.batch_delete(items)
        self.logger.info("Done.")
示例9: put_answers_in_Parse
def put_answers_in_Parse():
    """Interactively wipe the Answer class in Parse, then upload _Answer.LIA.

    Prompts before deleting any existing Answers (answering anything but
    "Y" aborts both delete and upload), then batch-saves the local list
    in 50-object chunks, retrying each chunk until Parse accepts it.
    """
    # Query for first 1000 Answers
    queryset = list(Answer.Query.all().limit(1000))
    # The while/break construct is a poor-man's goto: every path either
    # returns, or breaks out to the batch_save section below.
    while True:
        if not queryset:
            print("No Answers to delete from Parse -- none exist.")
            break # skip to batch_save without deleting
        elif len(queryset) == len(_Answer.LI_A):
            print("{} Answers already exist in Parse.".format(len(queryset)))
            srsly_delete_stuff = raw_input("Continue with delete anyway? (Y/n): ")
            if srsly_delete_stuff != "Y":
                print "Delete skipped. Upload skipped."
                return
        else:
            print("There are {} Answers to delete from Parse.".format(len(queryset)))
            srsly_delete_stuff = raw_input("Delete Answers from Parse? (Y/n): ")
            if srsly_delete_stuff != "Y":
                print "Delete skipped. Upload skipped."
                return
        # batch_delete in chunks of no more than 50
        batcher = ParseBatcher()
        lili_chunks = [queryset[i:i+50] for i in range(0, len(queryset), 50)]
        for index, chunk in enumerate(lili_chunks):
            batcher.batch_delete(chunk)
            print "\r{} of {} Answers deleted from Parse".format(50*(index+1), len(queryset)),
            sys.stdout.flush()
        print
        break # go to batch_save
    # batch_save in chunks of no more than 50
    len_lia = len(_Answer.LIA)
    batcher = ParseBatcher()
    lili_chunks = [_Answer.LIA[i:i+50] for i in range(0, len_lia, 50)]
    for index, chunk in enumerate(lili_chunks):
        # Retry until the chunk saves; presumably the bare except is meant
        # to absorb Parse rate-limit errors — TODO narrow it.
        while True:
            try:
                batcher.batch_save(chunk)
                print "\r{} of {} Answers uploaded to Parse".format(50*(index+1), len_lia),
                sys.stdout.flush()
                break
            except:
                print("Locked. Sleeping for 5 seconds.")
                time.sleep(5)
    print
    pass
示例10: testBatch
def testBatch(self):
    """test saving, updating and deleting objects in batches"""
    batcher = ParseBatcher()
    # -- creation: five fresh GameScore rows in one batch -------------
    scores = []
    for value in range(5):
        scores.append(GameScore(score=value, player_name="Jane", cheat_mode=False))
    batcher.batch_save(scores)
    self.assertEqual(GameScore.Query.filter(player_name="Jane").count(), 5, "batch_save didn't create objects")
    self.assertTrue(all(s.objectId is not None for s in scores), "batch_save didn't record object IDs")
    # -- update: bump every score and save the same objects again ------
    for record in scores:
        record.score += 10
    batcher.batch_save(scores)
    updated_scores = GameScore.Query.filter(player_name="Jane")
    self.assertEqual(
        sorted(s.score for s in updated_scores), list(range(10, 15)), msg="batch_save didn't update objects"
    )
    # -- deletion: remove the batch and confirm nothing remains --------
    batcher.batch_delete(scores)
    self.assertEqual(GameScore.Query.filter(player_name="Jane").count(), 0, "batch_delete didn't delete objects")
示例11: put_questions_in_Parse
def put_questions_in_Parse():
    """Interactively wipe the Question class in Parse, then upload _Question.LIQ.

    Prompts before deleting any existing Questions (answering anything
    but "Y" aborts both delete and upload), then batch-saves the local
    list in 50-object chunks.
    """
    # Query for Questions
    queryset = list(Question.Query.all().limit(1000))
    # The while/break construct is a poor-man's goto: every path either
    # returns, or breaks out to the batch_save section below.
    while True:
        if not queryset:
            print("No Questions to delete from Parse -- none exist.")
            break
        elif len(queryset) == len(_Question.LI_Q):
            print("{} Questions already exist in Parse.".format(len(queryset)))
            srsly_delete_stuff = raw_input("Continue with delete anyway? (Y/n): ")
            if srsly_delete_stuff != "Y":
                print("Delete skipped. Upload skipped.")
                return
        else:
            print("There are {} Questions to delete from Parse.".format(len(queryset)))
            srsly_delete_stuff = raw_input("Delete Questions from Parse? (Y/n): ")
            if srsly_delete_stuff != "Y":
                print("Delete skipped. Upload skipped.")
                return
        # batch_delete in chunks of no more than 50
        batcher = ParseBatcher()
        lili_chunks = [queryset[i:i+50] for i in range(0, len(queryset), 50)]
        for index, chunk in enumerate(lili_chunks):
            batcher.batch_delete(chunk)
            print("\r{} of {} Questions deleted from Parse".format(50*(index+1), len(queryset)), end = "\r")
            sys.stdout.flush()
        # NOTE(review): bare `print` statements below are Python 2 syntax,
        # while print(..., end="\r") above requires Python 3 — this function
        # cannot run unmodified on either interpreter; confirm intent.
        print
        break
    # batch_save in chunks of no more than 50
    len_li_q = len(_Question.LIQ)
    batcher = ParseBatcher()
    lili_chunks = [_Question.LIQ[i:i+50] for i in range(0, len_li_q, 50)]
    for index, chunk in enumerate(lili_chunks):
        batcher.batch_save(chunk)
        print("\r{} of {} Questions uploaded to Parse".format(50*(index+1), len_li_q), end = "\r")
        sys.stdout.flush()
    print
    pass
示例12: hasattr
try:
print "-- " + i.Telephone
i.Telephone = i.Telephone.splitlines()[0]
except:
if hasattr(i, "Telephone"):
i.Telephone = ""
try:
i.Email = i.Email.splitlines()[0]
except:
if hasattr(i, "Email"):
i.Email = ""
cc += 1
#i.save()
if len(batchclients) == 50:
batcher = ParseBatcher()
batcher.batch_save(batchclients)
batchclients = []
n = len(res)
counter += n
print len(clients)
示例13: register
import json
from pprint import pprint
from parse_rest.connection import register, ParseBatcher
from parse_rest.datatypes import Object as ParseObject
from parse_rest.datatypes import ParseType, ParseResource

APPLICATION_ID = "2yokKd96SUq3dKCQDcSI7LlGPJ7ZddnCMwbCIvX7"
REST_API_KEY = "MyfLxYfGm8iaxVahmsTCeKSNNuiz2wKzkQIOCyhS"
register(APPLICATION_ID, REST_API_KEY)

# Build Parse objects for every Spring-2015 course in the selected divisions.
with open('doc.json') as data_file:
    data = json.load(data_file)
data_to_upload = []
for current in data:
    if current['Term'] == '20151':
        if current['DivisionCode'] == 'CC' or current['DivisionName'] == 'SCH OF ENGR & APP SCI: UGRAD' or current['DivisionCode'] == 'BC' or current['DivisionCode'] == 'GS':
            newClass = ParseObject()
            newClass.class_code = current['Course']
            newClass.instructor = current['Instructor1Name']
            newClass.name = current['CourseTitle']
            #call function that gets location, start, and end time
            #newClass.location, newClass.startTime, newClass.endTime = parseMeetString(current)
            data_to_upload.append(newClass)

# Upload in batches of 50 (Parse's batch limit). The original used a
# C-style ternary (`? :`) to clamp the slice end — a SyntaxError in
# Python; slicing already clamps out-of-range indices, so no bound
# check is needed.
batcher = ParseBatcher()
for x in range(0, len(data_to_upload), 50):
    batcher.batch_save(data_to_upload[x:x + 50])
示例14: prepare_R1
#.........这里部分代码省略.........
# # Save objects to Firebase, grouped by iPad.
# # Structure looks like:
# """
# "zE####R1_inx_obj_by_iPadNum": {
# "ipadNum_####": {
# "ixnNum_####": {
# "{}".format(li_R1_obj_to_upload.ixnNum): {
# "subNum": li_R1_obj_to_upload.subNum,
# "staNum": li_R1_obj_to_upload.staNum,
# ...
# },
# ...
# },
# ...
# },
# ...
# }
# """
# # Create references to Firebase.
# ref_root = Firebase('https://burning-fire-8681.firebaseio.com')
# ref_R1_ixn_objs_by_iPadNum = ref_root.child(
# '{}R1_inx_objs_by_iPadNum'.format(ep)
# )
# # Create references for all iPads, and put them in a dictionary.
# # In the dictionary, the keys are the ipadNums, and the values are
# # lists containing the FB reference and a list of interaction objects.
# # { "i" : [ ref, [ ixn_obj, ixn_obj, ... ] ] }
# dll_ix_objs = {}
# for a in li_m_ipadNums + li_f_ipadNums:
# ref_ipadNum = ref_R1_ixn_objs_by_iPadNum.child('{}'.format(a))
# # ref_ipadNum.patch(
# # {
# # "something": "goes here"
# # })
# print ("iPad {} has been put into Firebase.".format(a))
# #li_ref_ipadNum["{}".format(a)] = [ref_ipadNum, []]
# #li_ref_ipadNum.append([a, ref_ipadNum, []])
# dll_ix_objs[str(a)] = [ref_ipadNum, []]
# pprint(dll_ix_objs)
# # Iterate through all objects, adding them to the right place in
# # the dictionary, then upload the dictionary into Firebase
# for ix_obj in li_R1_obj_to_upload:
# str_correct_m_iPad = str(ix_obj.m_ipadNum)
# str_correct_f_iPad = str(ix_obj.f_ipadNum)
# dll_ix_objs[str_correct_m_iPad][1].append(ix_obj)
# dll_ix_objs[str_correct_f_iPad][1].append(ix_obj)
# pprint(dll_ix_objs)
# # put into Firebase
# for k,v in dll_ix_objs.iteritems():
# ref = Firebase(v[0])
# #ref.patch(v[1])
# print("List added for iPad with ipadNum = {} at location: {}".format(k, ref))
# for obj in li_R1_obj_to_upload:
# pprint(vars(obj))
# Save multiple objects to Parse.
# Call batcher.batch_save on slices of the list no larger than 50.
# Parse will timeout if 1800 requests are made in 60 seconds,
# hence the time.sleep(1.67) every 50 objects saved. I could probably
# get away with sleeping less, but no reason to take chances.
batcher = ParseBatcher()
for b in range(counter_ixn/50 + 1):
lo = 50*b
hi = min(50 * (b+1), counter_ixn)
batcher.batch_save(li_R1_obj_to_upload[lo:hi])
sys.stdout.write("\r{} of {} new inx's uploaded ({}{})".format(
50 + (50*b),
counter_ixn,
int(round((50*(b+1)*100.0)/counter_ixn, 0)),
"%"
))
sys.stdout.flush() # must be done for it to work (why?)
# time.sleep(1.67) # explained above
sys.stdout.write("\n") # move the cursor to the next line after we're done
示例15: ParseBatcher
agency.Name = name
agency.Email = email
agency.Company = {"__type": "Pointer", "className": "Companies", "objectId": "ATTeW8GRQt"}
agency.Key = company
agency.Country = country
#client.save()
agencieslist.append(agency)
print email
counter += 1
if(counter == 20):
batcher = ParseBatcher()
batcher.batch_save(agencieslist)
agencieslist = []
print "Esperando algunos segundos"
#esperar un poco
time.sleep(.1)
counter=0
print "Trabajando ..."
print str(len(lines)) + " lines analized"