This article collects typical usage examples of the GCContact class from the Python module contact_gc. If you are wondering what GCContact is for or how to use it, the selected examples below should help.
The following 11 code examples of the GCContact class are shown, ordered by popularity by default.
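Before the individual examples, here is a minimal sketch of the GCContact lifecycle that most of them share: construct a contact against a Google contacts folder object, set a few fields, and save it. This is only an illustrative sketch; the folder variable is a placeholder for whatever initialised Google contacts folder object the surrounding application provides (see Examples 3 and 9 below).

from contact_gc import GCContact

# 'folder' is assumed to be an already-initialised Google contacts folder
# object supplied by the surrounding application (see Examples 3 and 9).
con = GCContact(folder)
con.set_name('Test Contact')
con.add_notes('Created from the GCContact usage sketch')

cid = con.save()          # returns the new contact's id on success
if cid:
    print 'Saved contact with id:', cid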
Example 1: test_batch_error
def test_batch_error (self):
    fobj = self.find_group(self.gid)

    con0 = GCContact(fobj)
    con0.set_firstname('Namo Narayananaya')
    gce0 = con0.get_gce()

    con = GCContact(fobj)
    con.set_firstname('Ayeshwarya')
    con.set_birthday('abcd"ef')
    # con.set_anniv('1978-05-31 %s est né il y a %d ans')
    # con.set_birthday('1980-08-10')
    gce = con.get_gce()

    feed = self.pimdb.new_feed()
    feed.add_insert(entry=gce0, batch_id_string="DeadBeef")
    feed.add_insert(entry=gce0, batch_id_string="DeadBeef")
    feed.add_insert(entry=gce, batch_id_string="DeadBeef")

    b = BatchState(1, feed, op='insert', sync_tag="asynk:testgcex:ex")

    print 'Request: ', utils.pretty_xml(str(feed))
    rr = self.pimdb.exec_batch(feed)
    print 'Response: ', utils.pretty_xml(str(rr))

    for entry in rr.entry:
        print entry.batch_status
        if entry.batch_status:
            print 'Code: ', entry.batch_status.code
            print 'Reason: ', entry.batch_status.reason
        else:
            self.handle_interrupted_feed(feed, str(rr))
Example 2: new_item
def new_item (self, item):
    """Add the specified item to the folder."""

    if item.__class__.__name__ == 'GCContact':
        con = item
    else:
        con = GCContact(self, con=item)

    eid = con.save()
    return eid
Example 3: test_create_contact
def test_create_contact(self, f=None):
    if not f:
        f = self.gout

    c = GCContact(f)
    c.set_name("ScrewBall Joseph")

    cid = c.save()
    if cid:
        print "Successfully added contact. ID: ", cid
    else:
        print "D'oh. Failed."
Example 4: batch_create
def batch_create (self, src_sl, src_dbid, items):
    """See the documentation in folder.Folder"""

    my_dbid = self.get_dbid()
    c = self.get_config()
    pname = src_sl.get_pname()

    src_sync_tag = c.make_sync_label(src_sl.get_pname(), src_dbid)
    dst_sync_tag = c.make_sync_label(src_sl.get_pname(), my_dbid)

    f = self.get_db().new_feed()
    stats = BatchState(1, f, 'insert', sync_tag=dst_sync_tag)

    success = True
    for item in items:
        con_itemid = item.get_itemid_from_synctags(pname, 'gc')
        gc = GCContact(self, con=item, con_itemid=con_itemid)
        bid = item.get_itemid()
        gc.update_sync_tags(src_sync_tag, bid)

        gce = gc.get_gce()

        stats.add_con(bid, new=gc, orig=item)
        f.add_insert(entry=gce, batch_id_string=bid)
        stats.incr_cnt()

        if stats.get_cnt() % self.get_batch_size() == 0:
            # Feeds have to be less than 1MB. We can push this some
            # more. FIXME.
            logging.debug('Uploading new batch # %02d to Google. ' +
                          'Count: %3d. Size: %6.2fK',
                          stats.get_bnum(), stats.get_cnt(),
                          stats.get_size())

            rf = self.get_db().exec_batch(f)
            succ, cons = stats.process_batch_response(rf)
            success = success and succ

            f = self.get_db().new_feed()
            stats = BatchState(stats.get_bnum()+1, f, 'insert',
                               sync_tag=dst_sync_tag)

    # Upload any leftovers
    if stats.get_cnt() > 0:
        logging.debug('New Batch # %02d. Count: %3d. Size: %5.2fK',
                      stats.get_bnum(), stats.get_cnt(),
                      stats.get_size())

        rf = self.get_db().exec_batch(f)
        succ, cons = stats.process_batch_response(rf)
        success = success and succ

    return success
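The chunking idiom in batch_create above (flush a batch every get_batch_size() entries because a feed has to stay under roughly 1MB, then flush whatever is left over at the end) is worth isolating. The sketch below is only an illustration of that pattern using plain lists and a hypothetical send callable; it is not part of the GCContact API.

def send_in_batches (entries, batch_size, send):
    """Hypothetical helper illustrating the flush-every-N-then-flush-leftovers
    pattern used by batch_create above."""
    batch = []
    for e in entries:
        batch.append(e)
        if len(batch) == batch_size:
            send(batch)           # flush a full batch
            batch = []

    if batch:                     # flush any leftovers at the end
        send(batch)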
Example 5: _fetch_gc_entries
def _fetch_gc_entries (self, gcids):
    """gcids is a list of google contact ids to retrieve contact
    entries for.

    Returns a tuple (success, list of ContactEntries)."""

    f = self.get_db().new_feed()
    stats = BatchState(1, f, 'query', sync_tag=None)

    ret = []
    success = True
    for gcid in gcids:
        gcid = GCContact.normalize_gcid(gcid)
        ce = gdata.contacts.data.ContactEntry()
        ce.id = atom.data.Id(text=gcid)
        stats.add_con(gcid, ce, orig=None)

        f.add_query(entry=ce, batch_id_string=gcid)
        stats.incr_cnt()

        if stats.get_cnt() % self.get_batch_size() == 0:
            # Feeds have to be less than 1MB. We can push this some
            # more
            logging.debug('Qry Batch # %02d. Count: %3d. Size: %6.2fK',
                          stats.get_bnum(), stats.get_cnt(),
                          stats.get_size())

            rf = self.get_db().exec_batch(f)
            suc, ces = stats.process_batch_response(rf)
            success = success and suc
            [ret.append(x) for x in ces]

            f = self.get_db().new_feed()
            s = BatchState(stats.get_bnum()+1, f, 'query', sync_tag=None)
            stats = s

    # Process any leftovers
    if stats.get_cnt() > 0:
        logging.debug('Qry Batch # %02d. Count: %3d. Size: %5.2fK',
                      stats.get_bnum(), stats.get_cnt(),
                      stats.get_size())

        rf = self.get_db().exec_batch(f)
        suc, ces = stats.process_batch_response(rf)
        success = success and suc
        [ret.append(x) for x in ces]

    return success, ret
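For context, a caller typically consumes the (success, entries) return value along these lines. This is only a hypothetical usage sketch: folder stands in for the ASynK Google contacts folder object on which _fetch_gc_entries is defined, and the ids would normally come from sync tags as in batch_update below.

# Hypothetical usage sketch (folder and gcids are placeholders).
gcids = []    # fill with Google contact ids / edit links collected elsewhere
ok, entries = folder._fetch_gc_entries(gcids)
if ok:
    for ce in entries:
        # each ce is a gdata.contacts.data.ContactEntry carrying the latest
        # etag, ready to be modified and pushed back in an update batch
        print ce.etag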
Example 6: batch_update
def batch_update (self, sync_list, src_dbid, items):
    """See the documentation in folder.Folder"""

    # Updates and deletes on Google require not just the entryid but also
    # its correct etag, which is a version identifier. This is to ensure
    # two apps do not overwrite each other's work without even knowing
    # about it. So we need to approach this in two steps: (a) Fetch the
    # ContactEntries for all the items we are interested in. The returned
    # entry objects have all the required info, including the latest
    # etag. (b) Modify the same entry with the local updates and send it
    # back.

    my_dbid = self.get_dbid()
    c = self.get_config()
    pname = sync_list.get_pname()

    src_sync_tag = c.make_sync_label(pname, src_dbid)
    dst_sync_tag = c.make_sync_label(pname, my_dbid)

    tags = [item.get_sync_tags(dst_sync_tag)[0] for item in items]
    gcids = [val for (tag, val) in tags]

    logging.debug('Refreshing etags for modified entries...')

    success, ces = self._fetch_gc_entries(gcids)
    etags = [copy.deepcopy(ce.etag) for ce in ces]

    f = self.get_db().new_feed()
    stats = BatchState(1, f, 'update', sync_tag=dst_sync_tag)

    for item, etag in zip(items, etags):
        gc = GCContact(self, con=item)
        bid = item.get_itemid()
        gc.update_sync_tags(src_sync_tag, bid)

        gce = gc.get_gce()
        gce.etag = etag

        stats.add_con(bid, new=gc, orig=item)
        f.add_update(entry=gce, batch_id_string=bid)
        stats.incr_cnt()

        if stats.get_cnt() % self.get_batch_size() == 0:
            # Feeds have to be less than 1MB. We can push this some
            # more. FIXME.
            logging.debug('Uploading mod batch # %02d to Google. ' +
                          'Count: %3d. Size: %6.2fK',
                          stats.get_bnum(), stats.get_cnt(),
                          stats.get_size())

            rf = self.get_db().exec_batch(f)
            succ, cons = stats.process_batch_response(rf)
            success = success and succ

            f = self.get_db().new_feed()
            stats = BatchState(stats.get_bnum()+1, f, 'update',
                               sync_tag=dst_sync_tag)

    # Upload any leftovers
    if stats.get_cnt() > 0:
        logging.debug('Mod Batch # %02d. Count: %3d. Size: %5.2fK',
                      stats.get_bnum(), stats.get_cnt(),
                      stats.get_size())

        rf = self.get_db().exec_batch(f)
        succ, cons = stats.process_batch_response(rf)
        success = success and succ

    return success
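Restating the comment at the top of batch_update as a bare sketch: Google refuses an update whose etag is stale, so the code first re-fetches the entries to get fresh etags (step a) and only then attaches each fresh etag to the locally modified entry before adding it to the update feed (step b). The sketch below strips out the batching and statistics bookkeeping; folder, items and gcids are hypothetical placeholders for objects the application already holds, and only calls shown in Example 6 are used.

import copy

# Hedged sketch of the two-step update flow (placeholders, not a complete API).
ok, fresh = folder._fetch_gc_entries(gcids)             # (a) refresh the etags
feed = folder.get_db().new_feed()
for item, ce in zip(items, fresh):
    gc = GCContact(folder, con=item)                    # local version of the contact
    gce = gc.get_gce()
    gce.etag = copy.deepcopy(ce.etag)                   # (b) attach the latest etag
    feed.add_update(entry=gce, batch_id_string=item.get_itemid())
resp = folder.get_db().exec_batch(feed)                 # push the whole batch at once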
Example 7: prep_sync_lists
def prep_sync_lists (self, destid, sl, updated_min=None, cnt=0):
    """See the documentation in folder.Folder"""

    pname = sl.get_pname()
    conf = self.get_config()
    pdb1id = conf.get_profile_db1(pname)
    oldi = conf.get_itemids(pname)
    newi = self.get_itemids(pname, destid)

    kss = newi.keys()
    for x, y in oldi.iteritems():
        if not x in kss and not y in kss:
            logging.debug('Del Google Contact: %s:%s', x, y)
            if pdb1id == self.get_dbid():
                sl.add_del(x, y)
            else:
                sl.add_del(y, x)

    logging.info('Querying Google for status of Contact Entries...')

    stag = conf.make_sync_label(pname, destid)

    ## FIXME: The following commented out code appears very fishy. I am
    ## not able to recall why these two have to be used in sorted order. I
    ## am pretty sure there was some sense behind it, but as of now db1
    ## and db2 are not really being used; so the code works even without
    ## this "sorted" behaviour... Hm, this really should go, but I am
    ## being conservative here and leaving the stuff commented out so we
    ## can come back to it later if required.

    # ## Sort the DBIds so dest1 has the 'lower' ID
    # db1 = self.get_db().get_dbid()
    # if db1 > destid:
    #     db2 = db1
    #     db1 = destid
    # else:
    #     db2 = destid

    if not updated_min:
        updated_min = conf.get_last_sync_stop(pname)

    # FIXME: We are fetching the group feed a second time. Ideally we
    # should be able to do everything we want with the feed already fetched
    # above. This has a performance implication for groups with a large
    # number of items. Will fix this once functionality is validated.
    feed = self._get_group_feed(updated_min=updated_min, showdeleted='false')

    logging.info('Response received from Google. Processing...')

    if not feed.entry:
        logging.info('No entries in feed.')
        for x in kss:
            sl.add_unmod(x)
        return

    skip = 0
    etag_cnt = 0

    for i, entry in enumerate(feed.entry):
        gcid = utils.get_link_rel(entry.link, 'edit')
        gcid = GCContact.normalize_gcid(gcid)
        olid = get_udp_by_key(entry.user_defined_field, stag)
        etag = entry.etag
        epd = entry.deleted

        name = None
        if entry.name:
            if entry.name.full_name:
                name = entry.name.full_name.text
            elif entry.name.family_name:
                name = entry.name.family_name.text
            elif entry.name.given_name:
                name = entry.name.given_name.text

        if epd:
            if olid:
                pass
                # We will trust our own delete logic...
                # sl.add_del(gcid)
            else:
                # Deleted before it got synched. Get on with life
                skip += 1
                continue
        else:
            if olid:
                logging.debug('Modified Google Contact: %20s %s',
                              name, gcid)
                sl.add_mod(gcid, olid)
            else:
                logging.debug('New Google Contact: %20s %s',
                              name, gcid)
                sl.add_new(gcid)

        if etag:
            sl.add_etag(gcid, etag)
            etag_cnt += 1
        else:
            sl.add_entry(gcid)

    for x in kss:
        # ... (the rest of this example's code is omitted here) ...
Example 8: process_batch_response (fragment)
            else:
                ## We could just print a more detailed error for all
                ## cases. Should do some time FIXME.
                logging.error('Sync failed for bid %s: %s: %s',
                              bid, err_str, entry.id)
        else:
            if op == 'query':
                con = entry
                # We could build and return array for all cases, but
                # why waste memory...
                cons.append(con)
            elif op in ['insert', 'update']:
                con = self.get_con(bid)
                orig = self.get_orig(bid)

                gcid = utils.get_link_rel(entry.link, 'edit')
                gcid = GCContact.normalize_gcid(gcid)

                orig.update_sync_tags(self.sync_tag, gcid)
                cons.append(orig)

                t = None
                if op == 'insert':
                    t = 'created'
                elif op == 'update':
                    t = 'updated'

                if t:
                    logging.info('Successfully %s gmail entry for %30s (%s)',
                                 t, con.get_name(), orig.get_itemid())

    return success, cons
Example 9: create_gc_contact
def create_gc_contact (asynk, uinps):
    gc = asynk.get_db('gc')
    gcfid = uinps.gcfid
    gcf, t = gc.find_folder(gcfid)

    con = GCContact(gcf)
    con.set_name('Sri Venkata Sri Rama Subramanya Anjeneya Annapurna Sharma')
    con.set_prefix('Mr.')
    con.set_nickname('Karra')
    # con.set_gender('Male')
    con.add_phone_mob(('Mobile', '+91 90084 88997'))
    con.add_notes('And so it goes...')

    # FIXME: We should do a more exhaustive sort of contact, with multiple
    # entries of each type of possible entry and so on...

    return con.save()
Example 10: process_batch_response
def process_batch_response (self, resp):
    """resp is the response feed obtained from a batch operation to
    Google.

    This routine will walk through the batch response entries, and
    make a note in the outlook database for successful sync, or handle
    errors appropriately.

    Returns a tuple (success, cons), where success is a boolean indicating
    whether all the entries were processed successfully, and cons is a list
    of contact items from the batch operation."""

    op = self.get_operation()
    cons = []
    success = True

    for entry in resp.entry:
        bid = entry.batch_id.text if entry.batch_id else None
        if not entry.batch_status:
            # There is something seriously wrong with this request.
            self.handle_interrupted_feed(str(resp))
            success = False
            continue

        code = int(entry.batch_status.code)
        reason = entry.batch_status.reason

        if code != SYNC_OK and code != SYNC_CREATED:
            # FIXME this code path needs to be tested properly
            err = sync_status_str(code)
            err_str = '' if err is None else ('Code: %s' % err)
            err_str = 'Reason: %s. %s' % (reason, err_str)

            success = False
            if op == 'insert' or op == 'update':
                try:
                    name = self.get_con(bid).get_disp_name()
                except Exception, e:
                    name = "WTH!"
                logging.error('Upload to Google failed for: %s: %s',
                              name, err_str)
            elif op == 'Writeback olid':
                logging.error('Could not complete sync for: %s: %s: %s',
                              bid, err_str, entry.id)
            else:
                ## We could just print a more detailed error for all
                ## cases. Should do some time FIXME.
                logging.error('Sync failed for bid %s: %s: %s',
                              bid, err_str, entry.id)
        else:
            if op == 'query':
                con = entry
                # We could build and return array for all cases, but
                # why waste memory...
                cons.append(con)
            elif op in ['insert', 'update']:
                con = self.get_con(bid)
                orig = self.get_orig(bid)

                gcid = utils.get_link_rel(entry.link, 'edit')
                gcid = GCContact.normalize_gcid(gcid)

                orig.update_sync_tags(self.sync_tag, gcid)
                cons.append(orig)

                t = None
                if op == 'insert':
                    t = 'created'
                elif op == 'update':
                    t = 'updated'

                if t:
                    logging.info('Successfully %s gmail entry for %30s (%s)',
                                 t, con.get_disp_name(), orig.get_itemid())

    return success, cons
Example 11: prep_sync_lists
def prep_sync_lists (self, destid, sl, updated_min=None, cnt=0):
    """See the documentation in folder.Folder"""

    pname = sl.get_pname()
    conf = self.get_config()

    logging.info('Querying Google for status of Contact Entries...')

    stag = conf.make_sync_label(pname, destid)

    ## Sort the DBIds so dest1 has the 'lower' ID
    db1 = self.get_db().get_dbid()
    if db1 > destid:
        db2 = db1
        db1 = destid
    else:
        db2 = destid

    if not updated_min:
        updated_min = conf.get_last_sync_stop(pname)

    feed = self._get_group_feed(updated_min=updated_min, showdeleted='false')

    logging.info('Response received from Google. Processing...')

    if not feed.entry:
        logging.info('No entries in feed.')
        return

    skip = 0
    etag_cnt = 0

    for i, entry in enumerate(feed.entry):
        gcid = utils.get_link_rel(entry.link, 'edit')
        gcid = GCContact.normalize_gcid(gcid)
        olid = get_udp_by_key(entry.user_defined_field, stag)
        etag = entry.etag
        epd = entry.deleted

        name = None
        if entry.name.full_name:
            name = entry.name.full_name.text
        elif entry.name.family_name:
            name = entry.name.family_name.text
        elif entry.name.given_name:
            name = entry.name.given_name.text

        if epd:
            if olid:
                sl.add_del(gcid, olid)
            else:
                # Deleted before it got synched. Get on with life
                skip += 1
                continue
        else:
            if olid:
                logging.debug('Modified Google Contact: %20s %s',
                              name, gcid)
                sl.add_mod(gcid, olid)
            else:
                logging.debug('New Google Contact: %20s %s',
                              name, gcid)
                sl.add_new(gcid)

        if etag:
            sl.add_etag(gcid, etag)
            etag_cnt += 1
        else:
            sl.add_entry(gcid)

    logging.debug('num with etags   : %5d', etag_cnt)
    logging.debug('num del bef sync : %5d', skip)

    logging.info('Note: Stats for Google Contacts are only for the '
                 'changeset since the last sync. In particular the total '
                 'count is NOT the total number of contacts in your folder!')