This page collects typical usage examples of the Python method singer.utils.now. If you are unsure what utils.now does or how to call it, the curated examples below may help; you can also explore the other members of the singer.utils module.
Fifteen code examples of utils.now are shown below, sorted by popularity by default.
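Before the examples, a minimal sketch of what the method provides may be useful. In singer-python, utils.now() returns a timezone-aware UTC datetime, which taps typically pass as the time_extracted argument of singer.write_record; the stream name and record below are hypothetical.

import singer
from singer import utils

# utils.now() returns a timezone-aware datetime in UTC.
extraction_time = utils.now()
assert extraction_time.tzinfo is not None

# Typical use: stamp each emitted record with the moment it was extracted.
singer.write_record("my_stream", {"id": 1}, time_extracted=extraction_time)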
Example 1: _sync_contacts_by_company
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def _sync_contacts_by_company(STATE, ctx, company_id):
    schema = load_schema(CONTACTS_BY_COMPANY)
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    url = get_url("contacts_by_company", company_id=company_id)
    path = 'vids'
    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        with metrics.record_counter(CONTACTS_BY_COMPANY) as counter:
            data = request(url, default_contacts_by_company_params).json()
            for row in data[path]:
                counter.increment()
                record = {'company-id': company_id,
                          'contact-id': row}
                record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata)
                singer.write_record("contacts_by_company", record, time_extracted=utils.now())
    return STATE
Example 2: sync_campaigns
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_campaigns(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    schema = load_schema("campaigns")
    singer.write_schema("campaigns", schema, ["id"], catalog.get('stream_alias'))
    LOGGER.info("sync_campaigns(NO bookmarks)")
    url = get_url("campaigns_all")
    params = {'limit': 500}

    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in gen_request(STATE, 'campaigns', url, params, "campaigns", "hasMore", ["offset"], ["offset"]):
            record = request(get_url("campaigns_detail", campaign_id=row['id'])).json()
            record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata)
            singer.write_record("campaigns", record, catalog.get('stream_alias'), time_extracted=utils.now())

    return STATE
Example 3: main
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def main():
    # Parse command line arguments
    args = utils.parse_args(REQUIRED_CONFIG_KEYS)

    # If discover flag was passed, run discovery mode and dump output to stdout
    if args.discover:
        catalog = discover()
        print(json.dumps(catalog, indent=2))
    # Otherwise run in sync mode
    else:
        Context.tap_start = utils.now()
        if args.catalog:
            Context.catalog = args.catalog.to_dict()
        else:
            Context.catalog = discover()

        Context.config = args.config
        Context.state = args.state
        sync()
Example 4: _sync_contact_vids
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def _sync_contact_vids(catalog, vids, schema, bumble_bee):
    if len(vids) == 0:
        return

    data = request(get_url("contacts_detail"), params={'vid': vids, 'showListMemberships': True, "formSubmissionMode": "all"}).json()
    time_extracted = utils.now()
    mdata = metadata.to_map(catalog.get('metadata'))

    for record in data.values():
        record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata)
        singer.write_record("contacts", record, catalog.get('stream_alias'), time_extracted=time_extracted)
Example 5: sync_contact_lists
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_contact_lists(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    schema = load_schema("contact_lists")
    bookmark_key = 'updatedAt'
    singer.write_schema("contact_lists", schema, ["listId"], [bookmark_key], catalog.get('stream_alias'))

    start = get_start(STATE, "contact_lists", bookmark_key)
    max_bk_value = start

    LOGGER.info("sync_contact_lists from %s", start)

    url = get_url("contact_lists")
    params = {'count': 250}
    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in gen_request(STATE, 'contact_lists', url, params, "lists", "has-more", ["offset"], ["offset"]):
            record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata)

            if record[bookmark_key] >= start:
                singer.write_record("contact_lists", record, catalog.get('stream_alias'), time_extracted=utils.now())
            if record[bookmark_key] >= max_bk_value:
                max_bk_value = record[bookmark_key]

    STATE = singer.write_bookmark(STATE, 'contact_lists', bookmark_key, max_bk_value)
    singer.write_state(STATE)
    return STATE
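Examples 5 through 8 all follow the same incremental-sync shape: read the stored bookmark, emit only records at or past it, track the highest bookmark value seen, and write it back to state. A distilled sketch of that pattern, where fetch_rows is a hypothetical callable standing in for the paginated API request:

import singer
from singer import utils

def sync_incremental(state, stream, bookmark_key, fetch_rows):
    # Resume from the stored bookmark, or sync everything on the first run.
    start = singer.get_bookmark(state, stream, bookmark_key) or ''
    max_bk_value = start
    for row in fetch_rows():
        if row[bookmark_key] >= start:
            singer.write_record(stream, row, time_extracted=utils.now())
        if row[bookmark_key] >= max_bk_value:
            max_bk_value = row[bookmark_key]
    # Persist the highest bookmark seen so the next run can resume there.
    state = singer.write_bookmark(state, stream, bookmark_key, max_bk_value)
    singer.write_state(state)
    return state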
Example 6: sync_forms
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_forms(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    schema = load_schema("forms")
    bookmark_key = 'updatedAt'
    singer.write_schema("forms", schema, ["guid"], [bookmark_key], catalog.get('stream_alias'))

    start = get_start(STATE, "forms", bookmark_key)
    max_bk_value = start

    LOGGER.info("sync_forms from %s", start)

    data = request(get_url("forms")).json()
    time_extracted = utils.now()

    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in data:
            record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata)

            if record[bookmark_key] >= start:
                singer.write_record("forms", record, catalog.get('stream_alias'), time_extracted=time_extracted)
            if record[bookmark_key] >= max_bk_value:
                max_bk_value = record[bookmark_key]

    STATE = singer.write_bookmark(STATE, 'forms', bookmark_key, max_bk_value)
    singer.write_state(STATE)
    return STATE
Example 7: sync_workflows
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_workflows(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    schema = load_schema("workflows")
    bookmark_key = 'updatedAt'
    singer.write_schema("workflows", schema, ["id"], [bookmark_key], catalog.get('stream_alias'))

    start = get_start(STATE, "workflows", bookmark_key)
    max_bk_value = start

    STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, max_bk_value)
    singer.write_state(STATE)

    LOGGER.info("sync_workflows from %s", start)

    data = request(get_url("workflows")).json()
    time_extracted = utils.now()

    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in data['workflows']:
            record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata)
            if record[bookmark_key] >= start:
                singer.write_record("workflows", record, catalog.get('stream_alias'), time_extracted=time_extracted)
            if record[bookmark_key] >= max_bk_value:
                max_bk_value = record[bookmark_key]

    STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, max_bk_value)
    singer.write_state(STATE)
    return STATE
Example 8: sync_owners
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_owners(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    schema = load_schema("owners")
    bookmark_key = 'updatedAt'
    singer.write_schema("owners", schema, ["ownerId"], [bookmark_key], catalog.get('stream_alias'))

    start = get_start(STATE, "owners", bookmark_key)
    max_bk_value = start

    LOGGER.info("sync_owners from %s", start)

    params = {}
    if CONFIG.get('include_inactives'):
        params['includeInactives'] = "true"
    data = request(get_url("owners"), params).json()
    time_extracted = utils.now()

    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in data:
            record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata)
            if record[bookmark_key] >= max_bk_value:
                max_bk_value = record[bookmark_key]

            if record[bookmark_key] >= start:
                singer.write_record("owners", record, catalog.get('stream_alias'), time_extracted=time_extracted)

    STATE = singer.write_bookmark(STATE, 'owners', bookmark_key, max_bk_value)
    singer.write_state(STATE)
    return STATE
Example 9: ad_creative_success
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def ad_creative_success(response, stream=None):
    '''A success callback for the FB Batch endpoint used when syncing AdCreatives. Needs the stream
    to resolve schema refs and transform the successful response object.'''
    refs = load_shared_schema_refs()
    schema = singer.resolve_schema_references(stream.catalog_entry.schema.to_dict(), refs)

    rec = response.json()
    record = Transformer(pre_hook=transform_date_hook).transform(rec, schema)
    singer.write_record(stream.name, record, stream.stream_alias, utils.now())
Example 10: job_params
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def job_params(self):
    start_date = get_start(self, self.bookmark_key)

    buffer_days = 28
    if CONFIG.get('insights_buffer_days'):
        buffer_days = int(CONFIG.get('insights_buffer_days'))

    buffered_start_date = start_date.subtract(days=buffer_days)

    end_date = pendulum.now()
    if CONFIG.get('end_date'):
        end_date = pendulum.parse(CONFIG.get('end_date'))

    # Some automatic fields (primary-keys) cannot be used as 'fields' query params.
    while buffered_start_date <= end_date:
        yield {
            'level': self.level,
            'action_breakdowns': list(self.action_breakdowns),
            'breakdowns': list(self.breakdowns),
            'limit': self.limit,
            'fields': list(self.fields().difference(self.invalid_insights_fields)),
            'time_increment': self.time_increment,
            'action_attribution_windows': list(self.action_attribution_windows),
            'time_ranges': [{'since': buffered_start_date.to_date_string(),
                             'until': buffered_start_date.to_date_string()}]
        }
        buffered_start_date = buffered_start_date.add(days=1)
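The generator above backs the start date off by a buffer (28 days by default, since already-delivered insights can be restated) and then emits one single-day time_ranges window per job. A minimal sketch of just that windowing logic, assuming pendulum datetimes as in the example:

import pendulum

def day_windows(bookmark, buffer_days=28, end=None):
    # Re-request a trailing window, because insights that were already
    # delivered can still be restated after the fact.
    day = bookmark.subtract(days=buffer_days)
    end = end or pendulum.now()
    while day <= end:
        # 'since' == 'until' yields exactly one day per request.
        yield {'since': day.to_date_string(), 'until': day.to_date_string()}
        day = day.add(days=1)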
Example 11: do_sync
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def do_sync(account, catalog, state):
    streams_to_sync = get_streams_to_sync(account, catalog, state)
    refs = load_shared_schema_refs()
    for stream in streams_to_sync:
        LOGGER.info('Syncing %s, fields %s', stream.name, stream.fields())
        schema = singer.resolve_schema_references(load_schema(stream), refs)
        metadata_map = metadata.to_map(stream.catalog_entry.metadata)
        bookmark_key = BOOKMARK_KEYS.get(stream.name)
        singer.write_schema(stream.name, schema, stream.key_properties, bookmark_key, stream.stream_alias)

        # NB: The AdCreative stream is not an iterator
        if stream.name == 'adcreative':
            stream.sync()
            continue

        with Transformer(pre_hook=transform_date_hook) as transformer:
            with metrics.record_counter(stream.name) as counter:
                for message in stream:
                    if 'record' in message:
                        counter.increment()
                        time_extracted = utils.now()
                        record = transformer.transform(message['record'], schema, metadata=metadata_map)
                        singer.write_record(stream.name, record, stream.stream_alias, time_extracted)
                    elif 'state' in message:
                        singer.write_state(message['state'])
                    else:
                        raise TapFacebookException('Unrecognized message {}'.format(message))
Example 12: get_end_date
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def get_end_date():
    if CONFIG.get('end_date'):
        return utils.strptime_with_tz(CONFIG.get('end_date'))
    return utils.now()
Example 13: resume_syncing_bulk_query
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def resume_syncing_bulk_query(sf, catalog_entry, job_id, state, counter):
    bulk = Bulk(sf)
    current_bookmark = singer.get_bookmark(state, catalog_entry['tap_stream_id'], 'JobHighestBookmarkSeen') or sf.get_start_date(state, catalog_entry)
    current_bookmark = singer_utils.strptime_with_tz(current_bookmark)
    batch_ids = singer.get_bookmark(state, catalog_entry['tap_stream_id'], 'BatchIDs')

    start_time = singer_utils.now()
    stream = catalog_entry['stream']
    stream_alias = catalog_entry.get('stream_alias')
    catalog_metadata = metadata.to_map(catalog_entry.get('metadata'))
    replication_key = catalog_metadata.get((), {}).get('replication-key')
    stream_version = get_stream_version(catalog_entry, state)
    schema = catalog_entry['schema']

    if not bulk.job_exists(job_id):
        LOGGER.info("Found stored Job ID that no longer exists, resetting bookmark and removing JobID from state.")
        return counter

    # Iterate over the remaining batches, removing them once they are synced
    for batch_id in batch_ids[:]:
        with Transformer(pre_hook=transform_bulk_data_hook) as transformer:
            for rec in bulk.get_batch_results(job_id, batch_id, catalog_entry):
                counter.increment()
                rec = transformer.transform(rec, schema)
                rec = fix_record_anytype(rec, schema)
                singer.write_message(
                    singer.RecordMessage(
                        stream=(stream_alias or stream),
                        record=rec,
                        version=stream_version,
                        time_extracted=start_time))

                # Update bookmark if necessary
                replication_key_value = replication_key and singer_utils.strptime_with_tz(rec[replication_key])
                if replication_key_value and replication_key_value <= start_time and replication_key_value > current_bookmark:
                    current_bookmark = singer_utils.strptime_with_tz(rec[replication_key])

        state = singer.write_bookmark(state,
                                      catalog_entry['tap_stream_id'],
                                      'JobHighestBookmarkSeen',
                                      singer_utils.strftime(current_bookmark))
        batch_ids.remove(batch_id)
        LOGGER.info("Finished syncing batch %s. Removing batch from state.", batch_id)
        LOGGER.info("Batches to go: %d", len(batch_ids))
        singer.write_state(state)

    return counter
Example 14: sync_companies
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_companies(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    bumble_bee = Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING)
    bookmark_key = 'hs_lastmodifieddate'
    start = utils.strptime_to_utc(get_start(STATE, "companies", bookmark_key))
    LOGGER.info("sync_companies from %s", start)
    schema = load_schema('companies')
    singer.write_schema("companies", schema, ["companyId"], [bookmark_key], catalog.get('stream_alias'))

    # Because this stream doesn't query by `lastUpdated`, it cycles
    # through the data set every time. The issue with this is that there
    # is a race condition by which records may be updated between the
    # start of this table's sync and the end, causing some updates to not
    # be captured. In order to combat this, we must store the current
    # sync's start in the state and not move the bookmark past this value.
    current_sync_start = get_current_sync_start(STATE, "companies") or utils.now()
    STATE = write_current_sync_start(STATE, "companies", current_sync_start)
    singer.write_state(STATE)

    url = get_url("companies_all")
    max_bk_value = start
    if CONTACTS_BY_COMPANY in ctx.selected_stream_ids:
        contacts_by_company_schema = load_schema(CONTACTS_BY_COMPANY)
        singer.write_schema("contacts_by_company", contacts_by_company_schema, ["company-id", "contact-id"])

    with bumble_bee:
        for row in gen_request(STATE, 'companies', url, default_company_params, 'companies', 'has-more', ['offset'], ['offset']):
            row_properties = row['properties']
            modified_time = None
            if bookmark_key in row_properties:
                # Hubspot returns timestamps in millis
                timestamp_millis = row_properties[bookmark_key]['timestamp'] / 1000.0
                modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc)
            elif 'createdate' in row_properties:
                # Hubspot returns timestamps in millis
                timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0
                modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc)

            if modified_time and modified_time >= max_bk_value:
                max_bk_value = modified_time

            if not modified_time or modified_time >= start:
                record = request(get_url("companies_detail", company_id=row['companyId'])).json()
                record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata)
                singer.write_record("companies", record, catalog.get('stream_alias'), time_extracted=utils.now())
                if CONTACTS_BY_COMPANY in ctx.selected_stream_ids:
                    STATE = _sync_contacts_by_company(STATE, ctx, record['companyId'])

    # Don't bookmark past the start of this sync to account for updated records during the sync.
    new_bookmark = min(max_bk_value, current_sync_start)
    STATE = singer.write_bookmark(STATE, 'companies', bookmark_key, utils.strftime(new_bookmark))
    STATE = write_current_sync_start(STATE, 'companies', None)
    singer.write_state(STATE)
    return STATE
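The comment in sync_companies is worth isolating: because the stream rescans the full data set, a record updated after the sync began could otherwise advance the bookmark past changes the sync never saw, so the bookmark is capped at the sync's own start time. A sketch of that guard with hypothetical stand-in values:

from singer import utils

# Captured once when the sync begins, and persisted in state so an
# interrupted sync resumes with the same ceiling.
current_sync_start = utils.now()

# ... the sync loop runs; max_bk_value ends up as the newest
# modification time seen (a hypothetical stand-in here) ...
max_bk_value = utils.now()

# Cap the bookmark at the sync's start: anything updated after that
# moment is re-fetched on the next run instead of being skipped.
new_bookmark = min(max_bk_value, current_sync_start)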
Example 15: sync_deals
# Required import: from singer import utils [as alias]
# Or: from singer.utils import now [as alias]
def sync_deals(STATE, ctx):
    catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE))
    mdata = metadata.to_map(catalog.get('metadata'))
    bookmark_key = 'hs_lastmodifieddate'
    start = utils.strptime_with_tz(get_start(STATE, "deals", bookmark_key))
    max_bk_value = start
    LOGGER.info("sync_deals from %s", start)
    most_recent_modified_time = start
    params = {'count': 250,
              'includeAssociations': False,
              'properties': []}

    schema = load_schema("deals")
    singer.write_schema("deals", schema, ["dealId"], [bookmark_key], catalog.get('stream_alias'))

    # Check if we should include associations
    for key in mdata.keys():
        if 'associations' in key:
            assoc_mdata = mdata.get(key)
            if assoc_mdata.get('selected') and assoc_mdata.get('selected') == True:
                params['includeAssociations'] = True

    if mdata.get(('properties', 'properties'), {}).get('selected') or has_selected_custom_field(mdata):
        # On 2/12/20, Hubspot added a lot of additional properties for
        # deals, and appending all of them to requests ended up leading to
        # 414 (url-too-long) errors. Hubspot recommended we use the
        # `includeAllProperties` and `allPropertiesFetchMode` params
        # instead.
        params['includeAllProperties'] = True
        params['allPropertiesFetchMode'] = 'latest_version'

    url = get_url('deals_all')
    with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee:
        for row in gen_request(STATE, 'deals', url, params, 'deals', "hasMore", ["offset"], ["offset"]):
            row_properties = row['properties']
            modified_time = None
            if bookmark_key in row_properties:
                # Hubspot returns timestamps in millis
                timestamp_millis = row_properties[bookmark_key]['timestamp'] / 1000.0
                modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc)
            elif 'createdate' in row_properties:
                # Hubspot returns timestamps in millis
                timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0
                modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc)

            if modified_time and modified_time >= max_bk_value:
                max_bk_value = modified_time

            if not modified_time or modified_time >= start:
                record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata)
                singer.write_record("deals", record, catalog.get('stream_alias'), time_extracted=utils.now())

    STATE = singer.write_bookmark(STATE, 'deals', bookmark_key, utils.strftime(max_bk_value))
    singer.write_state(STATE)
    return STATE

# NB> no suitable bookmark is available: https://developers.hubspot.com/docs/methods/email/get_campaigns_by_id