This article collects typical usage examples of dateutil.parser in Python. If you are wondering what dateutil.parser does, how to call it, or want to see it used in context, the curated code samples below may help. You can also explore further examples of the dateutil package it belongs to.
The following shows 15 code examples of dateutil.parser, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
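Before the examples, here is a minimal sketch of the most common entry point, dateutil.parser.parse, which turns a date/time string into a datetime object (the input strings below are illustrative and not taken from the examples that follow):
import dateutil.parser

# Parse an ISO 8601 timestamp; the UTC offset is kept as tzinfo
dt = dateutil.parser.parse("2019-02-14T14:05:19Z")
print(dt)   # 2019-02-14 14:05:19+00:00

# Free-form formats are also accepted
dt2 = dateutil.parser.parse("14 Feb 2019 2:05 PM")
print(dt2)  # 2019-02-14 14:05:00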
Example 1: testImportedModules
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def testImportedModules(self):
    import dateutil.easter
    import dateutil.parser
    import dateutil.relativedelta
    import dateutil.rrule
    import dateutil.tz
    import dateutil.utils
    import dateutil.zoneinfo

    # new_locals is expected to be populated by the surrounding test fixture
    # (e.g. via `from dateutil import *`); it is not defined in this snippet.
    self.assertEquals(dateutil.easter, new_locals.pop("easter"))
    self.assertEquals(dateutil.parser, new_locals.pop("parser"))
    self.assertEquals(dateutil.relativedelta, new_locals.pop("relativedelta"))
    self.assertEquals(dateutil.rrule, new_locals.pop("rrule"))
    self.assertEquals(dateutil.tz, new_locals.pop("tz"))
    self.assertEquals(dateutil.utils, new_locals.pop("utils"))
    self.assertEquals(dateutil.zoneinfo, new_locals.pop("zoneinfo"))

    self.assertFalse(new_locals)
Example 2: perform_romeo_query
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def perform_romeo_query(self, search_terms):
    search_terms = search_terms.copy()
    if self.api_key:
        search_terms['ak'] = self.api_key

    # Perform the query
    try:
        req = requests.get(self.base_url, params=search_terms, timeout=20)
    except requests.exceptions.RequestException as e:
        raise MetadataSourceException('Error while querying RoMEO.\n' +
                                      'URL was: '+self.base_url+'\n' +
                                      'Parameters were: '+str(search_terms)+'\n' +
                                      'Error is: '+str(e))

    # Parse it
    try:
        parser = ET.XMLParser(encoding='ISO-8859-1')
        root = ET.parse(BytesIO(req.content), parser)
    except ET.ParseError as e:
        raise MetadataSourceException('RoMEO returned an invalid XML response.\n' +
                                      'URL was: '+self.base_url+'\n' +
                                      'Parameters were: '+str(search_terms)+'\n' +
                                      'Error is: '+str(e))

    return root
Example 3: test_fetch_updates
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def test_fetch_updates(self):
    with requests_mock.mock() as http_mocker:
        http_mocker.get('http://www.sherpa.ac.uk/downloads/journal-title-issns.php?format=tsv',
                        content=self.journals_dump_response)
        http_mocker.get('http://www.sherpa.ac.uk/downloads/download-dates.php?format=xml',
                        content=self.latest_update_response)

        # Fetch all publishers initially
        self.api.fetch_updates()
        p = Publisher.objects.get(alias='GSA Today')
        self.assertEqual(p.last_updated, dateutil.parser.parse('2019-02-14T14:05:19Z'))
        p = Publisher.objects.get(romeo_id='2425')
        self.assertEqual(p.url, 'http://intranet.cvut.cz/')

        # Fetch updates again
        self.api.fetch_updates()

        # A publisher was updated
        p = Publisher.objects.get(romeo_id='2425')
        self.assertEqual(p.url, 'https://intranet.cvut.cz/')
Example 4: _process_remove_objects_batch
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def _process_remove_objects_batch(self, bucket_name, objects_batch):
    """
    Requester and response parser for remove_objects
    """
    # assemble request content for objects_batch
    content = xml_marshal_delete_objects(objects_batch)

    # compute headers
    headers = {
        'Content-Md5': get_md5_base64digest(content),
        'Content-Length': len(content)
    }

    query = {'delete': ''}
    content_sha256_hex = get_sha256_hexdigest(content)

    # send multi-object delete request
    response = self._url_open(
        'POST', bucket_name=bucket_name,
        headers=headers, body=content,
        query=query, content_sha256=content_sha256_hex,
    )

    # parse response to find delete errors
    return parse_multi_delete_response(response.data)
Example 5: parse_datetime_string
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def parse_datetime_string(datetime_str):
    """
    :param datetime_str: A string representing date and time with timezone
                         information.
    :return: A datetime object, converted to UTC, with no timezone info.
    """
    # Parse the string into a datetime object
    date_with_offset = dateutil.parser.parse(datetime_str)

    # Convert the date to UTC
    try:
        utc_date = date_with_offset.astimezone(pytz.utc)
    except ValueError:
        raise manager_exceptions.BadParametersError(
            'Date `{0}` missing timezone information, please provide'
            ' valid date. \nExpected format: YYYYMMDDHHMM+HHMM or'
            ' YYYYMMDDHHMM-HHMM i.e: 201801012230-0500'
            ' (Jan-01-18 10:30pm EST)'.format(datetime_str))

    # Date is in UTC, tzinfo is not necessary
    return utc_date.replace(tzinfo=None)
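A brief usage sketch of parse_datetime_string above, assuming dateutil and pytz are importable (the input value is illustrative):
# '201801012230-0500' is 2018-01-01 22:30 at UTC-05:00, so the function
# should return the equivalent naive UTC datetime 2018-01-02 03:30
parse_datetime_string('201801012230-0500')
# -> datetime.datetime(2018, 1, 2, 3, 30)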
Example 6: get_parsed_deployment
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def get_parsed_deployment(blueprint,
                          app_dir,
                          app_blueprint):
    file_server_root = config.instance.file_server_root
    blueprint_resource_dir = os.path.join(file_server_root,
                                          'blueprints',
                                          blueprint.tenant_name,
                                          blueprint.id)
    # The dsl parser expects a URL
    blueprint_resource_dir_url = 'file:{0}'.format(blueprint_resource_dir)
    app_path = os.path.join(file_server_root, app_dir, app_blueprint)

    try:
        return tasks.parse_dsl(
            app_path,
            resources_base_path=file_server_root,
            additional_resources=[blueprint_resource_dir_url],
            **app_context.get_parser_context()
        )
    except parser_exceptions.DSLParsingException as ex:
        raise manager_exceptions.InvalidBlueprintError(
            'Invalid blueprint - {0}'.format(ex))
Example 7: fetch_production
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def fetch_production(zone_key, session=None, target_datetime=None):
    if zone_key != "MX":
        raise ValueError("MX parser cannot fetch production for zone {}".format(zone_key))
    if target_datetime is None:
        raise ValueError("Parser only supports fetching historical production data, please specify a target_datetime in the past")

    # retrieve data for the month either from the cache or fetch it
    cache_key = target_datetime.strftime("%Y-%m")
    if cache_key in DATA_CACHE:
        df = DATA_CACHE[cache_key]
    else:
        df = fetch_csv_for_date(target_datetime, session=session)
        DATA_CACHE[cache_key] = df

    data = []
    for idx, series in df.iterrows():
        data.append({
            'zoneKey': zone_key,
            'datetime': series["instante"].to_pydatetime(),
            'production': convert_production(series),
            'source': 'cenace.gob.mx'
        })

    return data
Example 8: fetch_MX_exchange
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def fetch_MX_exchange(sorted_zone_keys, s):
    """
    Finds the current flow between two Mexican control areas.
    Returns a float.
    """
    req = s.get(MX_EXCHANGE_URL)
    soup = BeautifulSoup(req.text, 'html.parser')
    exchange_div = soup.find("div", attrs={'id': EXCHANGES[sorted_zone_keys]})
    val = exchange_div.text

    # cenace html uses unicode hyphens instead of minus signs and , as thousands separator
    trantab = str.maketrans({chr(8208): chr(45), ",": ""})
    val = val.translate(trantab)
    flow = float(val)

    if sorted_zone_keys in ["BZ->MX-PN", "MX-CE->MX-OR", "MX-CE->MX-OC"]:
        # reversal needed for these zones due to EM ordering
        flow = -1 * flow

    return flow
Example 9: __json_date_parse
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def __json_date_parse(json_object):
    """
    Parse dates in certain known json fields, if possible.
    """
    known_date_fields = ["created_at", "week", "day", "expires_at", "scheduled_at",
                         "updated_at", "last_status_at", "starts_at", "ends_at", "published_at"]
    for k, v in json_object.items():
        if k in known_date_fields:
            if v is not None:
                try:
                    if isinstance(v, int):
                        json_object[k] = datetime.datetime.fromtimestamp(v, pytz.utc)
                    else:
                        json_object[k] = dateutil.parser.parse(v)
                except:
                    raise MastodonAPIError('Encountered invalid date.')
    return json_object
Example 10: _normalize_gaf_date
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def _normalize_gaf_date(date, report, taxon, line):
    if date is None or date == "":
        report.warning(line, Report.INVALID_DATE, date, "GORULE:0000001: empty",
                       taxon=taxon, rule=1)
        return date

    # We check int(date)
    if len(date) == 8 and date.isdigit():
        d = datetime.datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0, 0)
    else:
        report.warning(line, Report.INVALID_DATE, date,
                       "GORULE:0000001: Date field must be YYYYMMDD, got: {}".format(date),
                       taxon=taxon, rule=1)
        try:
            d = dateutil.parser.parse(date)
        except:
            report.error(line, Report.INVALID_DATE, date,
                         "GORULE:0000001: Could not parse date '{}' at all".format(date),
                         taxon=taxon, rule=1)
            return None

    return d.strftime("%Y%m%d")

## we generate both qualifier and relation field
## Returns: (negated, relation, other_qualifiers)
Example 11: load_datetime_tz
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def load_datetime_tz(time_str):
    """
    Load datetime and ensure the result is timezone-aware.

    If the parsed timestamp is naive, transform it into a timezone-aware one
    using the local timezone.

    :param str time_str: string representing a timestamp
    :return datetime: the parsed timezone-aware datetime
    """
    # dateutil.parser returns a naive or tz-aware datetime depending on the
    # format of the input string
    timestamp = dateutil.parser.parse(time_str)

    # if the parsed timestamp is naive, force it to the local timezone
    if timestamp.tzinfo is None:
        timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal())
    return timestamp
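A short usage sketch of load_datetime_tz above (the second result depends on the machine's local timezone):
load_datetime_tz("2019-02-14T14:05:19+01:00")
# -> datetime.datetime(2019, 2, 14, 14, 5, 19, tzinfo=tzoffset(None, 3600))

load_datetime_tz("2019-02-14 14:05:19")   # naive input
# -> same wall-clock time, but with tzinfo=tzlocal() attached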
Example 12: _defs
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def _defs(self, name, value):
    if name in ['cvss_vector', 'cvss_temporal_vector']:
        # Return a list of the vectors instead of having everything in a
        # flat string. This should allow for much easier parsing later.
        return value.split('/')
    elif name in ['cvss_base_score', 'cvss_temporal_score']:
        # CVSS scores are floats, so let's return them as such.
        return float(value)
    elif name in ['first_found', 'last_found', 'plugin_modification_date',
                  'plugin_publication_date', 'HOST_END', 'HOST_START']:
        # The first and last found attributes use a datetime timestamp
        # format that we parse into datetime objects.
        return dateutil.parser.parse(value)
    elif name in ['port', 'pluginID', 'severity']:
        return int(value)
    else:
        return value
Example 13: get_s2_granule_id_of_scihub_item_from_sentinelhub
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def get_s2_granule_id_of_scihub_item_from_sentinelhub(img):
    """
    Build the granule id of a given single tile SAFE.

    The hard part is to get the timestamp in the granule id. Unfortunately this
    timestamp is not part of the metadata returned by scihub. This function queries
    sentinelhub to retrieve it. It takes about 3 seconds.

    Args:
        img (Sentinel2Image instance): Sentinel-2 image metadata

    Return:
        str: granule id, e.g. L1C_T36RTV_A005095_20180226T084545
    """
    import sentinelhub
    t0 = (img.date - datetime.timedelta(hours=2)).isoformat()
    t1 = (img.date + datetime.timedelta(hours=2)).isoformat()
    r = sentinelhub.opensearch.get_tile_info('T{}'.format(img.mgrs_id), time=(t0, t1))
    assert isinstance(r, dict)
    granule_date = dateutil.parser.parse(r['properties']['startDate']).strftime("%Y%m%dT%H%M%S")
    return "L1C_T{}_A{:06d}_{}".format(img.mgrs_id, img.relative_orbit, granule_date)
Example 14: devseed_parser
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def devseed_parser(self, img):
    """
    Args:
        img (dict): json metadata dict as shipped in devseed API response
    """
    p = img['properties']
    self.title = p['sentinel:product_id']
    self.utm_zone = int(p['sentinel:utm_zone'])
    self.lat_band = p['sentinel:latitude_band']
    self.sqid = p['sentinel:grid_square']
    self.mgrs_id = '{}{}{}'.format(self.utm_zone, self.lat_band, self.sqid)
    self.date = dateutil.parser.parse(self.title.split('_')[2])
    #self.granule_date = dateutil.parser.parse(p['datetime'])
    self.satellite = p['eo:platform'].replace("sentinel-", "S").upper()  # sentinel-2b --> S2B
    self.relative_orbit = parse_safe_name_for_relative_orbit_number(self.title)
    self.thumbnail = img['assets']['thumbnail']['href'].replace('sentinel-s2-l1c.s3.amazonaws.com',
                                                                'roda.sentinel-hub.com/sentinel-s2-l1c')
    self.cloud_cover = p['eo:cloud_cover']
    #self.id = img['id']
Example 15: scihub_parser
# Required module: import dateutil [as alias]
# Or: from dateutil import parser [as alias]
def scihub_parser(self, img):
    """
    Args:
        img (dict): json metadata dict for a single SAFE, as shipped in scihub
            opensearch API response
    """
    self.title = img['title']
    try:
        self.mgrs_id = img['tileid']
    except KeyError:
        self.mgrs_id = re.findall(r"_T([0-9]{2}[A-Z]{3})_", img['title'])[0]
    self.utm_zone, self.lat_band, self.sqid = split_mgrs_id(self.mgrs_id)
    self.date = dateutil.parser.parse(img['beginposition'], ignoretz=True)
    self.satellite = self.title[:3]  # S2A_MSIL1C_2018010... --> S2A
    self.absolute_orbit = img['orbitnumber']
    self.relative_orbit = img['relativeorbitnumber']
    self.datatake_id = img['s2datatakeid']
    self.processing_level = img['processinglevel'].split('-')[1]  # Level-1C --> L1C
    self.thumbnail = img['links']['icon']
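Example 15 passes ignoretz=True, which tells dateutil.parser.parse to discard any timezone information and return a naive datetime. A minimal sketch of that option in isolation (the timestamp string is illustrative):
import dateutil.parser

dateutil.parser.parse("2018-01-02T10:30:00Z")
# -> datetime.datetime(2018, 1, 2, 10, 30, tzinfo=tzutc())

dateutil.parser.parse("2018-01-02T10:30:00Z", ignoretz=True)
# -> datetime.datetime(2018, 1, 2, 10, 30)   (naive, offset discarded)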