本文整理汇总了Python中pyiso.LOGGER类的典型用法代码示例。如果您正苦于以下问题:Python LOGGER类的具体用法?Python LOGGER怎么用?Python LOGGER使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LOGGER类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: latest_fuel_mix
def latest_fuel_mix(self):
    """
    Fetch the current MISO fuel mix CSV and return a DataFrame indexed by
    UTC timestamp with 'fuel_name' and 'gen_MW' columns.
    Returns an empty DataFrame if the request fails or the source serves
    an error page instead of CSV.
    """
    fuel_mix_url = self.base_url + '/ria/FuelMix.aspx?CSV=True'
    response = self.request(fuel_mix_url)
    if not response:
        return pd.DataFrame()
    # The endpoint returns an HTML error page (not CSV) on upstream failure.
    if 'The page cannot be displayed' in response.text:
        LOGGER.error('MISO: Error in source data for generation')
        return pd.DataFrame()
    mix_df = pd.read_csv(BytesIO(response.content), header=0, index_col=0, parse_dates=True)
    # Localize the naive source timestamps to UTC.
    mix_df.index = self.utcify_index(mix_df.index)
    mix_df.index.set_names(['timestamp'], inplace=True)
    # Map source fuel categories to canonical fuel names; copy generation MW.
    mix_df['fuel_name'] = mix_df.apply(lambda row: self.fuels[row['CATEGORY']], axis=1)
    mix_df['gen_MW'] = mix_df['ACT']
    return mix_df[['fuel_name', 'gen_MW']]
示例2: handle_options
def handle_options(self, **kwargs):
    """
    Process and store keyword argument options.
    """
    super(EIAClient, self).handle_options(**kwargs)
    # A balancing authority must be configured before any EIA query.
    if not hasattr(self, 'BA'):
        LOGGER.error('Balancing authority not set.')
        raise ValueError('Balancing authority not set.')
    # Default the market when the caller did not specify one.
    if 'market' not in self.options:
        if self.options['forecast']:
            market = self.MARKET_CHOICES.dam
        elif self.options['sliceable'] and self.options['data'] == 'gen':
            market = self.MARKET_CHOICES.dam
        else:
            market = self.MARKET_CHOICES.hourly
        self.options['market'] = market
    # Default the frequency when the caller did not specify one.
    # NOTE(review): every branch currently resolves to hourly; the branches
    # are kept so the option-key accesses behave exactly as before.
    if 'freq' not in self.options:
        if self.options['forecast']:
            freq = self.FREQUENCY_CHOICES.hourly
        elif self.options['sliceable'] and self.options['data'] == 'gen':
            freq = self.FREQUENCY_CHOICES.hourly
        else:
            freq = self.FREQUENCY_CHOICES.hourly
        self.options['freq'] = freq
    # Default 'yesterday' to False when absent.
    self.options.setdefault('yesterday', False)
示例3: fetch_forecast
def fetch_forecast(self, date):
    """
    Fetch the MISO day-ahead forecast spreadsheet for *date* and return it
    as a DataFrame indexed by UTC timestamp.
    Returns an empty DataFrame if the request fails or no file exists (404).
    """
    datestr = date.strftime('%Y%m%d')
    url = self.base_url + '/Library/Repository/Market%20Reports/' + datestr + '_da_ex.xls'
    # Go through self.request for easier debugging and mocking.
    response = self.request(url)
    if not response:
        return pd.DataFrame()
    if response.status_code == 404:
        LOGGER.debug('No MISO forecast data available at %s' % datestr)
        return pd.DataFrame()
    raw = pd.read_excel(BytesIO(response.content))
    # The first 5 rows are header material; the last of them holds column names.
    header_rows = raw.iloc[:5]
    df = raw.iloc[5:]
    df.columns = ['hour_str'] + list(header_rows.iloc[-1][1:])
    # Build a UTC index from labels formatted 'Hour 01' through 'Hour 24'.
    utc_index = []
    for label in df['hour_str']:
        hour = int(label[5:]) - 1
        utc_index.append(self.utcify(datetime(date.year, date.month, date.day, hour)))
    df.index = utc_index
    df.index.set_names(['timestamp'], inplace=True)
    return df
示例4: _dst_active_hours_for_transition_day
def _dst_active_hours_for_transition_day(self, local_dt_index):
"""
When attempting to localize a timezone-naive list of dates, the daylight savings status may be ambigous. This
method is meant as a fallback when the ambiguous='infer' datetime handling in pandas fails. It assumes
that the datetime index is a daylight saving transition day.
:param pandas.DatetimeIndex local_dt_index: A list of timezone-naive DatetimeIndex values.
:return: A list of bool values indicating whether daylight savings time is active for the list provided.
This returned list of boolean value is useful for passing to pandas 'ambiguous' kwarg.
:rtype: list
"""
dst_active_list = []
hour_idx = local_dt_index.hour
if len(hour_idx) > 3:
starting_timestamp = local_dt_index[0]
starting_month = starting_timestamp.month
starting_hour = starting_timestamp.hour
if starting_month == 3 and starting_hour == 0:
dst_active_list = [h > 1 for h in hour_idx]
elif starting_month == 11 and starting_hour == 0:
dst_active_list = [h < 2 for h in hour_idx]
elif 3 < starting_month < 11:
dst_active_list = [True for h in hour_idx]
elif starting_month < 3 or starting_month > 11:
dst_active_list = [False for h in hour_idx]
else:
LOGGER.warn("Uanble to infer fallback DST status for ambiguous DatetimeIndex values.")
return dst_active_list
示例5: get_load
def get_load(self, latest=False, start_at=False, end_at=False,
             forecast=False, **kwargs):
    """
    Collect load data from the configured endpoints and return it as a list
    of serialized dicts. Returns an empty list if no raw data can be parsed.
    :param bool latest: If True, fetch only the most recent data.
    :param start_at: Start of the requested time range (or False).
    :param end_at: End of the requested time range (or False).
    :param bool forecast: If True, fetch forecast rather than actual data.
    :rtype: list
    """
    self.handle_options(data='load', latest=latest, forecast=forecast,
                        start_at=start_at, end_at=end_at, **kwargs)
    raw_data = []
    for endpoint in self.request_endpoints():
        data = self.fetch_data(endpoint, self.auth)
        try:
            raw_data += self.parse_json_load_data(data)
        except ValueError as e:
            # Skip endpoints with malformed payloads.
            # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
            LOGGER.warning(e)
            continue
    try:
        df = self._parse_json(raw_data)
    except ValueError:
        # Nothing parseable was collected.
        return []
    df = self.slice_times(df)
    return self.serialize_faster(df, drop_index=True)
示例6: get_lmp
def get_lmp(self, node_id='INTERNALHUB', latest=True, start_at=False, end_at=False, **kwargs):
    """
    Collect LMP data for a node and return it as a list of record dicts.
    :param string node_id: Name of the pricing node (looked up in self.locations).
    :param bool latest: If True, fetch only the most recent data.
    :param start_at: Start of the requested time range (or False).
    :param end_at: End of the requested time range (or False).
    :raises ValueError: If node_id is not a known location.
    :rtype: list
    """
    self.handle_options(data='lmp', latest=latest,
                        start_at=start_at, end_at=end_at, node_id=node_id, **kwargs)
    # Map the node name to the source's numeric location id.
    try:
        locationid = self.locations[node_id.upper()]
    except KeyError:
        raise ValueError('No LMP data available for location %s' % node_id)
    raw_data = []
    for endpoint in self.request_endpoints(locationid):
        data = self.fetch_data(endpoint, self.auth)
        try:
            raw_data += self.parse_json_lmp_data(data)
        except ValueError as e:
            # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
            LOGGER.warning(e)
            continue
    df = self._parse_json(raw_data)
    df = self.slice_times(df)
    # Fixed: 'records' is the valid orient; the old 'record' abbreviation
    # raises ValueError in modern pandas.
    return df.to_dict(orient='records')
示例7: fetch_forecast
def fetch_forecast(self, date):
    """
    Fetch the MISO day-ahead forecast spreadsheet for *date* directly via
    pandas and return it as a DataFrame indexed by UTC timestamp.
    Returns an empty DataFrame when the file does not exist (HTTPError).
    """
    datestr = date.strftime("%Y%m%d")
    url = self.base_url + "/Library/Repository/Market%20Reports/" + datestr + "_da_ex.xls"
    # pandas fetches the URL itself here; a missing file surfaces as HTTPError.
    try:
        workbook = pd.read_excel(url)
    except HTTPError:
        LOGGER.debug("No MISO forecast data available at %s" % datestr)
        return pd.DataFrame()
    # The first 5 rows are header material; the last of them holds column names.
    header_rows = workbook.iloc[:5]
    df = workbook.iloc[5:]
    df.columns = ["hour_str"] + list(header_rows.iloc[-1][1:])
    # Build a UTC index from labels formatted 'Hour 01' through 'Hour 24'.
    utc_index = []
    for label in df["hour_str"]:
        hour = int(label[5:]) - 1
        utc_index.append(self.utcify(datetime(date.year, date.month, date.day, hour)))
    df.index = utc_index
    df.index.set_names(["timestamp"], inplace=True)
    return df
示例8: request
def request(self, *args, **kwargs):
    """
    Wrap the base client's request, treating HTTP 400 responses as failures.
    :return: The response object, or None when PJM returned Bad Request.
    """
    response = super(PJMClient, self).request(*args, **kwargs)
    if response and response.status_code == 400:
        # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
        LOGGER.warning('PJM request returned Bad Request %s' % response)
        return None
    return response
示例9: get_trade
def get_trade(self, latest=False,
              start_at=False, end_at=False, **kwargs):
    """
    Collect and parse NVEnergy trade data for each date in range, skipping
    dates whose source data is missing or unparseable.
    :param bool latest: If True, fetch only the most recent data.
    :param start_at: Start of the requested time range (or False).
    :param end_at: End of the requested time range (or False).
    :return: Parsed trade data restricted to the requested time window.
    :rtype: list
    """
    self.handle_options(data='trade', latest=latest,
                        start_at=start_at, end_at=end_at, **kwargs)
    parsed_data = []
    for this_date in self.dates():
        # Fetch; skip dates with no data.
        try:
            df, mode = self.fetch_df(this_date)
        except (HTTPError, ValueError):
            # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
            LOGGER.warning('No data available in NVEnergy at %s' % this_date)
            continue
        # Parse; skip dates whose sheet layout is unexpected.
        try:
            parsed_data += self.parse_trade(df, this_date, mode)
        except KeyError:
            LOGGER.warning('Unparseable data available in NVEnergy at %s: %s' % (this_date, df))
            continue
    return self.time_subset(parsed_data)
示例10: latest_fuel_mix
def latest_fuel_mix(self):
    """
    Fetch the current MISO fuel mix CSV (text variant) and return a DataFrame
    indexed by UTC timestamp with 'fuel_name' and 'gen_MW' columns.
    Returns an empty DataFrame if the request fails or the source serves
    an error page instead of CSV.
    """
    fuel_mix_url = self.base_url + "/ria/FuelMix.aspx?CSV=True"
    response = self.request(fuel_mix_url)
    if not response:
        return pd.DataFrame()
    # The endpoint returns an HTML error page (not CSV) on upstream failure.
    if "The page cannot be displayed" in response.text:
        LOGGER.error("MISO: Error in source data for generation")
        return pd.DataFrame()
    mix_df = pd.read_csv(StringIO(response.text), header=0, index_col=0, parse_dates=True)
    # Localize the naive source timestamps to UTC.
    mix_df.index = self.utcify_index(mix_df.index)
    mix_df.index.set_names(["timestamp"], inplace=True)
    # Map source fuel categories to canonical fuel names; copy generation MW.
    mix_df["fuel_name"] = mix_df.apply(lambda row: self.fuels[row["CATEGORY"]], axis=1)
    mix_df["gen_MW"] = mix_df["ACT"]
    return mix_df[["fuel_name", "gen_MW"]]
示例11: utcify_index
def utcify_index(self, local_index, tz_name=None):
    """
    Convert a DatetimeIndex to UTC.
    :param DatetimeIndex local_index: The local DatetimeIndex to be converted.
    :param string tz_name: If local_index is naive, it is assumed to be in timezone tz_name.
        If tz_name is not provided, the client's default timezone is used.
    :return: DatetimeIndex in UTC.
    :rtype: DatetimeIndex
    """
    # Fall back to the client's default timezone.
    if tz_name is None:
        tz_name = self.TZ_NAME
    # Localize; on DST-ambiguous timestamps, retry letting pandas infer the
    # transition from the ordering of the index.
    try:
        aware_local_index = local_index.tz_localize(tz_name)
    except AmbiguousTimeError as e:
        LOGGER.debug(e)
        aware_local_index = local_index.tz_localize(tz_name, ambiguous='infer')
    # Fixed: removed commented-out dead code and corrected the docstring,
    # which referred to nonexistent parameters 'local_ts' and 'tz'.
    return aware_local_index.tz_convert('UTC')
示例12: fetch_csvs
def fetch_csvs(self, date, label):
    """
    Fetch CSV text for a given date and report label.
    Tries the daily CSV first, then falls back to the zipped monthly archive.
    :return: A list of CSV strings (possibly empty).
    :rtype: list
    """
    is_lmp = self.options['data'] == 'lmp'
    # Try the daily file first.
    daily_str = date.strftime('%Y%m%d')
    if is_lmp:
        daily_url = '%s/%s/%s%s_zone.csv' % (self.base_url, label, daily_str, label)
    else:
        daily_url = '%s/%s/%s%s.csv' % (self.base_url, label, daily_str, label)
    daily_response = self.request(daily_url)
    if daily_response and daily_response.status_code == 200:
        return [daily_response.text]
    # Daily file missing: fall back to the zipped monthly archive.
    monthly_str = date.strftime('%Y%m01')
    if is_lmp:
        monthly_url = '%s/%s/%s%s_zone_csv.zip' % (self.base_url, label, monthly_str, label)
    else:
        monthly_url = '%s/%s/%s%s_csv.zip' % (self.base_url, label, monthly_str, label)
    monthly_response = self.request(monthly_url)
    if not monthly_response:
        return []
    unzipped = self.unzip(monthly_response.content)
    if not unzipped:
        return []
    LOGGER.info('Failed to find daily %s data for %s but found monthly data, using that' % (self.options['data'], date))
    return unzipped
示例13: fetch_todays_outlook_renewables
def fetch_todays_outlook_renewables(self):
    """
    Fetch the CAISO today's-outlook renewables page.
    :return: The page parsed as BeautifulSoup, or None if the request failed.
    """
    response = self.request(self.base_url_outlook+'renewables.html')
    try:
        return BeautifulSoup(response.content)
    except AttributeError:
        # self.request returned None, so response.content raised.
        # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
        LOGGER.warning('No response for CAISO today outlook renewables')
        return None
示例14: get_load
def get_load(self, latest=False, start_at=None, end_at=None, forecast=False, **kwargs):
    """
    Return PJM load data, choosing the source by the requested mode:
    forecast (eData forecast series), historical (yearly archive when the
    requested window ends more than an hour ago), or real-time
    (instantaneous eData point with an OASIS fallback).
    :param bool latest: If True, fetch only the most recent data.
    :param start_at: Start of the requested time range (or None).
    :param end_at: End of the requested time range (or None).
    :param bool forecast: If True, fetch forecast rather than actual data.
    :rtype: list
    """
    self.handle_options(data='load', latest=latest,
                        start_at=start_at, end_at=end_at, forecast=forecast,
                        **kwargs)
    if self.options['forecast']:
        # fetch from eData
        df = self.fetch_edata_series('ForecastedLoadHistory', {'name': 'PJM RTO Total'})
        sliced = self.slice_times(df)
        sliced.columns = ['load_MW']
        # format
        extras = {
            'freq': self.FREQUENCY_CHOICES.hourly,
            'market': self.MARKET_CHOICES.dam,
            'ba_name': self.NAME,
        }
        data = self.serialize_faster(sliced, extras=extras)
        # return
        return data
    elif self.options['end_at'] and self.options['end_at'] < datetime.now(pytz.utc) - timedelta(hours=1):
        # window ends in the past: use the yearly historical archive
        df = self.fetch_historical_load(self.options['start_at'].year)
        sliced = self.slice_times(df)
        # format
        extras = {
            'freq': self.FREQUENCY_CHOICES.hourly,
            'market': self.MARKET_CHOICES.dam,
            'ba_name': self.NAME,
        }
        data = self.serialize_faster(sliced, extras=extras)
        # return
        return data
    else:
        # handle real-time
        load_ts, load_val = self.fetch_edata_point('InstantaneousLoad', 'PJM RTO Total', 'MW')
        # fall back to OASIS
        if not (load_ts and load_val):
            load_ts, load_val = self.fetch_oasis_data()
        if not (load_ts and load_val):
            # Fixed: LOGGER.warn is a deprecated alias for LOGGER.warning.
            LOGGER.warning('No PJM latest load data')
            return []
        # format and return
        return [{
            'timestamp': load_ts,
            'freq': self.FREQUENCY_CHOICES.fivemin,
            'market': self.MARKET_CHOICES.fivemin,
            'load_MW': load_val,
            'ba_name': self.NAME,
        }]
示例15: time_from_soup
def time_from_soup(self, soup):
    """
    Returns a UTC timestamp if one is found in the soup,
    or None if an error was encountered.
    """
    # The timestamp lives in the element carrying the 'ts' CSS class.
    timestamp_element = soup.find(class_='ts')
    if timestamp_element:
        return self.utcify(timestamp_element.string)
    LOGGER.error('PJM: Timestamp not found in soup:\n%s' % soup)
    return None