

Python LoggerManager.info Method Code Examples

This article collects typical usage examples of the Python method pythalesians.util.loggermanager.LoggerManager.info. If you have been wondering exactly what LoggerManager.info does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage of its containing class, pythalesians.util.loggermanager.LoggerManager.


Below are 15 code examples of the LoggerManager.info method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
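Before the examples, here is a minimal sketch of the pattern every snippet below shares (assuming the pythalesians package is installed and importable):

# Minimal sketch, assuming pythalesians is installed and importable.
from pythalesians.util.loggermanager import LoggerManager

logger = LoggerManager().getLogger(__name__)
logger.info("logger is ready")  # the LoggerManager.info call these examples revolve around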

Example 1: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class TwitterPyThalesians:

    def __init__(self, *args, **kwargs):
        self.logger = LoggerManager().getLogger(__name__)

    def set_key(self, APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET):
        self.twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

    def auto_set_key(self):
        self.twitter = Twython(Constants().APP_KEY, Constants().APP_SECRET,
                               Constants().OAUTH_TOKEN, Constants().OAUTH_TOKEN_SECRET)

    def update_status(self, msg, link = None, picture = None):
        # 22 chars URL
        # 23 chars picture

        chars_lim = 140

        # 'link' is treated here as the number of links contained in msg (an assumption)
        if link is not None: chars_lim = chars_lim - (22 * link)
        if picture is not None: chars_lim = chars_lim - 23

        if (len(msg) > chars_lim):
            self.logger.info("Message too long for Twitter!")

        if picture is None:
            self.twitter.update_status(status=msg)
        else:
            # context manager ensures the file handle is closed after upload
            with open(picture, 'rb') as photo:
                self.twitter.update_status_with_media(status=msg, media=photo)
Developer: BryanFletcher, Project: pythalesians, Lines: 31, Source: twitterpythalesians.py
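A hedged usage sketch of the class above (the four key strings are placeholders, not real credentials):

# Usage sketch; the key strings below are placeholders for real Twitter credentials.
twitter = TwitterPyThalesians()
twitter.set_key('APP_KEY', 'APP_SECRET', 'OAUTH_TOKEN', 'OAUTH_TOKEN_SECRET')
twitter.update_status("Hello from pythalesians!")            # plain tweet
twitter.update_status("Chart attached", picture='plot.png')  # tweet with media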

Example 2: LoaderQuandl

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class LoaderQuandl(LoaderTemplate):

    def __init__(self):
        super(LoaderQuandl, self).__init__()
        self.logger = LoggerManager().getLogger(__name__)

    # implement method in abstract superclass
    def load_ticker(self, time_series_request):
        time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)

        self.logger.info("Request Quandl data")

        data_frame = self.download_daily(time_series_request_vendor)

        # 'data_frame.index is []' is always False; test for emptiness instead
        if len(data_frame.index) == 0: return None

        # convert from vendor to Thalesians tickers/fields
        if data_frame is not None:
            returned_tickers = data_frame.columns

        if data_frame is not None:
            # tidy up tickers into a format that is more easily translatable
            returned_tickers = [x.replace(' - Value', '') for x in returned_tickers]
            returned_tickers = [x.replace('.', '/') for x in returned_tickers]

            fields = self.translate_from_vendor_field(['close' for x in returned_tickers], time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        self.logger.info("Completed request from Quandl.")

        return data_frame

    def download_daily(self, time_series_request):
        return Quandl.get(time_series_request.tickers, authtoken=Constants().quandl_api_key, trim_start=time_series_request.start_date,
                          trim_end=time_series_request.finish_date)
Developer: quantcruncher, Project: pythalesians, Lines: 45, Source: loaderquandl.py
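LoaderQuandl is normally driven through the library's time series factory rather than instantiated directly. A hedged driver sketch follows; the import paths and keyword arguments are assumptions based on the TimeSeriesRequest usage visible in examples 9 and 12, and may differ between pythalesians versions:

# Driver sketch; import paths and constructor arguments are assumed.
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory

time_series_request = TimeSeriesRequest(
    start_date='01 Jan 2015', finish_date='01 Jan 2016',
    freq='daily', data_source='quandl',    # routed internally to LoaderQuandl
    tickers=['EURUSD'], fields=['close'],
    cache_algo='internet_load_return')

df = LightTimeSeriesFactory().harvest_time_series(time_series_request)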

Example 3: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class WebDataTemplate:

    def __init__(self):
        self.config = ConfigManager()
        self.logger = LoggerManager().getLogger(__name__)
        return

    @abc.abstractmethod
    def download_raw_data(self):
        return

    @abc.abstractmethod
    def construct_indicator(self):
        return

    def dump_indicator(self):

        indicator_group = self.raw_indicator # self.raw_indicator.join(self.processed_indicator, how='outer')

        self.logger.info("About to write all web indicators")
        indicator_group.to_csv(self._csv_indicator_dump, date_format='%d/%m/%Y %H:%M:%S')
Developer: BryanFletcher, Project: pythalesians, Lines: 23, Source: webdatatemplate.py

Example 4: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class DataLoaderTemplate:

    def __init__(self):
        self.config = ConfigManager()
        self.logger = LoggerManager().getLogger(__name__)
        return

    def load_database(self, key = None):
        tsio = TimeSeriesIO()
        tsc = TimeSeriesCalcs()

        file = self._hdf5

        if key is not None:
            file = self._hdf5 + key + ".h5"

        # if cached file exists, use that, otherwise load CSV
        if os.path.isfile(file):
            self.logger.info("About to load market database from HDF5...")
            self.news_database = tsio.read_time_series_cache_from_disk(file)
            self.news_database = self.preprocess(self.news_database)
        else:
            self.logger.info("About to load market database from CSV...")
            self.news_database = self.load_csv()

        return self.news_database

    @abc.abstractmethod
    def load_csv(self):
        return

    def get_database(self, key):
        return self.news_database

    @abc.abstractmethod
    def preprocess(self, df):
        return
Developer: BryanFletcher, Project: pythalesians, Lines: 39, Source: dataloadertemplate.py
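DataLoaderTemplate leaves load_csv and preprocess abstract; a minimal hypothetical subclass might look like this (the class name and file paths are placeholders):

# Hypothetical concrete subclass; names and paths are placeholders.
import pandas

class NewsDataLoader(DataLoaderTemplate):
    def __init__(self):
        super(NewsDataLoader, self).__init__()
        self._hdf5 = 'news_cache'  # cache file prefix consumed by load_database

    def load_csv(self):
        return pandas.read_csv('news.csv', index_col=0, parse_dates=True)

    def preprocess(self, df):
        return df.dropna()  # e.g. drop incomplete rows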

Example 5: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class CreateDataIndexTemplate:

    def __init__(self):
        self.config = ConfigManager()
        self.logger = LoggerManager().getLogger(__name__)
        return

    @abc.abstractmethod
    def create_indicator(self):
        return

    @abc.abstractmethod
    def aggregate_news_data(self, raw_database):
        return

    @abc.abstractmethod
    def get_cached_aggregate(self):
        return

    def grab_indicator(self):
        return self.indicator

    def grab_econ_indicator(self):
        return self.indicator_econ

    def grab_final_indicator(self):
        return self.indicator_final

    def truncate_indicator(self, daily_ind, match):
        cols = daily_ind.columns.values

        to_include = []

        for i in range(0, len(cols)):
            if match in cols[i]:
                to_include.append(i)

        return daily_ind[daily_ind.columns[to_include]]

    def dump_indicators(self):
        tsf = TimeSeriesFilter()
        self.logger.info("About to write all indicators to CSV")
        self.indicator.to_csv(self._csv_indicator_dump, date_format='%d/%m/%Y')

        if (self._csv_econ_indicator_dump is not None):
            self.logger.info("About to write economy based indicators to CSV")
            self.indicator_econ.to_csv(self._csv_econ_indicator_dump, date_format='%d/%m/%Y')

        self.logger.info("About to write final indicators to CSV")

        # remove weekends and remove start of series
        if (self._csv_final_indicator_dump is not None):
            indicator_final_copy = tsf.filter_time_series_by_holidays(self.indicator_final, cal = 'WEEKDAY')
            indicator_final_copy = tsf.filter_time_series_by_date(
                start_date="01 Jan 2000", finish_date = None, data_frame=indicator_final_copy)

            indicator_final_copy.to_csv(self._csv_final_indicator_dump, date_format='%d/%m/%Y')
Developer: BryanFletcher, Project: pythalesians, Lines: 59, Source: createdataindextemplate.py

Example 6: convert_library_to_vendor_field

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
        return ConfigManager._dict_time_series_fields_list_vendor_to_library[
            source + '.' + sourcefield]

    @staticmethod
    def convert_library_to_vendor_field(source, field):
        return ConfigManager._dict_time_series_fields_list_library_to_vendor[
            source + '.' + field]


## test function
if __name__ == '__main__':
    logger = LoggerManager().getLogger(__name__)

    categories = ConfigManager().get_categories_from_fields()

    logger.info("Categories from fields list")
    print(categories)

    categories = ConfigManager().get_categories_from_tickers()

    logger.info("Categories from tickers list")
    print(categories)

    filter = 'events'

    categories_filtered = ConfigManager().get_categories_from_tickers_selective_filter(filter)
    logger.info("Categories from tickers list, filtered by events")
    print(categories_filtered)

    logger.info("For each category, print all tickers and fields")
Developer: quantcruncher, Project: pythalesians, Lines: 32, Source: configmanager.py
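Both static translators are plain dictionary lookups keyed by 'source.field'. A self-contained toy illustration of the round trip (the mapping entries are made up):

# Toy illustration of the field translation tables; the entries are made up.
_library_to_vendor = {'bloomberg.close': 'PX_LAST'}
_vendor_to_library = {'bloomberg.PX_LAST': 'close'}

assert _library_to_vendor['bloomberg' + '.' + 'close'] == 'PX_LAST'
assert _vendor_to_library['bloomberg' + '.' + 'PX_LAST'] == 'close'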

Example 7: LoaderBBG

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class LoaderBBG(LoaderTemplate):

    def __init__(self):
        super(LoaderBBG, self).__init__()
        self.logger = LoggerManager().getLogger(__name__)

    # implement method in abstract superclass
    def load_ticker(self, time_series_request):
        """
        load_ticker - Retrieves market data from external data source (in this case Bloomberg)

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        DataFrame
        """

        time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)

        data_frame = None
        self.logger.info("Request Bloomberg data")

        # do we need daily or intraday data?
        if (time_series_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']):

            # for events times/dates separately needs ReferenceDataRequest (when specified)
            if 'release-date-time-full' in time_series_request.fields:
                # experimental
                datetime_data_frame = self.get_reference_data(time_series_request_vendor, time_series_request)

                # remove fields 'release-date-time-full' from our request (and the associated field in the vendor)
                index = time_series_request.fields.index('release-date-time-full')
                time_series_request_vendor.fields.pop(index)
                time_series_request.fields.pop(index)

                # download all the other event fields (uses HistoricalDataRequest to Bloomberg)
                # concatenate with date time fields
                if len(time_series_request_vendor.fields) > 0:
                    events_data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)

                    col = events_data_frame.index.name
                    events_data_frame = events_data_frame.reset_index(drop = False)

                    data_frame = pandas.concat([events_data_frame, datetime_data_frame], axis = 1)
                    temp = data_frame[col]
                    del data_frame[col]
                    data_frame.index = temp
                else:
                    data_frame = datetime_data_frame

            # for all other daily/monthly/quarter data, we can use HistoricalDataRequest to Bloomberg
            else:
                data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)

        # assume one ticker only
        # for intraday data we use IntradayDataRequest to Bloomberg
        if (time_series_request.freq in ['intraday', 'minute', 'hourly']):
            time_series_request_vendor.tickers = time_series_request_vendor.tickers[0]

            data_frame = self.download_intraday(time_series_request_vendor)

            cols = data_frame.columns.values
            data_frame = data_frame.tz_localize('UTC')  # tz_localize returns a copy; assign it back
            cols = time_series_request.tickers[0] + "." + cols
            data_frame.columns = cols

        self.logger.info("Completed request from Bloomberg.")

        return data_frame

    def get_daily_data(self, time_series_request, time_series_request_vendor):
        data_frame = self.download_daily(time_series_request_vendor)

        # convert from vendor to Thalesians tickers/fields
        if data_frame is not None:
            returned_fields = data_frame.columns.get_level_values(0)
            returned_tickers = data_frame.columns.get_level_values(1)

        if data_frame is not None:
            # TODO if empty try downloading again a year later
            fields = self.translate_from_vendor_field(returned_fields, time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        return data_frame

    def get_reference_data(self, time_series_request_vendor, time_series_request):
        end = datetime.datetime.today()
        end = end.replace(year = end.year + 1)
#.........part of the code omitted here.........
Developer: quantcruncher, Project: pythalesians, Lines: 103, Source: loaderbbg.py

Example 8: LoaderDukasCopy

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class LoaderDukasCopy(LoaderTemplate):
    tick_name  = "{symbol}/{year}/{month}/{day}/{hour}h_ticks.bi5"

    def __init__(self):
        super(LoaderDukasCopy, self).__init__()  # was super(LoaderTemplate, ...), which skipped a level
        self.logger = LoggerManager().getLogger(__name__)

        import logging
        logging.getLogger("requests").setLevel(logging.WARNING)

    # implement method in abstract superclass
    def load_ticker(self, time_series_request):
        """
        load_ticker - Retrieves market data from external data source (in this case Dukascopy)

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        DataFrame
        """

        time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)

        data_frame = None
        self.logger.info("Request Dukascopy data")

        # doesn't support non-tick data
        if (time_series_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly', 'intraday', 'minute', 'hourly']):
            self.logger.warning("Dukascopy loader is for tick data only")

            return None

        # assume one ticker only (LightTimeSeriesFactory only calls one ticker at a time)
        if (time_series_request.freq in ['tick']):
            # time_series_request_vendor.tickers = time_series_request_vendor.tickers[0]

            data_frame = self.get_tick(time_series_request, time_series_request_vendor)

            if data_frame is not None: data_frame = data_frame.tz_localize('UTC')  # returns a copy

        self.logger.info("Completed request from Dukascopy")

        return data_frame

    def kill_session(self):
        return

    def get_tick(self, time_series_request, time_series_request_vendor):

        data_frame = self.download_tick(time_series_request_vendor)

        # convert from vendor to Thalesians tickers/fields
        if data_frame is not None:
            returned_fields = data_frame.columns
            returned_tickers = [time_series_request_vendor.tickers[0]] * (len(returned_fields))

        if data_frame is not None:
            fields = self.translate_from_vendor_field(returned_fields, time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        return data_frame

    def download_tick(self, time_series_request):

        symbol = time_series_request.tickers[0]
        df_list = []

        self.logger.info("About to download from Dukascopy... for " + symbol)

        # single threaded
        df_list = [self.fetch_file(time, symbol) for time in
                   self.hour_range(time_series_request.start_date, time_series_request.finish_date)]

        # parallel (has pickle issues)
        # time_list = self.hour_range(time_series_request.start_date, time_series_request.finish_date)
        # df_list = Parallel(n_jobs=-1)(delayed(self.fetch_file)(time, symbol) for time in time_list)

        try:
            return pandas.concat(df_list)
        except Exception:
            # pandas.concat raises ValueError when every hourly fetch failed
            return None

    def fetch_file(self, time, symbol):
        if time.hour % 24 == 0: self.logger.info("Downloading... " + str(time))

        tick_path = self.tick_name.format(
                symbol = symbol,
                year = str(time.year).rjust(4, '0'),
#.........part of the code omitted here.........
Developer: swaraj007, Project: pythalesians, Lines: 103, Source: loaderdukascopy.py
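The tick_name template at the top of the class maps each hour of each day to one compressed .bi5 file on Dukascopy's servers. A standalone sketch of the path construction, mirroring the rjust zero-padding visible in fetch_file above:

# Standalone sketch of the hourly tick path construction.
tick_name = "{symbol}/{year}/{month}/{day}/{hour}h_ticks.bi5"

path = tick_name.format(
    symbol='EURUSD',
    year=str(2015).rjust(4, '0'),
    month=str(6).rjust(2, '0'),
    day=str(1).rjust(2, '0'),
    hour=str(14).rjust(2, '0'))

print(path)  # EURUSD/2015/06/01/14h_ticks.bi5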

Example 9: str

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
                Constants.time_series_factory_thread_technique = tech

                for no in thread_no:
                    for key in Constants.time_series_factory_thread_no:
                        Constants.time_series_factory_thread_no[key] = no

                    import time
                    start = time.time()
                    df = ltsf.harvest_time_series(time_series_request)
                    end = time.time()
                    duration = end - start

                    diag.append("With " + str(no) + " " + tech + " no: " + str(duration) + " seconds")

            for d in diag:
                logger.info(d)

        ###### download intraday data from Bloomberg for FX, with different threading techniques
        if True:

            from datetime import timedelta

            time_series_request = TimeSeriesRequest(
                    start_date = datetime.date.today() - timedelta(days=10),    # start date
                    finish_date = datetime.date.today(),                        # finish date
                    freq = 'intraday',                                          # intraday data
                    data_source = 'bloomberg',                      # use Bloomberg as data source
                    tickers = ['EURUSD',                            # ticker (Thalesians)
                               'GBPUSD',
                               'USDJPY',
                               'AUDUSD'],
Developer: BryanFletcher, Project: pythalesians, Lines: 33, Source: paralleldata_examples.py

Example 10: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class TradeAnalysis:

    def __init__(self):
        self.logger = LoggerManager().getLogger(__name__)
        self.DUMP_PATH = 'output_data/' + datetime.date.today().strftime("%Y%m%d") + ' '
        self.scale_factor = 3
        return

    def run_strategy_returns_stats(self, strategy):
        """
        run_strategy_returns_stats - Plots useful statistics for the trading strategy (using PyFolio)

        Parameters
        ----------
        strategy : StrategyTemplate
            defining trading strategy

        """

        pnl = strategy.get_strategy_pnl()
        tz = TimeSeriesTimezone()
        tsc = TimeSeriesCalcs()

        # PyFolio assumes UTC time based DataFrames (so force this localisation)
        try:
            pnl = tz.localise_index_as_UTC(pnl)
        except Exception: pass

        # set the matplotlib style sheet & defaults
        try:
            matplotlib.rcdefaults()
            plt.style.use(Constants().plotfactory_pythalesians_style_sheet['pythalesians'])
        except Exception: pass

        # TODO for intraday strategies, make daily

        # convert DataFrame (assumed to have only one column) to Series
        pnl = tsc.calculate_returns(pnl)
        pnl = pnl[pnl.columns[0]]

        fig = pf.create_returns_tear_sheet(pnl, return_fig=True)

        try:
            plt.savefig(strategy.DUMP_PATH + "stats.png")
        except Exception: pass

        plt.show()

    def run_tc_shock(self, strategy, tc = None):
        if tc is None: tc = [0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2.0]

        parameter_list = [{'spot_tc_bp' : x } for x in tc]
        pretty_portfolio_names = [str(x) + 'bp' for x in tc]    # names of the portfolio
        parameter_type = 'TC analysis'                          # broad type of parameter name

        return self.run_arbitrary_sensitivity(strategy,
                                 parameter_list=parameter_list,
                                 pretty_portfolio_names=pretty_portfolio_names,
                                 parameter_type=parameter_type)

    ###### Parameters and signal generations (need to be customised for every model)
    def run_arbitrary_sensitivity(self, strat, parameter_list = None, parameter_names = None,
                                  pretty_portfolio_names = None, parameter_type = None):

        asset_df, spot_df, spot_df2, basket_dict = strat.fill_assets()

        port_list = None
        tsd_list = []

        for i in range(0, len(parameter_list)):
            br = strat.fill_backtest_request()

            current_parameter = parameter_list[i]

            # for calculating P&L
            for k in current_parameter.keys():
                setattr(br, k, current_parameter[k])

            strat.br = br   # for calculating signals

            signal_df = strat.construct_signal(spot_df, spot_df2, br.tech_params, br)

            cash_backtest = CashBacktest()
            self.logger.info("Calculating... " + pretty_portfolio_names[i])

            cash_backtest.calculate_trading_PnL(br, asset_df, signal_df)
            tsd_list.append(cash_backtest.get_portfolio_pnl_tsd())
            stats = str(cash_backtest.get_portfolio_pnl_desc()[0])

            port = cash_backtest.get_cumportfolio().resample('B').mean()
            port.columns = [pretty_portfolio_names[i] + ' ' + stats]

            if port_list is None:
                port_list = port
            else:
                port_list = port_list.join(port)

        # reset the parameters of the strategy
        strat.br = strat.fill_backtest_request()

#.........part of the code omitted here.........
Developer: hedgefair, Project: pythalesians, Lines: 103, Source: tradeanalysis.py
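A hedged usage sketch for TradeAnalysis (strategy is assumed to be a StrategyTemplate subclass implementing fill_assets, fill_backtest_request and construct_signal):

# Usage sketch; `strategy` is an assumed StrategyTemplate subclass instance.
ta = TradeAnalysis()
ta.run_strategy_returns_stats(strategy)      # PyFolio tear sheet of the strategy P&L
ta.run_tc_shock(strategy, tc=[0, 0.5, 1.0])  # P&L under 0 / 0.5 / 1.0 bp spot costs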

Example 11: BBGLowLevelRef

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class BBGLowLevelRef(BBGLowLevelTemplate):

    def __init__(self):
        super(BBGLowLevelRef, self).__init__()

        self.logger = LoggerManager().getLogger(__name__)
        self._options = []

    # populate options for a Bloomberg reference data request
    def fill_options(self, time_series_request):
        self._options = OptionsBBG()

        self._options.security = time_series_request.tickers
        self._options.startDateTime = time_series_request.start_date
        self._options.endDateTime = time_series_request.finish_date
        self._options.fields = time_series_request.fields

        return self._options

    def process_message(self, msg):
        data = collections.defaultdict(dict)

        # process received events
        securityDataArray = msg.getElement('securityData')

        index = 0

        for securityData in list(securityDataArray.values()):
            ticker = securityData.getElementAsString("security")
            fieldData = securityData.getElement("fieldData")

            for field in fieldData.elements():
                if not field.isValid():
                    field_name = "%s" % field.name()

                    self.logger.error(field_name + " is NULL")
                elif field.isArray():
                    # iterate over complex data returns.
                    field_name = "%s" % field.name()

                    for i, row in enumerate(field.values()):
                        data[(field_name, ticker)][index] = re.findall(r'"(.*?)"', "%s" % row)[0]

                        index = index + 1
                # else:
                    # vals.append(re.findall(r'"(.*?)"', "%s" % row)[0])
                    # print("%s = %s" % (field.name(), field.getValueAsString()))

            fieldExceptionArray = securityData.getElement("fieldExceptions")

            for fieldException in list(fieldExceptionArray.values()):
                errorInfo = fieldException.getElement("errorInfo")
                print(errorInfo.getElementAsString("category"), ":", \
                    fieldException.getElementAsString("fieldId"))

        data_frame = pandas.DataFrame(data)

        # if obsolete ticker could return no values
        if (not(data_frame.empty)):
            data_frame.columns = pandas.MultiIndex.from_tuples(data, names=['field', 'ticker'])
            self.logger.info("Reading: " + ticker + ' ' + str(data_frame.index[0]) + ' - ' + str(data_frame.index[-1]))
        else:
            return None

        return data_frame

    def combine_slices(self, data_frame, data_frame_slice):
        if (data_frame_slice.columns.get_level_values(1).values[0]
            not in data_frame.columns.get_level_values(1).values):

            return data_frame.join(data_frame_slice, how="outer")

        return data_frame

    # create request for data
    def send_bar_request(self, session, eventQueue):
        refDataService = session.getService("//blp/refdata")
        request = refDataService.createRequest('ReferenceDataRequest')

        self.add_override(request, 'TIME_ZONE_OVERRIDE', 23)    # force GMT time
        self.add_override(request, 'START_DT', self._options.startDateTime.strftime('%Y%m%d'))
        self.add_override(request, 'END_DT', self._options.endDateTime.strftime('%Y%m%d'))

        # only one security/eventType per request
        for field in self._options.fields:
            request.getElement("fields").appendValue(field)

        for security in self._options.security:
            request.getElement("securities").appendValue(security)

        self.logger.info("Sending Bloomberg Ref Request:" + str(request))
        session.sendRequest(request)
Developer: humdings, Project: pythalesians, Lines: 94, Source: loaderbbgopen.py

Example 12: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]

#.........part of the code omitted here.........
        time_series_request.cut = cut                           # NYC/BGN ticker
        time_series_request.fields = 'close'                    # close field only
        time_series_request.cache_algo = cache_algo             # cache_algo_only, cache_algo_return, internet_load

        time_series_request.environment = 'backtest'
        time_series_request.start_date = start
        time_series_request.finish_date = end
        time_series_request.data_source = source

        for cr in cross:
            base = cr[0:3]
            terms = cr[3:6]

            if (type == 'spot'):
                # non-USD crosses
                if base != 'USD' and terms != 'USD':
                    base_USD = self.fxconv.correct_notation('USD' + base)
                    terms_USD = self.fxconv.correct_notation('USD' + terms)

                    # TODO check if the cross exists in the database

                    # download base USD cross
                    time_series_request.tickers = base_USD
                    time_series_request.category = self.fxconv.em_or_g10(base, freq)
                    base_vals = time_series_factory.harvest_time_series(time_series_request)

                    # download terms USD cross
                    time_series_request.tickers = terms_USD
                    time_series_request.category = self.fxconv.em_or_g10(terms, freq)
                    terms_vals = time_series_factory.harvest_time_series(time_series_request)

                    if (base_USD[0:3] == 'USD'):
                        base_vals = 1 / base_vals
                    if (terms_USD[0:3] == 'USD'):
                        terms_vals = 1 / terms_vals

                    base_vals.columns = ['temp']
                    terms_vals.columns = ['temp']
                    cross_vals = base_vals.div(terms_vals, axis = 'index')
                    cross_vals.columns = [cr + '.close']

                else:
                    if base == 'USD': non_USD = terms
                    if terms == 'USD': non_USD = base

                    correct_cr = self.fxconv.correct_notation(cr)

                    time_series_request.tickers = correct_cr
                    time_series_request.category = self.fxconv.em_or_g10(non_USD, freq)
                    cross_vals = time_series_factory.harvest_time_series(time_series_request)

                    # flip if not convention
                    if(correct_cr != cr):
                        cross_vals = 1 / cross_vals

                    cross_vals.columns = [cr + '.close']  # match the column naming of the non-USD branch

            elif type[0:3] == "tot":
                if freq == 'daily':
                    # download base USD cross
                    time_series_request.tickers = base + 'USD'
                    time_series_request.category = self.fxconv.em_or_g10(base, freq) + '-tot'

                    if type == "tot":
                        base_vals = time_series_factory.harvest_time_series(time_series_request)
                    else:
                        x = 0

                    # download terms USD cross
                    time_series_request.tickers = terms + 'USD'
                    time_series_request.category = self.fxconv.em_or_g10(terms, freq) + '-tot'

                    if type == "tot":
                        terms_vals = time_series_factory.harvest_time_series(time_series_request)
                    else:
                        x = 0

                    base_rets = time_series_calcs.calculate_returns(base_vals)
                    terms_rets = time_series_calcs.calculate_returns(terms_vals)

                    cross_rets = base_rets.sub(terms_rets.iloc[:,0],axis=0)

                    # the first return of a time series will be NaN, since there is no previous point
                    cross_rets.iloc[0] = 0

                    cross_vals = time_series_calcs.create_mult_index(cross_rets)
                    cross_vals.columns = [cr + '-tot.close']

                elif freq == 'intraday':
                    self.logger.info('Total calculated returns for intraday not implemented yet')
                    return None

            if data_frame_agg is None:
                data_frame_agg = cross_vals
            else:
                data_frame_agg = data_frame_agg.join(cross_vals, how='outer')

        # strip the nan elements
        data_frame_agg = data_frame_agg.dropna()
        return data_frame_agg
Developer: quantcruncher, Project: pythalesians, Lines: 104, Source: fxcrossfactory.py
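The USD-leg arithmetic in the spot branch reduces to cross = (base/USD) / (terms/USD). A self-contained pandas illustration with made-up spot levels:

# Self-contained illustration of the USD-leg cross construction; levels are made up.
import pandas

idx = pandas.date_range('2015-01-01', periods=3, freq='D')
eur_usd = pandas.DataFrame({'temp': [1.20, 1.21, 1.19]}, index=idx)  # EUR/USD spot
gbp_usd = pandas.DataFrame({'temp': [1.50, 1.52, 1.48]}, index=idx)  # GBP/USD spot

eur_gbp = eur_usd.div(gbp_usd, axis='index')  # EUR/GBP = (EUR/USD) / (GBP/USD)
eur_gbp.columns = ['EURGBP.close']
print(eur_gbp)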

Example 13: HistoricalDataRequest

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class HistoricalDataRequest(Request):

    def __init__(self, symbols, fields, start=None, end=None, period='DAILY', addtl_sets=None, ignore_security_error=0, ignore_field_error=0):
        """ Historical data request for bbg.

        Parameters
        ----------
        symbols : string or list
        fields : string or list
        start : start date (if None, defaults to one year ago)
        end : end date (if None, defaults to today)
        period : ('DAILY', 'WEEKLY', 'MONTHLY', 'QUARTERLY', 'SEMI-ANNUAL', 'YEARLY')
        ignore_field_error : bool
        ignore_security_error : bool

        """

        Request.__init__(self, ignore_security_error=ignore_security_error, ignore_field_error=ignore_field_error)

        assert period in ('DAILY', 'WEEKLY', 'MONTHLY', 'QUARTERLY', 'SEMI-ANNUAL', 'YEARLY')
        self.symbols = isinstance(symbols, str) and [symbols] or symbols
        self.fields = isinstance(fields, str) and [fields] or fields

        if start is None:
            start = datetime.today() - timedelta(365)   # by default download the past year
        if end is None:
            end = datetime.today()

        self.start = to_datetime(start)
        self.end = to_datetime(end)
        self.period = period

        self.logger = LoggerManager().getLogger(__name__)

        # response related
        self.response = {}

    def get_bbg_service_name(self):
        return '//blp/refdata'

    def get_bbg_request(self, svc, session):
        # create the bbg request object
        request = svc.CreateRequest('HistoricalDataRequest')
        [request.GetElement('securities').AppendValue(sec) for sec in self.symbols]
        [request.GetElement('fields').AppendValue(fld) for fld in self.fields]
        request.Set('startDate', self.start.strftime('%Y%m%d'))
        request.Set('endDate', self.end.strftime('%Y%m%d'))
        request.Set('periodicitySelection', self.period)

        o = request.GetElement('overrides').AppendElment()
        o.SetElement('fieldId', 'TIME_ZONE_OVERRIDE')
        o.SetElement('value', 'GMT')

        return request

    def on_security_data_node(self, node):
        """ process a securityData node - FIXME: currently not handling relateDate node """
        sid = XmlHelper.get_child_value(node, 'security')
        farr = node.GetElement('fieldData')
        dmap = defaultdict(list)

        self.logger.info("Fetching ticker " + sid)

        for i in range(farr.NumValues):
            pt = farr.GetValue(i)
            [dmap[f].append(XmlHelper.get_child_value(pt, f)) for f in ['date'] + self.fields]

        self.logger.info("Returning ticker " + sid)

        idx = dmap.pop('date')
        frame = DataFrame(dmap, columns=self.fields, index=idx)
        frame.index.name = 'date'
        self.response[sid] = frame

    def on_event(self, evt, is_final):
        """
        on_event - This is invoked from in response to COM PumpWaitingMessages - different thread

        """

        for msg in XmlHelper.message_iter(evt):
            # Single security element in historical request
            node = msg.GetElement('securityData')
            if node.HasElement('securityError'):
                self.security_errors.append(XmlHelper.as_security_error(node.GetElement('securityError')))
            else:
                self.on_security_data_node(node)

    def response_as_single(self, copy=0):
        """
        response_as_single - convert the response map to a single data frame with Multi-Index columns

        """

        arr = []

        for sid, frame in self.response.items():
            if copy:
                frame = frame.copy()
            'security' not in frame and frame.insert(0, 'security', sid)
#.........part of the code omitted here.........
Developer: quantcruncher, Project: pythalesians, Lines: 103, Source: loaderbbgcom.py

Example 14: LoaderPandasWeb

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]
class LoaderPandasWeb(LoaderTemplate):
    def __init__(self):
        super(LoaderPandasWeb, self).__init__()
        self.logger = LoggerManager().getLogger(__name__)

    # implement method in abstract superclass
    def load_ticker(self, time_series_request):
        time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)

        self.logger.info("Request Pandas Web data")

        data_frame = self.download_daily(time_series_request_vendor)

        if time_series_request_vendor.data_source == "fred":
            returned_fields = ["close" for x in data_frame.columns.values]
            returned_tickers = data_frame.columns.values
        else:
            data_frame = data_frame.to_frame().unstack()

            # print(data_frame.tail())

            # 'data_frame.index is []' is always False; test for emptiness instead
            if len(data_frame.index) == 0:
                return None

            # convert from vendor to Thalesians tickers/fields
            if data_frame is not None:
                returned_fields = data_frame.columns.get_level_values(0)
                returned_tickers = data_frame.columns.get_level_values(1)

        if data_frame is not None:
            fields = self.translate_from_vendor_field(returned_fields, time_series_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, time_series_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            ticker_requested = []

            for f in time_series_request.fields:
                for t in time_series_request.tickers:
                    ticker_requested.append(t + "." + f)

            data_frame.columns = ticker_combined
            data_frame.index.name = "Date"

            # only return the requested tickers
            data_frame = pandas.DataFrame(
                data=data_frame[ticker_requested], index=data_frame.index, columns=ticker_requested
            )

        self.logger.info("Completed request from Pandas Web.")

        return data_frame

    def download_daily(self, time_series_request):
        return web.DataReader(
            time_series_request.tickers,
            time_series_request.data_source,
            time_series_request.start_date,
            time_series_request.finish_date,
        )
Developer: swaraj007, Project: pythalesians, Lines: 65, Source: loaderpandasweb.py

Example 15: __init__

# Required import: from pythalesians.util.loggermanager import LoggerManager [as alias]
# Or: from pythalesians.util.loggermanager.LoggerManager import info [as alias]

#.........part of the code omitted here.........
    def get_bcolz_filename(self, fname):
        """
        get_bcolz_filename - Appends the .bcolz extension to a filename if it is not already present

        Parameters
        ----------
        fname : str
            filename to normalise

        Returns
        -------
        str
        """
        if fname[-6:] == '.bcolz':
            return fname

        return fname + ".bcolz"

    def write_r_compatible_hdf_dataframe(self, data_frame, fname, fields = None):
        """
        write_r_compatible_hdf_dataframe - Writes a DataFrame to disk as an R-compatible HDF5 file

        Parameters
        ----------
        data_frame : DataFrame
            data frame to be written
        fname : str
            file path to be written
        fields : list(str)
            columns to be written
        """
        fname_r = self.get_h5_filename(fname)

        self.logger.info("About to dump R binary HDF5 - " + fname_r)
        data_frame32 = data_frame.astype('float32')

        if fields is None:
            fields = list(data_frame32.columns.values)  # list() so the '+' concatenation below works

        # decompose date/time into individual fields (easier to pick up in R)
        data_frame32['Year'] = data_frame.index.year
        data_frame32['Month'] = data_frame.index.month
        data_frame32['Day'] = data_frame.index.day
        data_frame32['Hour'] = data_frame.index.hour
        data_frame32['Minute'] = data_frame.index.minute
        data_frame32['Second'] = data_frame.index.second
        data_frame32['Millisecond'] = data_frame.index.microsecond / 1000

        data_frame32 = data_frame32[
            ['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second', 'Millisecond'] + fields]

        cols = data_frame32.columns

        store_export = pandas.HDFStore(fname_r)
        store_export.put('df_for_r', data_frame32, data_columns=cols)
        store_export.close()

    def read_time_series_cache_from_disk(self, fname, use_bcolz = False):
        """
        read_time_series_cache_from_disk - Reads time series cache from disk in either HDF5 or bcolz

        Parameters
        ----------
        fname : str
            file to be read from
Developer: BryanFletcher, Project: pythalesians, Lines: 69, Source: timeseriesio.py
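The date/time decomposition inside write_r_compatible_hdf_dataframe is easy to verify in isolation:

# Standalone check of the index decomposition used above.
import pandas

idx = pandas.date_range('2015-06-01 09:30:00', periods=2, freq='min')
df = pandas.DataFrame({'close': [1.0, 2.0]}, index=idx).astype('float32')

df['Year'], df['Month'], df['Day'] = df.index.year, df.index.month, df.index.day
df['Hour'], df['Minute'], df['Second'] = df.index.hour, df.index.minute, df.index.second
df['Millisecond'] = df.index.microsecond / 1000
print(df)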


Note: The pythalesians.util.loggermanager.LoggerManager.info examples in this article were compiled by 纯净天空 (Vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. When redistributing or using the code, please refer to the corresponding project's license. Reproduction without permission is prohibited.