本文整理汇总了Python中zipline.gens.utils.hash_args函数的典型用法代码示例。如果您正苦于以下问题:Python hash_args函数的具体用法?Python hash_args怎么用?Python hash_args使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了hash_args函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, **kwargs):
    """Initialize the datafeed from a configuration dict.

    Required kwargs:
        index: a pd.tseries.index.DatetimeIndex of trading datetimes.
        universe: object exposing ``sids``, ``open`` and ``close``.
    Optional kwargs:
        frequency: emission frequency, coerced to float (default 14).
        backtest / live: factories invoked as factory(sids, kwargs).

    Raises:
        InvalidDatafeed: if index/universe are missing, or index is not
        a DatetimeIndex.
    """
    # TODO Use alternatives to `index` and `universe` objects
    self.log = dna.logging.logger(__name__)
    if 'index' not in kwargs or 'universe' not in kwargs:
        raise InvalidDatafeed(
            reason='you must provide a universe and an index')
    if not isinstance(kwargs.get('index'),
                      pd.tseries.index.DatetimeIndex):
        raise InvalidDatafeed(reason='you must provide a valid time index')
    # Unpack config dictionary with default values.
    self.sids = kwargs['universe'].sids
    self.index = kwargs['index']
    self.start = self.index[0]
    self.end = self.index[-1]
    self.frequency = float(kwargs.get('frequency', 14))
    self.market_open = kwargs['universe'].open
    self.market_close = kwargs['universe'].close
    # Fix: default both data modules to None so the attributes always
    # exist. Previously they were only assigned when the corresponding
    # key was present, so the unconditional _check_data_modules call
    # below raised AttributeError when either key was missing.
    self.backtest = (kwargs['backtest'](self.sids, kwargs)
                     if 'backtest' in kwargs else None)
    self.live = (kwargs['live'](self.sids, kwargs)
                 if 'live' in kwargs else None)
    _check_data_modules(self.backtest, self.live, self.start, self.end)
    # Hash_value for downstream sorting.
    self.arg_string = hash_args(**kwargs)
    self._raw_data = None
示例2: __init__
def __init__(self, tnfm_class, *args, **kwargs):
    """Wrap an instance of *tnfm_class* as a stateful transform.

    The class must expose an ``update`` method; *args*/*kwargs* are
    forwarded unchanged to its constructor.
    """
    assert isinstance(tnfm_class, (types.ObjectType, types.ClassType)), \
        "Stateful transform requires a class."
    assert hasattr(tnfm_class, 'update'), \
        "Stateful transform requires the class to have an update method"
    if isinstance(tnfm_class, TransformMeta):
        # TransformMeta overrides __call__, which is what normally
        # builds an instance, so instantiation is delegated to
        # TransformMeta's parent class 'type' — exactly what the
        # interpreter does implicitly for ordinary classes.
        self.state = super(TransformMeta, tnfm_class).__call__(
            *args, **kwargs)
    else:
        # Plain class: ordinary instantiation.
        self.state = tnfm_class(*args, **kwargs)
    # Expose the state's window_length for external access.
    self.window_length = self.state.window_length
    # Unique name string for this generator's output.
    self.namestring = tnfm_class.__name__ + hash_args(*args, **kwargs)
示例3: __init__
def __init__(self, env, trading_calendar, *args, **kwargs):
    """Event source configured from kwargs or an explicit event_list.

    Parameters
    ----------
    env : trading environment exposing ``asset_finder``.
    trading_calendar : calendar object stored for later use.
    kwargs : event_list, filter, count, start, end, delta, concurrent,
        sids — all optional, with defaults derived from event_list when
        it is provided.
    """
    # We shouldn't get any positional arguments.
    assert len(args) == 0
    self.env = env
    self.trading_calendar = trading_calendar
    # Default to None for event_list and filter.
    self.event_list = kwargs.get('event_list')
    self.filter = kwargs.get('filter')
    if self.event_list is not None:
        # If event_list is provided, extract parameters from there
        # This isn't really clean and ultimately I think this
        # class should serve a single purpose (either take an
        # event_list or autocreate events).
        self.count = kwargs.get('count', len(self.event_list))
        self.start = kwargs.get('start', self.event_list[0].dt)
        self.end = kwargs.get('end', self.event_list[-1].dt)
        # Lazy fallback: only touch event_list[1] when no delta given.
        self.delta = kwargs.get('delta')
        if self.delta is None:
            self.delta = self.event_list[1].dt - self.event_list[0].dt
        self.concurrent = kwargs.get('concurrent', False)
        self.identifiers = kwargs.get(
            'sids',
            set(event.sid for event in self.event_list)
        )
    else:
        # Unpack config dictionary with default values.
        self.count = kwargs.get('count', 500)
        self.start = kwargs.get(
            'start',
            datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
        self.end = kwargs.get(
            'end',
            datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
        self.delta = kwargs.get(
            'delta',
            timedelta(minutes=1))
        self.concurrent = kwargs.get('concurrent', False)
        self.identifiers = kwargs.get('sids', [1, 2])
    # Resolve identifiers to assets once for both branches (this lookup
    # loop was previously duplicated verbatim in each branch).
    assets_by_identifier = {
        identifier: env.asset_finder.lookup_generic(
            identifier, datetime.now())[0]
        for identifier in self.identifiers
    }
    self.sids = [asset.sid for asset in assets_by_identifier.values()]
    if self.event_list is not None:
        # Rewrite event sids in terms of the resolved assets.
        for event in self.event_list:
            event.sid = assets_by_identifier[event.sid].sid
    # Hash_value for downstream sorting.
    self.arg_string = hash_args(*args, **kwargs)
    self.generator = self.create_fresh_generator()
示例4: __init__
def __init__(self, tnfm_class, *args, **kwargs):
    """Wrap *tnfm_class* as a stateful transform with append flags.

    The class must expose an ``update`` method; *args*/*kwargs* are
    forwarded unchanged to its constructor.
    """
    assert isinstance(tnfm_class, (types.ObjectType, types.ClassType)), \
        "Stateful transform requires a class."
    assert hasattr(tnfm_class, 'update'), \
        "Stateful transform requires the class to have an update method"
    # Passthrough transforms mark themselves with a PASSTHROUGH
    # attribute; the flag triggers special handling when this transform
    # is fed to merged_transforms.
    self.passthrough = hasattr(tnfm_class, 'PASSTHROUGH')
    # How the calculated value gets appended: merged is the default for
    # ease of testing, while production uses sequential.
    self.sequential = False
    self.merged = True
    if isinstance(tnfm_class, TransformMeta):
        # TransformMeta overrides __call__ (the usual instance factory),
        # so instantiation is delegated to its parent class 'type' —
        # what the interpreter does implicitly for ordinary classes.
        self.state = super(TransformMeta, tnfm_class).__call__(
            *args, **kwargs)
    else:
        # Plain class: ordinary instantiation.
        self.state = tnfm_class(*args, **kwargs)
    # Unique name string for this generator's output.
    self.namestring = tnfm_class.__name__ + hash_args(*args, **kwargs)
示例5: __init__
def __init__(self, fname):
    """Remember the source filename and precompute its argument hash."""
    self.fname = fname
    # Hash value used downstream for sorting/identifying this source.
    self.arg_string = hash_args(fname)
    # Lazily-populated raw data cache.
    self._raw_data = None
示例6: __init__
def __init__(self, start_prices=None, freq='minute', start=None,
             end=None, calendar=calendar_nyse):
    """Random-walk price source.

    Parameters
    ----------
    start_prices : dict, optional
        Mapping sid -> starting price. Defaults to {0: 100, 1: 500}.
    freq : str
        Emission frequency, 'daily' or 'minute' (default 'minute').
    start, end : datetime, optional
        Emission window; default to the calendar's start / end_base.
    calendar : calendar object
        Trading calendar to use (default NYSE). See zipline.utils for
        the available choices.

    Example
    -------
    # Assumes your algorithm is instantiated as myalgo.
    myalgo = MyAlgo()
    source = RandomWalkSource()
    myalgo.run(source)
    """
    # Hash value for downstream sorting.
    self.arg_string = hash_args(start_prices, freq, start, end,
                                calendar.__name__)
    if freq not in self.VALID_FREQS:
        raise ValueError('%s not in %s' % (freq, self.VALID_FREQS))
    self.freq = freq
    self.start_prices = ({0: 100, 1: 500}
                         if start_prices is None else start_prices)
    self.calendar = calendar
    self.start = calendar.start if start is None else start
    self.end = calendar.end_base if end is None else end
    # Random-walk parameters: drift and standard deviation.
    self.drift = .1
    self.sd = .1
    self.sids = self.start_prices.keys()
    # Restrict the calendar's sessions to the requested window.
    self.open_and_closes = \
        calendar.open_and_closes[self.start:self.end]
    self._raw_data = None
示例7: __init__
def __init__(self, env=None, *args, **kwargs):
    """Event source configured from kwargs or an explicit event_list.

    Parameters
    ----------
    env : trading environment exposing ``update_asset_finder`` and
        ``asset_finder`` (required in practice despite the None default).
    kwargs : event_list, filter, count, start, end, delta, concurrent,
        sids — all optional, with defaults derived from event_list when
        it is provided.
    """
    # We shouldn't get any positional arguments.
    assert len(args) == 0
    # Default to None for event_list and filter.
    self.event_list = kwargs.get('event_list')
    self.filter = kwargs.get('filter')
    if self.event_list is not None:
        # If event_list is provided, extract parameters from there
        # This isn't really clean and ultimately I think this
        # class should serve a single purpose (either take an
        # event_list or autocreate events).
        self.count = kwargs.get('count', len(self.event_list))
        self.start = kwargs.get('start', self.event_list[0].dt)
        self.end = kwargs.get('end', self.event_list[-1].dt)
        # Fix: compute the fallback lazily. The previous
        # kwargs.get('delta', <expr>) form evaluated event_list[1].dt
        # eagerly, raising IndexError on a single-event list even when
        # an explicit delta was supplied.
        self.delta = kwargs.get('delta')
        if self.delta is None:
            self.delta = self.event_list[1].dt - self.event_list[0].dt
        self.concurrent = kwargs.get('concurrent', False)
        self.identifiers = kwargs.get(
            'sids',
            set(event.sid for event in self.event_list)
        )
    else:
        # Unpack config dictionary with default values.
        self.count = kwargs.get('count', 500)
        self.start = kwargs.get(
            'start',
            datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
        self.end = kwargs.get(
            'end',
            datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
        self.delta = kwargs.get(
            'delta',
            timedelta(minutes=1))
        self.concurrent = kwargs.get('concurrent', False)
        self.identifiers = kwargs.get('sids', [1, 2])
    # Register and resolve identifiers once for both branches (this
    # code was previously duplicated verbatim in each branch).
    env.update_asset_finder(identifiers=self.identifiers)
    self.sids = [
        env.asset_finder.retrieve_asset_by_identifier(identifier).sid
        for identifier in self.identifiers
    ]
    if self.event_list is not None:
        # Rewrite event sids in terms of the resolved assets.
        for event in self.event_list:
            event.sid = env.asset_finder.\
                retrieve_asset_by_identifier(event.sid).sid
    # Hash_value for downstream sorting.
    self.arg_string = hash_args(*args, **kwargs)
    self.generator = self.create_fresh_generator()
示例8: __init__
def __init__(self, rows, cols, datasource_type, time_zone='US/Eastern'):
    """Synthetic data source with the given row/column layout."""
    self.rows = rows
    self.cols = cols
    self.datasource_type = datasource_type
    # Mandatory attributes for the Zipline DataSource contract.
    # NOTE(review): only `cols` feeds the hash — confirm that excluding
    # rows/datasource_type is intentional.
    self.arg_string = hash_args(cols)
    self._raw_data = None
    self.time_zone = timezone(time_zone)
示例9: __init__
def __init__(self, data, **kwargs):
    """HDF5-backed source reading from an open PyTables file.

    Parameters
    ----------
    data : tables.file.File
        Open HDF5 file handle.
    kwargs : sids, start, end, source_id, root — all optional;
        ``root`` names the group under which signals live
        (default "signal").
    """
    assert isinstance(data, tables.file.File)
    self.h5file = data
    self.sids = kwargs.get('sids', None)
    self.start = kwargs.get('start')
    self.end = kwargs.get('end')
    self.source_id = kwargs.get("source_id", None)
    self.arg_string = hash_args(data, **kwargs)
    self._raw_data = None
    # Fix: the original line began with a stray unary '+' applied to the
    # string literal (+"/"), which raises TypeError at runtime.
    self.root_node = "/" + kwargs.get('root', "signal") + "/"
示例10: __init__
def __init__(self, **kwargs):
    """Configure the CSV-backed source from keyword arguments."""
    # Identifying hash for downstream sorting; the literal tag keeps
    # this source type distinct from others built from the same kwargs.
    self.arg_string = hash_args('TempCSVDataSource', **kwargs)
    self.sids = kwargs.get('stocks')
    self.start = kwargs.get('start')
    self.end = kwargs.get('end')
    # Sids that have already emitted their first event.
    self.started_sids = set()
    self._raw_data = None
示例11: __init__
def __init__(self, data, **kwargs):
    """DataFrame-backed source; sids/start/end default from the frame."""
    self.data = data
    # Unpack config, falling back to the frame's own columns/index.
    index = data.index
    self.sids = kwargs.get('sids', data.columns)
    self.start = kwargs.get('start', index[0])
    self.end = kwargs.get('end', index[-1])
    # Hash value for downstream sorting.
    self.arg_string = hash_args(data, **kwargs)
    self._raw_data = None
示例12: __init__
def __init__(self, data, **kwargs):
    """Source backed by a csv.DictReader of row dicts."""
    assert isinstance(data, csv.DictReader)
    self.data = data
    self.source_id = kwargs.get("source_id", None)
    # Unpack remaining config keys; each defaults to None.
    for key in ('start', 'end', 'sids', 'sid_filter'):
        setattr(self, key, kwargs.get(key))
    # Hash value for downstream sorting.
    self.arg_string = hash_args(data, **kwargs)
    self._raw_data = None
示例13: __init__
def __init__(self, data, **kwargs):
    """DataFrame-backed source; requires a DatetimeIndex."""
    assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
    self.data = data
    # Unpack config, falling back to the frame's own columns/index.
    dates = data.index
    self.sids = kwargs.get("sids", data.columns)
    self.start = kwargs.get("start", dates[0])
    self.end = kwargs.get("end", dates[-1])
    # Hash value for downstream sorting.
    self.arg_string = hash_args(data, **kwargs)
    self._raw_data = None
示例14: __init__
def __init__(self, data, **kwargs):
    """Panel-backed source; the major axis must be a DatetimeIndex."""
    assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
    self.data = data
    # Unpack config, falling back to the panel's own items/major_axis.
    dates = data.major_axis
    self.sids = kwargs.get('sids', data.items)
    self.start = kwargs.get('start', dates[0])
    self.end = kwargs.get('end', dates[-1])
    # Hash value for downstream sorting.
    self.arg_string = hash_args(data, **kwargs)
    self._raw_data = None
示例15: __init__
def __init__(self, data_descriptor, **kwargs):
    """Source driven by a descriptor dict with 'index' and 'tickers'."""
    index = data_descriptor['index']
    assert isinstance(index, pd.tseries.index.DatetimeIndex)
    self.data_descriptor = data_descriptor
    # Unpack config, falling back to the descriptor's own fields.
    self.sids = kwargs.get('sids', data_descriptor['tickers'])
    self.start = kwargs.get('start', index[0])
    self.end = kwargs.get('end', index[-1])
    # Hash value for downstream sorting.
    self.arg_string = hash_args(data_descriptor, **kwargs)
    self._raw_data = None
    self.feed = DataFeed()