本文整理汇总了Python中zipline.testing.str_to_seconds函数的典型用法代码示例。如果您正苦于以下问题:Python str_to_seconds函数的具体用法?Python str_to_seconds怎么用?Python str_to_seconds使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了str_to_seconds函数的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_mergers_data
def make_mergers_data(cls):
    """Build the mergers adjustment frame used by the test fixtures.

    Each record carries the effective date (as epoch seconds), the price
    ratio applied on that date, and the sid of the affected asset.
    """
    merger_records = [
        dict(
            effective_date=str_to_seconds('2016-01-06'),
            ratio=0.5,
            sid=cls.MERGER_ASSET_SID,
        ),
        dict(
            effective_date=str_to_seconds('2016-01-07'),
            ratio=0.6,
            sid=cls.ILLIQUID_MERGER_ASSET_SID,
        ),
    ]
    return pd.DataFrame.from_records(merger_records)
示例2: make_splits_data
def make_splits_data(cls):
    """Build the splits adjustment frame: a 2:1 split for each split asset."""
    split_rows = []
    for date, asset_sid in (
        ("2016-01-06", cls.SPLIT_ASSET_SID),
        ("2016-01-07", cls.ILLIQUID_SPLIT_ASSET_SID),
    ):
        split_rows.append({
            'effective_date': str_to_seconds(date),
            'ratio': 0.5,
            'sid': asset_sid,
        })
    return pd.DataFrame.from_records(split_rows)
示例3: create_adjustment_reader
def create_adjustment_reader(cls, tempdir):
    """Write an adjustments DB into *tempdir* and open a reader on it.

    Only splits carry data (a 1/7 split on AAPL effective 2014-06-09);
    mergers and dividends are written as empty frames with the dtypes
    the writer expects.
    """
    db_path = tempdir.getpath('adjustments.sqlite')
    adjustment_writer = SQLiteAdjustmentWriter(
        db_path,
        cls.env.trading_days,
        MockDailyBarSpotReader(),
    )

    split_frame = DataFrame.from_records([{
        'effective_date': str_to_seconds('2014-06-09'),
        'ratio': 1 / 7.0,
        'sid': cls.AAPL,
    }])

    # Empty mergers frame; explicit array dtypes keep the schema correct
    # even though no rows are present.
    merger_frame = DataFrame(
        {
            'effective_date': array([], dtype=int),
            'ratio': array([], dtype=float),
            'sid': array([], dtype=int),
        },
        index=DatetimeIndex([]),
        columns=['effective_date', 'ratio', 'sid'],
    )

    # Empty dividends frame, likewise typed per column.
    dividend_frame = DataFrame({
        'sid': array([], dtype=uint32),
        'amount': array([], dtype=float64),
        'record_date': array([], dtype='datetime64[ns]'),
        'ex_date': array([], dtype='datetime64[ns]'),
        'declared_date': array([], dtype='datetime64[ns]'),
        'pay_date': array([], dtype='datetime64[ns]'),
    })

    adjustment_writer.write(split_frame, merger_frame, dividend_frame)
    return SQLiteAdjustmentReader(db_path)
示例4: create_adjustments_reader
def create_adjustments_reader(cls):
    """Write a single-split adjustments DB and return a reader over it.

    Mergers and dividends are not exercised by these tests, but the
    writer requires them, so empty frames coerced to the expected
    column dtypes are supplied.
    """
    db_path = cls.tempdir.getpath("test_adjustments.db")
    writer = SQLiteAdjustmentWriter(
        db_path,
        cls.env.trading_days,
        MockDailyBarReader(),
    )

    splits_frame = pd.DataFrame([{
        'effective_date': str_to_seconds("2016-01-06"),
        'ratio': 0.5,
        'sid': cls.asset3.sid,
    }])

    # Empty placeholder frames; astype fixes the dtypes the writer checks.
    empty_mergers = pd.DataFrame(
        {}, columns=['effective_date', 'ratio', 'sid'],
    ).astype({
        'effective_date': np.int64,
        'ratio': np.float64,
        'sid': np.int64,
    })
    empty_dividends = pd.DataFrame(
        {},
        columns=['ex_date', 'record_date', 'declared_date', 'pay_date',
                 'amount', 'sid'],
    ).astype({'amount': np.float64, 'sid': np.int64})

    writer.write(splits_frame, empty_mergers, empty_dividends)
    return SQLiteAdjustmentReader(db_path)
示例5: make_splits_data
def make_splits_data(cls):
    """Return a splits frame with one 2:1 split for sid 3 on 2016-01-06."""
    split_record = {
        'effective_date': str_to_seconds('2016-01-06'),
        'ratio': 0.5,
        'sid': 3,
    }
    return pd.DataFrame([split_record])
示例6: make_splits_data
def make_splits_data(cls):
    """Return a splits frame: a single 1-for-7 split on AAPL, 2014-06-09."""
    return DataFrame.from_records([
        dict(
            effective_date=str_to_seconds('2014-06-09'),
            ratio=1 / 7.0,
            sid=cls.AAPL,
        ),
    ])
示例7: DataFrame
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
SPLITS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.103,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.110,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.112,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-21'),
'ratio': 6.121,
'sid': 6},
# Another action in query range, should have last_row of 1
示例8: make_splits_data
def make_splits_data(cls):
    """Return a splits frame: a single 1/7 split for AAPL on 2014-06-09."""
    record = {
        "effective_date": str_to_seconds("2014-06-09"),
        "ratio": 1 / 7.0,
        "sid": cls.AAPL,
    }
    return DataFrame.from_records([record])
示例9: test_ingest
def test_ingest(self):
start = pd.Timestamp('2014-01-06', tz='utc')
end = pd.Timestamp('2014-01-10', tz='utc')
calendar = get_calendar('NYSE')
sessions = calendar.sessions_in_range(start, end)
minutes = calendar.minutes_for_sessions_in_range(start, end)
sids = tuple(range(3))
equities = make_simple_equity_info(
sids,
start,
end,
)
daily_bar_data = make_bar_data(equities, sessions)
minute_bar_data = make_bar_data(equities, minutes)
first_split_ratio = 0.5
second_split_ratio = 0.1
splits = pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2014-01-08'),
'ratio': first_split_ratio,
'sid': 0,
},
{
'effective_date': str_to_seconds('2014-01-09'),
'ratio': second_split_ratio,
'sid': 1,
},
])
@self.register(
'bundle',
calendar=calendar,
start_session=start,
end_session=end,
)
def bundle_ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
assert_is(environ, self.environ)
asset_db_writer.write(equities=equities)
minute_bar_writer.write(minute_bar_data)
daily_bar_writer.write(daily_bar_data)
adjustment_writer.write(splits=splits)
assert_is_instance(calendar, TradingCalendar)
assert_is_instance(cache, dataframe_cache)
assert_is_instance(show_progress, bool)
self.ingest('bundle', environ=self.environ)
bundle = self.load('bundle', environ=self.environ)
assert_equal(set(bundle.asset_finder.sids), set(sids))
columns = 'open', 'high', 'low', 'close', 'volume'
actual = bundle.equity_minute_bar_reader.load_raw_arrays(
columns,
minutes[0],
minutes[-1],
sids,
)
for actual_column, colname in zip(actual, columns):
assert_equal(
actual_column,
expected_bar_values_2d(minutes, equities, colname),
msg=colname,
)
actual = bundle.equity_daily_bar_reader.load_raw_arrays(
columns,
start,
end,
sids,
)
for actual_column, colname in zip(actual, columns):
assert_equal(
actual_column,
expected_bar_values_2d(sessions, equities, colname),
msg=colname,
)
adjustments_for_cols = bundle.adjustment_reader.load_adjustments(
columns,
sessions,
pd.Index(sids),
)
for column, adjustments in zip(columns, adjustments_for_cols[:-1]):
# iterate over all the adjustments but `volume`
#.........这里部分代码省略.........
示例10: test_ingest
def test_ingest(self):
    """End-to-end bundle ingestion round-trip.

    Registers a bundle whose ingest function writes synthetic equity
    metadata, minute/daily bars, and split adjustments; ingests and
    reloads the bundle; then verifies that bar values and adjustment
    mappings read back exactly as written.
    """
    # Build the synthetic universe: 3 sids over the class's date window.
    calendar = get_calendar("NYSE")
    sessions = calendar.sessions_in_range(self.START_DATE, self.END_DATE)
    minutes = calendar.minutes_for_sessions_in_range(self.START_DATE, self.END_DATE)
    sids = tuple(range(3))
    equities = make_simple_equity_info(sids, self.START_DATE, self.END_DATE)
    daily_bar_data = make_bar_data(equities, sessions)
    minute_bar_data = make_bar_data(equities, minutes)
    # Two splits on different days/sids so their adjustment entries are
    # distinguishable in the assertions below.
    first_split_ratio = 0.5
    second_split_ratio = 0.1
    splits = pd.DataFrame.from_records(
        [
            {"effective_date": str_to_seconds("2014-01-08"), "ratio": first_split_ratio, "sid": 0},
            {"effective_date": str_to_seconds("2014-01-09"), "ratio": second_split_ratio, "sid": 1},
        ]
    )
    # Register the bundle; the decorated function runs when self.ingest()
    # is called and receives the writers/config from the bundle machinery.
    @self.register("bundle", calendar_name="NYSE", start_session=self.START_DATE, end_session=self.END_DATE)
    def bundle_ingest(
        environ,
        asset_db_writer,
        minute_bar_writer,
        daily_bar_writer,
        adjustment_writer,
        calendar,
        start_session,
        end_session,
        cache,
        show_progress,
        output_dir,
    ):
        # The machinery must pass through exactly the environ we configured.
        assert_is(environ, self.environ)
        asset_db_writer.write(equities=equities)
        minute_bar_writer.write(minute_bar_data)
        daily_bar_writer.write(daily_bar_data)
        adjustment_writer.write(splits=splits)
        assert_is_instance(calendar, TradingCalendar)
        assert_is_instance(cache, dataframe_cache)
        assert_is_instance(show_progress, bool)
    # Run ingestion, then load the bundle back from disk.
    self.ingest("bundle", environ=self.environ)
    bundle = self.load("bundle", environ=self.environ)
    assert_equal(set(bundle.asset_finder.sids), set(sids))
    columns = "open", "high", "low", "close", "volume"
    # Minute bars must round-trip exactly against the synthetic data.
    actual = bundle.equity_minute_bar_reader.load_raw_arrays(columns, minutes[0], minutes[-1], sids)
    for actual_column, colname in zip(actual, columns):
        assert_equal(actual_column, expected_bar_values_2d(minutes, equities, colname), msg=colname)
    # Daily bars likewise.
    actual = bundle.equity_daily_bar_reader.load_raw_arrays(columns, self.START_DATE, self.END_DATE, sids)
    for actual_column, colname in zip(actual, columns):
        assert_equal(actual_column, expected_bar_values_2d(sessions, equities, colname), msg=colname)
    # All price columns should share the same split adjustments.
    adjustments_for_cols = bundle.adjustment_reader.load_adjustments(columns, sessions, pd.Index(sids))
    for column, adjustments in zip(columns, adjustments_for_cols[:-1]):
        # iterate over all the adjustments but `volume`
        assert_equal(
            adjustments,
            {
                2: [Float64Multiply(first_row=0, last_row=2, first_col=0, last_col=0, value=first_split_ratio)],
                3: [Float64Multiply(first_row=0, last_row=3, first_col=1, last_col=1, value=second_split_ratio)],
            },
            msg=column,
        )
    # check the volume, the value should be 1/ratio
    assert_equal(
        adjustments_for_cols[-1],
        {
            2: [Float64Multiply(first_row=0, last_row=2, first_col=0, last_col=0, value=1 / first_split_ratio)],
            3: [Float64Multiply(first_row=0, last_row=3, first_col=1, last_col=1, value=1 / second_split_ratio)],
        },
        msg="volume",
    )