This page collects typical usage examples of the Python method pykalman.KalmanFilter.loglikelihood. If you are wondering what KalmanFilter.loglikelihood does, or how to use it in practice, the curated code examples below may help. You can also read further about the containing class, pykalman.KalmanFilter.
Below are 3 code examples of KalmanFilter.loglikelihood, sorted by popularity by default.
Example 1: test_kalman_fit
# Module to import: from pykalman import KalmanFilter [as alias]
# Alternative: from pykalman.KalmanFilter import loglikelihood [as alias]
import numpy as np
from nose.tools import assert_true
from pykalman import KalmanFilter
from pykalman.datasets import load_robot

data = load_robot()  # MATLAB-generated robot dataset bundled with pykalman

def test_kalman_fit():
    # check against MATLAB dataset
    kf = KalmanFilter(
        data.transition_matrix,
        data.observation_matrix,
        data.initial_transition_covariance,
        data.initial_observation_covariance,
        data.transition_offsets,
        data.observation_offset,
        data.initial_state_mean,
        data.initial_state_covariance,
        em_vars=['transition_covariance', 'observation_covariance'])

    # run EM one iteration at a time, recording the log-likelihood
    # before each step
    loglikelihoods = np.zeros(5)
    for i in range(len(loglikelihoods)):
        loglikelihoods[i] = kf.loglikelihood(data.observations)
        kf.em(X=data.observations, n_iter=1)
    assert_true(np.allclose(loglikelihoods, data.loglikelihoods[:5]))

    # check that EM for all parameters is working
    kf.em_vars = 'all'
    n_timesteps = 30
    for i in range(len(loglikelihoods)):
        kf.em(X=data.observations[0:n_timesteps], n_iter=1)
        loglikelihoods[i] = kf.loglikelihood(data.observations[0:n_timesteps])
    for i in range(len(loglikelihoods) - 1):
        assert_true(loglikelihoods[i] < loglikelihoods[i + 1])
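The pattern above, calling loglikelihood around single-iteration em steps, is a handy way to watch EM converge. Here is a minimal, self-contained sketch of the same idea on synthetic data; the random-walk model and all variable names below are illustrative, not part of pykalman's test suite:

import numpy as np
from pykalman import KalmanFilter

# Illustrative data: a 1-D random walk observed with Gaussian noise.
rng = np.random.RandomState(0)
states = np.cumsum(rng.standard_normal(50))
observations = states + 0.5 * rng.standard_normal(50)

# Leave the noise covariances unspecified and let EM estimate them.
kf = KalmanFilter(n_dim_state=1, n_dim_obs=1,
                  em_vars=['transition_covariance', 'observation_covariance'])
previous = -np.inf
for _ in range(5):
    kf = kf.em(observations, n_iter=1)
    current = kf.loglikelihood(observations)
    # EM should not decrease the log-likelihood (small numerical tolerance)
    assert current >= previous - 1e-8
    previous = current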
Example 2: test_kalman_pickle
# Module to import: from pykalman import KalmanFilter [as alias]
# Alternative: from pykalman.KalmanFilter import loglikelihood [as alias]
import pickle
import numpy as np
from io import BytesIO, StringIO
from pykalman import KalmanFilter
from pykalman.datasets import load_robot

data = load_robot()

def test_kalman_pickle():
    kf = KalmanFilter(
        data.transition_matrix,
        data.observation_matrix,
        data.transition_covariance,
        data.observation_covariance,
        data.transition_offsets,
        data.observation_offset,
        data.initial_state_mean,
        data.initial_state_covariance,
        em_vars='all')

    # train and get log likelihood
    X = data.observations[0:10]
    kf = kf.em(X, n_iter=5)
    loglikelihood = kf.loglikelihood(X)

    # pickle the Kalman filter through a StringIO buffer
    # (Python 2 only: on Python 3, pickle writes bytes, so use BytesIO below)
    store = StringIO()
    pickle.dump(kf, store)
    clf = pickle.load(StringIO(store.getvalue()))

    # check that the parameters survived the round trip
    np.testing.assert_almost_equal(loglikelihood, clf.loglikelihood(X))

    # store it as BytesIO as well
    store = BytesIO()
    pickle.dump(kf, store)
    kf = pickle.load(BytesIO(store.getvalue()))
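On Python 3, only the BytesIO branch of the test above works, since pickle writes bytes. A shorter sketch of the same round-trip check using pickle.dumps/pickle.loads, which avoids the buffer entirely (the two-iteration fit here is chosen arbitrarily):

import pickle
import numpy as np
from pykalman import KalmanFilter
from pykalman.datasets import load_robot

data = load_robot()
kf = KalmanFilter(data.transition_matrix, data.observation_matrix)
kf = kf.em(data.observations[0:10], n_iter=2)

# An unpickled filter should reproduce the fitted model exactly.
restored = pickle.loads(pickle.dumps(kf))
np.testing.assert_almost_equal(
    kf.loglikelihood(data.observations[0:10]),
    restored.loglikelihood(data.observations[0:10]))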
Example 3: range
# Module to import: from pykalman import KalmanFilter [as alias]
# Alternative: from pykalman.KalmanFilter import loglikelihood [as alias]
import numpy as np
from pykalman import KalmanFilter
from pykalman.datasets import load_robot

data = load_robot()

# The snippet's opening lines were cut off; the leading constructor
# arguments below are reconstructed to follow the same pattern as
# Examples 1 and 2.
kf = KalmanFilter(
    data.transition_matrix,
    data.observation_matrix,
    data.initial_transition_covariance,
    data.initial_observation_covariance,
    data.transition_offsets,
    data.observation_offset,
    data.initial_state_mean,
    data.initial_state_covariance,
    em_vars=[
        'transition_matrices', 'observation_matrices',
        'transition_covariance', 'observation_covariance',
        'observation_offsets', 'initial_state_mean',
        'initial_state_covariance'
    ]
)

# Learn good values for parameters named in `em_vars` using the EM algorithm
loglikelihoods = np.zeros(10)
for i in range(len(loglikelihoods)):
    kf = kf.em(X=data.observations, n_iter=1)
    loglikelihoods[i] = kf.loglikelihood(data.observations)

# Estimate the state without using any observations. This will let us see how
# well we could do if we ran blind.
n_dim_state = data.transition_matrix.shape[0]
n_timesteps = data.observations.shape[0]
blind_state_estimates = np.zeros((n_timesteps, n_dim_state))
for t in range(n_timesteps - 1):
    if t == 0:
        blind_state_estimates[t] = kf.initial_state_mean
    blind_state_estimates[t + 1] = (
        np.dot(kf.transition_matrices, blind_state_estimates[t])
        + kf.transition_offsets[t]
    )

# Estimate the hidden states using observations up to and including time t
filtered_state_estimates = kf.filter(data.observations)[0]
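The demo this snippet comes from goes on to compare the blind extrapolation against estimates that actually use the observations. A sketch of that comparison, assuming the kf, data, and estimate arrays from above, and assuming the dataset's true states are available in data.states (the field pykalman's plot_em example plots):

# Smoothed estimates additionally condition on future observations.
smoothed_state_estimates = kf.smooth(data.observations)[0]

# Root-mean-square error of each estimate against the true states.
for name, est in [('blind', blind_state_estimates),
                  ('filtered', filtered_state_estimates),
                  ('smoothed', smoothed_state_estimates)]:
    rmse = np.sqrt(np.mean((est - data.states) ** 2))
    print('%-8s RMSE: %.4f' % (name, rmse))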