This article collects typical usage examples of the pystruct.inference.get_installed function in Python. If you are wondering how get_installed is actually used in practice, the curated examples below should help.
The following shows 15 code examples of get_installed, sorted by popularity by default.
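Before the examples, here is a minimal sketch of the call pattern (assuming only that numpy and pystruct are importable): get_installed takes an optional preference list of inference backends and returns the subset that can actually be imported on the current machine, preserving the given order, so the first element is the best available method. The toy chain and the inference_dispatch call mirror the usage seen in Examples 8 and 12 below; the exact output depends on which backends are installed.

import numpy as np
from pystruct.inference import get_installed, inference_dispatch

# Which of the preferred backends are importable here? Order is preserved.
available = get_installed(['ad3', 'qpbo', 'lp'])
print(available)

if available:
    best = available[0]  # first (most preferred) backend that is installed
    # Toy chain problem: 5 nodes, 3 states, edges 0-1, 1-2, 2-3, 3-4.
    unaries = np.random.normal(size=(5, 3))
    pairwise = np.eye(3)  # reward equal labels on neighboring nodes
    edges = np.c_[np.arange(4), np.arange(1, 5)]
    labels = inference_dispatch(unaries, pairwise, edges, best)
    print(best, labels)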
Example 1: test_edge_feature_latent_node_crf_no_latent
def test_edge_feature_latent_node_crf_no_latent():
    # no latent nodes
    # Test inference with different weights in different directions
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1, size_x=10)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]
    edge_list = make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)
    pw_horz = -1 * np.eye(n_states + 5)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1
    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states + 5)
    pw_vert[xx != yy] = 1
    pw_vert *= 10
    # generate edge weights
    edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
                                        edge_list[0].shape[0], axis=0)
    edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
                                      edge_list[1].shape[0], axis=0)
    edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])
    # do inference
    # pad x for hidden states...
    x_padded = -100 * np.ones((x.shape[0], x.shape[1], x.shape[2] + 5))
    x_padded[:, :, :x.shape[2]] = x
    res = lp_general_graph(-x_padded.reshape(-1, n_states + 5), edges,
                           edge_weights)
    edge_features = edge_list_to_features(edge_list)
    x = (x.reshape(-1, n_states), edges, edge_features, 0)
    y = y.ravel()

    for inference_method in get_installed(["lp"]):
        # same inference through CRF interface
        crf = EdgeFeatureLatentNodeCRF(n_labels=3,
                                       inference_method=inference_method,
                                       n_edge_features=2, n_hidden_states=5)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=True)
        assert_array_almost_equal(res[0], y_pred[0].reshape(-1, n_states + 5),
                                  4)
        assert_array_almost_equal(res[1], y_pred[1], 4)
        assert_array_equal(y, np.argmax(y_pred[0], axis=-1))

    for inference_method in get_installed(["lp", "ad3", "qpbo"]):
        # again, this time discrete predictions only
        crf = EdgeFeatureLatentNodeCRF(n_labels=3,
                                       inference_method=inference_method,
                                       n_edge_features=2, n_hidden_states=5)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=False)
        assert_array_equal(y, y_pred)
Example 2: test_edge_type_graph_crf
def test_edge_type_graph_crf():
    # create two samples with different graphs
    # two states only, pairwise smoothing
    # all edges are of the first type. should do the same as GraphCRF
    # if we make w symmetric
    for inference_method in get_installed(['qpbo', 'lp', 'ad3', 'dai', 'ogm']):
        crf = EdgeTypeGraphCRF(n_states=2, inference_method=inference_method,
                               n_edge_types=1)
        assert_array_equal(crf.inference((x_1, [g_1]), w_sym), y_1)
        assert_array_equal(crf.inference((x_2, [g_2]), w_sym), y_2)

    # same, only with two edge types and no edges of second type
    w_sym_ = np.array([1, 0,    # unary
                       0, 1,
                       .22, 0,  # pairwise
                       0, .22,
                       2, -1,   # second edge type, doesn't exist
                       -1, 3])
    for inference_method in get_installed(['qpbo', 'lp', 'ad3', 'dai', 'ogm']):
        crf = EdgeTypeGraphCRF(n_states=2, inference_method=inference_method,
                               n_edge_types=2)
        assert_array_equal(crf.inference((x_1,
                                          [g_1, np.zeros((0, 2), dtype=int)]),
                                         w_sym_), y_1)
        assert_array_equal(crf.inference((x_2,
                                          [g_2, np.zeros((0, 2), dtype=int)]),
                                         w_sym_), y_2)
        print(crf.get_pairwise_potentials((x_2,
                                           [g_2, np.zeros((0, 2), dtype=int)]),
                                          w_sym_))
Example 3: test_switch_to_ad3
def test_switch_to_ad3():
    # test if switching between qpbo and ad3 works
    if not get_installed(['qpbo']) or not get_installed(['ad3']):
        return
    X, Y = toy.generate_blocks_multinomial(n_samples=5, noise=1.5, seed=0)
    crf = GridCRF(n_states=3, inference_method='qpbo')

    ssvm = NSlackSSVM(crf, max_iter=10000)
    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'))
    ssvm.fit(X, Y)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
    # we check that the dual is higher with ad3 inference,
    # as it might use the relaxation; that is pretty much guaranteed
    assert_greater(ssvm_with_switch.objective_curve_[-1],
                   ssvm.objective_curve_[-1])
    print(ssvm_with_switch.objective_curve_[-1], ssvm.objective_curve_[-1])

    # test that convergence also results in switch
    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'),
                                  tol=10)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
Example 4: test_switch_to_ad3
def test_switch_to_ad3():
    # smoketest only
    # test if switching between qpbo and ad3 works inside latent svm
    # use less perfect initialization
    if not get_installed(['qpbo']) or not get_installed(['ad3']):
        return
    X, Y = generate_crosses(n_samples=20, noise=5, n_crosses=1, total_size=8)
    X_test, Y_test = X[10:], Y[10:]
    X, Y = X[:10], Y[:10]

    crf = LatentGridCRF(n_states_per_label=2,
                        inference_method='qpbo')
    crf.initialize(X, Y)
    H_init = crf.init_latent(X, Y)
    np.random.seed(0)
    mask = np.random.uniform(size=H_init.shape) > .7
    H_init[mask] = 2 * (H_init[mask] / 2)

    base_ssvm = OneSlackSSVM(crf, inactive_threshold=1e-8, cache_tol=.0001,
                             inference_cache=50, max_iter=10000,
                             switch_to=('ad3', {'branch_and_bound': True}),
                             C=10. ** 3)
    clf = LatentSSVM(base_ssvm)
    clf.fit(X, Y, H_init=H_init)
    assert_equal(clf.model.inference_method[0], 'ad3')
    Y_pred = clf.predict(X)
    assert_array_equal(np.array(Y_pred), Y)
    # test that score is not always 1
    assert_true(.98 < clf.score(X_test, Y_test) < 1)
Example 5: test_inference
def test_inference():
    # Test inference with different weights in different directions
    X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]
    edge_list = make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)
    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1
    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10
    # generate edge weights
    edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
                                        edge_list[0].shape[0], axis=0)
    edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
                                      edge_list[1].shape[0], axis=0)
    edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])
    # do inference
    res = lp_general_graph(-x.reshape(-1, n_states), edges, edge_weights)
    edge_features = edge_list_to_features(edge_list)
    x = (x.reshape(-1, n_states), edges, edge_features)
    y = y.ravel()

    for inference_method in get_installed(["lp", "ad3"]):
        # same inference through CRF interface
        crf = EdgeFeatureGraphCRF(n_states=3,
                                  inference_method=inference_method,
                                  n_edge_features=2)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=True)
        if isinstance(y_pred, tuple):
            # ad3 produces an integer result if it found the exact solution
            assert_array_almost_equal(res[1], y_pred[1])
            assert_array_almost_equal(res[0], y_pred[0].reshape(-1, n_states))
            assert_array_equal(y, np.argmax(y_pred[0], axis=-1))

    for inference_method in get_installed(["lp", "ad3", "qpbo"]):
        # again, this time discrete predictions only
        crf = EdgeFeatureGraphCRF(n_states=3,
                                  inference_method=inference_method,
                                  n_edge_features=2)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=False)
        assert_array_equal(y, y_pred)
Example 6: test_switch_to_ad3
def test_switch_to_ad3():
    # smoketest only
    # test if switching between qpbo and ad3 works inside latent svm
    # use less perfect initialization
    if not get_installed(["qpbo"]) or not get_installed(["ad3"]):
        return
    X, Y = generate_crosses(n_samples=20, noise=5, n_crosses=1, total_size=8)
    X_test, Y_test = X[10:], Y[10:]
    X, Y = X[:10], Y[:10]

    crf = LatentGridCRF(n_states_per_label=2, inference_method="qpbo")
    crf.initialize(X, Y)
    H_init = crf.init_latent(X, Y)
    np.random.seed(0)
    mask = np.random.uniform(size=H_init.shape) > 0.7
    H_init[mask] = 2 * (H_init[mask] / 2)

    base_ssvm = OneSlackSSVM(
        crf,
        inactive_threshold=1e-8,
        cache_tol=0.0001,
        inference_cache=50,
        max_iter=10000,
        switch_to=("ad3", {"branch_and_bound": True}),
        C=10.0 ** 3,
    )
    clf = LatentSSVM(base_ssvm)

    # evil hackery to get rid of ad3 output
    try:
        devnull = open("/dev/null", "w")
        oldstdout_fno = os.dup(sys.stdout.fileno())
        os.dup2(devnull.fileno(), 1)
        replaced_stdout = True
    except:
        replaced_stdout = False

    clf.fit(X, Y, H_init=H_init)
    if replaced_stdout:
        os.dup2(oldstdout_fno, 1)
    assert_equal(clf.model.inference_method[0], "ad3")
    Y_pred = clf.predict(X)
    assert_array_equal(np.array(Y_pred), Y)
    # test that score is not always 1
    assert_true(0.98 < clf.score(X_test, Y_test) < 1)
Example 7: test_psi_continuous
def test_psi_continuous():
    # FIXME
    # first make perfect prediction, including pairwise part
    X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]
    edge_list = make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)
    edge_features = edge_list_to_features(edge_list)
    x = (x.reshape(-1, 3), edges, edge_features)
    y = y.ravel()

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1
    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    for inference_method in get_installed(["lp", "ad3"]):
        crf = EdgeFeatureGraphCRF(n_states=3,
                                  inference_method=inference_method,
                                  n_edge_features=2)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=True)
        # compute psi for prediction
        psi_y = crf.psi(x, y_pred)
        assert_equal(psi_y.shape, (crf.size_psi,))
Example 8: test_chain
def test_chain():
    # test LP, AD3, AD3-BB and the OGM variants on a chain.
    # they should all be exact
    rnd = np.random.RandomState(0)
    algorithms = get_installed([('ad3', {'branch_and_bound': False}),
                                ('ad3', {'branch_and_bound': True}),
                                ('ogm', {'alg': 'dyn'}),
                                ('ogm', {'alg': 'dd'}),
                                ('ogm', {'alg': 'trw'})])
    n_states = 3
    n_nodes = 10

    for i in range(10):
        forward = np.c_[np.arange(n_nodes - 1), np.arange(1, n_nodes)]
        backward = np.c_[np.arange(1, n_nodes), np.arange(n_nodes - 1)]
        unary_potentials = rnd.normal(size=(n_nodes, n_states))
        pairwise_potentials = rnd.normal(size=(n_states, n_states))
        # test that reversing edges is same as transposing pairwise potentials
        y_forward = inference_dispatch(unary_potentials, pairwise_potentials,
                                       forward, 'lp')
        y_backward = inference_dispatch(unary_potentials,
                                        pairwise_potentials.T, backward, 'lp')
        assert_array_equal(y_forward, y_backward)
        for chain in [forward, backward]:
            y_lp = inference_dispatch(unary_potentials, pairwise_potentials,
                                      chain, 'lp')
            for alg in algorithms:
                if chain is backward and alg[0] == 'ogm':
                    # ogm needs sorted indices
                    continue
                y = inference_dispatch(unary_potentials, pairwise_potentials,
                                       chain, alg)
                assert_array_equal(y, y_lp)
Example 9: test_energy
def test_energy():
    # make sure that energy as computed by ssvm is the same as by lp
    np.random.seed(0)
    for inference_method in get_installed(["lp", "ad3"]):
        found_fractional = False
        crf = DirectionalGridCRF(n_states=3, n_features=3,
                                 inference_method=inference_method)
        while not found_fractional:
            x = np.random.normal(size=(7, 8, 3))
            unary_params = np.random.normal(size=(3, 3))
            pw1 = np.random.normal(size=(3, 3))
            pw2 = np.random.normal(size=(3, 3))
            w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
            res, energy = crf.inference(x, w, relaxed=True,
                                        return_energy=True)
            found_fractional = np.any(np.max(res[0], axis=-1) != 1)
            joint_feature = crf.joint_feature(x, res)
            energy_svm = np.dot(joint_feature, w)
            assert_almost_equal(energy, -energy_svm)
            if not found_fractional:
                # exact discrete labels, test non-relaxed version
                res, energy = crf.inference(x, w, relaxed=False,
                                            return_energy=True)
                joint_feature = crf.joint_feature(x, res)
                energy_svm = np.dot(joint_feature, w)
                assert_almost_equal(energy, -energy_svm)
Example 10: test_binary_blocks_cutting_plane
def test_binary_blocks_cutting_plane():
    # testing cutting plane ssvm on easy binary dataset
    # generate graphs explicitly for each example
    for inference_method in get_installed(["lp", "qpbo", "ad3", 'ogm']):
        X, Y = generate_blocks(n_samples=3)
        crf = GraphCRF(inference_method=inference_method)
        clf = NSlackSSVM(model=crf, max_iter=20, C=100,
                         check_constraints=True,
                         break_on_bad=False, n_jobs=1)
        x1, x2, x3 = X
        y1, y2, y3 = Y
        n_states = len(np.unique(Y))
        # delete some rows to make it more fun
        x1, y1 = x1[:, :-1], y1[:, :-1]
        x2, y2 = x2[:-1], y2[:-1]
        # generate graphs
        X_ = [x1, x2, x3]
        G = [make_grid_edges(x) for x in X_]
        # reshape / flatten x and y
        X_ = [x.reshape(-1, n_states) for x in X_]
        Y = [y.ravel() for y in [y1, y2, y3]]
        X = list(zip(X_, G))
        clf.fit(X, Y)
        Y_pred = clf.predict(X)
        for y, y_pred in zip(Y, Y_pred):
            assert_array_equal(y, y_pred)
Example 11: test_joint_feature_continuous
def test_joint_feature_continuous():
    # FIXME
    # first make perfect prediction, including pairwise part
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1
    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    for inference_method in get_installed(["lp", "ad3"]):
        crf = DirectionalGridCRF(inference_method=inference_method)
        crf.initialize(X, Y)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=True)
        # compute joint_feature for prediction
        joint_feature_y = crf.joint_feature(x, y_pred)
        assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
Example 12: test_chain
def test_chain():
    # test LP, AD3, AD3-BB and JT on a chain.
    # they should all be exact
    rnd = np.random.RandomState(0)
    algorithms = get_installed([('ad3', {'branch_and_bound': False}),
                                ('ad3', {'branch_and_bound': True}),
                                ('dai', {'alg': 'jt'})])
    for i in range(10):
        forward = np.c_[np.arange(9), np.arange(1, 10)]
        backward = np.c_[np.arange(1, 10), np.arange(9)]
        unary_potentials = rnd.normal(size=(10, 3))
        pairwise_potentials = rnd.normal(size=(3, 3))
        # test that reversing edges is same as transposing pairwise potentials
        y_forward = inference_dispatch(unary_potentials, pairwise_potentials,
                                       forward, 'lp')
        y_backward = inference_dispatch(unary_potentials,
                                        pairwise_potentials.T, backward, 'lp')
        assert_array_equal(y_forward, y_backward)
        for chain in [forward, backward]:
            y_lp = inference_dispatch(unary_potentials, pairwise_potentials,
                                      chain, 'lp')
            for alg in algorithms:
                print(alg)
                y = inference_dispatch(unary_potentials, pairwise_potentials,
                                       chain, alg)
                assert_array_equal(y, y_lp)
Example 13: test_binary_grid_unaries
def test_binary_grid_unaries():
    # test handling on unaries for binary grid CRFs
    for ds in binary:
        X, Y = ds(n_samples=1)
        x, y = X[0], Y[0]
        for inference_method in get_installed():
            # NOTE: ad3+ fails because it requires a different data structure
            if inference_method == 'ad3+':
                continue
            crf = GridCRF(inference_method=inference_method)
            crf.initialize(X, Y)
            w_unaries_only = np.zeros(7)
            w_unaries_only[:4] = np.eye(2).ravel()
            # test that inference with unaries only is the
            # same as argmax
            inf_unaries = crf.inference(x, w_unaries_only)
            assert_array_equal(inf_unaries, np.argmax(x, axis=2),
                               "Wrong unary inference for %s"
                               % inference_method)
            assert(np.mean(inf_unaries == y) > 0.5)

            # check that the right thing happens on noise-free data
            X, Y = ds(n_samples=1, noise=0)
            inf_unaries = crf.inference(X[0], w_unaries_only)
            assert_array_equal(inf_unaries, Y[0],
                               "Wrong unary result for %s"
                               % inference_method)
Example 14: test_energy_continuous
def test_energy_continuous():
    # make sure that energy as computed by ssvm is the same as by lp
    np.random.seed(0)
    for inference_method in get_installed(["lp", "ad3"]):
        found_fractional = False
        crf = EdgeFeatureGraphCRF(n_states=3,
                                  inference_method=inference_method,
                                  n_edge_features=2)
        while not found_fractional:
            x = np.random.normal(size=(7, 8, 3))
            edge_list = make_grid_edges(x, 4, return_lists=True)
            edges = np.vstack(edge_list)
            edge_features = edge_list_to_features(edge_list)
            x = (x.reshape(-1, 3), edges, edge_features)
            unary_params = np.random.normal(size=(3, 3))
            pw1 = np.random.normal(size=(3, 3))
            pw2 = np.random.normal(size=(3, 3))
            w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
            res, energy = crf.inference(x, w, relaxed=True,
                                        return_energy=True)
            found_fractional = np.any(np.max(res[0], axis=-1) != 1)
            psi = crf.psi(x, res)
            energy_svm = np.dot(psi, w)
            assert_almost_equal(energy, -energy_svm)
Example 15: test_one_slack_constraint_caching
def test_one_slack_constraint_caching():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0,
                                       size_x=9)
    n_labels = len(np.unique(Y))
    exact_inference = get_installed([('ad3', {'branch_and_bound': True}),
                                     "lp"])[0]
    crf = GridCRF(n_states=n_labels, inference_method=exact_inference)
    clf = OneSlackSSVM(model=crf, max_iter=150, C=1,
                       check_constraints=True, break_on_bad=True,
                       inference_cache=50, inactive_window=0)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_equal(len(clf.inference_cache_), len(X))
    # there should be 13 constraints, which are less than the 94 iterations
    # that are done
    # check that we didn't change the behavior of how we construct the cache
    constraints_per_sample = [len(cache) for cache in clf.inference_cache_]
    if exact_inference == "lp":
        assert_equal(len(clf.inference_cache_[0]), 18)
        assert_equal(np.max(constraints_per_sample), 18)
        assert_equal(np.min(constraints_per_sample), 18)
    else:
        assert_equal(len(clf.inference_cache_[0]), 13)
        assert_equal(np.max(constraints_per_sample), 20)
        assert_equal(np.min(constraints_per_sample), 11)