本文整理汇总了Python中treq.client.HTTPClient.get方法的典型用法代码示例。如果您正苦于以下问题:Python HTTPClient.get方法的具体用法?Python HTTPClient.get怎么用?Python HTTPClient.get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类treq.client.HTTPClient
的用法示例。
在下文中一共展示了HTTPClient.get方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: PathResource
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class PathResource(resource.Resource):
"""
Docker has asked us for the concrete on-disk location of an extant volume.
If it hasn't already asked for it to be mounted, or is currently on another
machine, this is an error.
"""
def __init__(self, *args, **kw):
self._agent = Agent(reactor) # no connectionpool
self.client = HTTPClient(self._agent)
return resource.Resource.__init__(self, *args, **kw)
def render_POST(self, request):
# TODO make a FlockerResource base class
self.base_url = os.environ.get("FLOCKER_CONTROL_SERVICE_BASE_URL")
# expect Name
data = json.loads(request.content.read())
print "path:", data
d = self.client.get(self.base_url + "/configuration/datasets")
d.addCallback(treq.json_content)
def get_dataset(datasets):
dataset_id = None
# 1. find the flocker dataset_id of the named volume
# 2. look up the path of that volume in the datasets current state
for dataset in datasets:
if dataset["metadata"]["name"] == data["Name"]:
dataset_id = dataset["dataset_id"]
d = self.client.get(self.base_url + "/state/datasets")
d.addCallback(treq.json_content)
def get_path(datasets, dataset_id):
if dataset_id is None:
path = None
else:
for dataset in datasets:
if dataset["dataset_id"] == dataset_id:
path = dataset["path"]
if path is not None:
request.write(json.dumps(dict(
Mountpoint=path,
Err=None,
)))
else:
request.write(json.dumps(dict(
Mountpoint="",
Err="unable to find %s" % (data["Name"],),
)))
request.finish()
d.addCallback(get_path, dataset_id=dataset_id)
return d
d.addCallback(get_dataset)
return server.NOT_DONE_YET
示例2: validate_ticket
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
def validate_ticket(self, ticket, request):
    """
    Ask the CAS server to validate `ticket` against the service URL
    derived from `request`, then feed the raw response body into
    parse_sv_results.

    :return: Deferred firing with the result of parse_sv_results.
    """
    # The service URL is this request's URL with the ticket query
    # parameter stripped out.
    parts = urlparse.urlparse(self.get_url(request))
    query = urlparse.parse_qs(parts.query)
    query.pop(self.ticket_name, None)
    parts = urlparse.ParseResult(
        *tuple(parts[:4] + (urlencode(query, doseq=True),) + parts[5:]))
    service_url = urlparse.urlunparse(parts)
    # Build the service-validate URL carrying the service and ticket.
    validate_query = urlencode(
        {self.service_name: service_url, self.ticket_name: ticket},
        doseq=True)
    validate_parts = urlparse.urlparse(self.cas_info['service_validate_url'])
    validate_parts = urlparse.ParseResult(
        *tuple(validate_parts[:4] + (validate_query,) + validate_parts[5:]))
    service_validate_url = urlparse.urlunparse(validate_parts)
    self.log(
        "Requesting service-validate URL => '{0}' ...".format(
            service_validate_url))
    d = HTTPClient(self.cas_agent).get(service_validate_url)
    d.addCallback(treq.content)
    d.addCallback(self.parse_sv_results, service_url, ticket, request)
    return d
示例3: validate_ticket
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
def validate_ticket(self, ticket, request):
    """
    Validate a CAS ticket: rebuild the service URL (the current request
    URL minus the ticket query parameter), request the configured
    service-validate URL with the service and ticket parameters, and
    chain the raw response body into parse_sv_results.

    :return: Deferred firing with the result of parse_sv_results.
    """
    service_name = self.service_name
    ticket_name = self.ticket_name
    this_url = self.get_url(request)
    p = urlparse.urlparse(this_url)
    qs_map = urlparse.parse_qs(p.query)
    # Remove the ticket so the service URL matches what was originally
    # presented to the CAS server.
    if ticket_name in qs_map:
        del qs_map[ticket_name]
    # NOTE(review): urlencode is called without doseq=True here, so a
    # repeated query parameter would be re-encoded as a Python list
    # repr -- confirm that is intended.
    param_str = urlencode(qs_map)
    p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
    service_url = urlparse.urlunparse(p)
    params = {
        service_name: service_url,
        ticket_name: ticket,}
    param_str = urlencode(params)
    p = urlparse.urlparse(self.cas_info['service_validate_url'])
    p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
    service_validate_url = urlparse.urlunparse(p)
    log.msg("[INFO] requesting URL '%s' ..." % service_validate_url)
    http_client = HTTPClient(self.agent)
    d = http_client.get(service_validate_url)
    d.addCallback(treq.content)
    d.addCallback(self.parse_sv_results, service_url, ticket, request)
    return d
示例4: main
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
def main():
    """
    Fetch https://wtfismyip.com/text through a local Tor SOCKS5 proxy
    (127.0.0.1:9050) and hand the response body to `foo`, then run the
    reactor until stopped.
    """
    # Removed unused locals: `url` was never read, and the
    # ssl.ClientContextFactory was only referenced by the commented-out
    # TLS wrapping below.
    tor_endpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)
    #tls_endpoint = TLSWrapClientEndpoint(tor_endpoint, factory)
    socks_agent = SOCKS5Agent(reactor, proxyEndpoint=tor_endpoint)
    socks_client = HTTPClient(socks_agent)
    d = socks_client.get("https://wtfismyip.com/text")
    d.addCallback(readBody)
    # NOTE(review): `foo` is not defined in this snippet -- confirm it is
    # imported/defined elsewhere before relying on this callback.
    d.addCallback(foo)
    reactor.run()
示例5: StreamingEliotLogsTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class StreamingEliotLogsTests(SyncTestCase):
    """
    Tests for the log streaming resources created by ``create_log_resources``.
    """
    def setUp(self):
        self.resource = create_log_resources()
        self.agent = RequestTraversalAgent(self.resource)
        self.client = HTTPClient(self.agent)
        return super(StreamingEliotLogsTests, self).setUp()

    def test_v1(self):
        """
        There is a resource at *v1*.
        """
        # Issue the in-memory GET first, then match on the result.
        getting = self.client.get(b"http:///v1")
        matcher = succeeded(has_response_code(Equals(OK)))
        self.assertThat(getting, matcher)
示例6: RabbitmqManagementClient
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class RabbitmqManagementClient(object):
    """
    Thin client for the RabbitMQ management HTTP API, limiting
    concurrent requests to TPS_LIMIT via a DeferredSemaphore.
    """
    # Overridable for tests; defaults to the global reactor.
    clock = reactor

    @classmethod
    def pool_factory(cls, reactor):
        """Build the HTTP connection pool, capped at TPS_LIMIT per host."""
        pool = HTTPConnectionPool(reactor, persistent=True)
        pool.maxPersistentPerHost = TPS_LIMIT
        # BUGFIX: the pool was created but never returned, so
        # agent_factory was silently called with pool=None and the
        # persistent-connection cap had no effect.
        return pool

    @classmethod
    def agent_factory(cls, reactor, pool=None):
        """Build the twisted Agent backing the HTTP client."""
        return Agent(reactor, pool=pool)

    def __init__(self, base_url, username, password):
        self.base_url = base_url    # host[:port] of the management API
        self.username = username
        self.password = password
        self.http_client = HTTPClient(self.agent_factory(
            self.clock, pool=self.pool_factory(self.clock)))
        # Bound in-flight API calls to TPS_LIMIT.
        self.semaphore = defer.DeferredSemaphore(TPS_LIMIT)

    def get_queue(self, vhost, queue_name):
        """
        GET /api/queues/<vhost>/<queue_name>.

        :return: Deferred firing with the decoded JSON body.
        """
        url = 'http://%s/api/queues/%s/%s' % (
            self.base_url,
            urllib.quote(vhost, safe=''),
            queue_name
        )
        def _get_queue():
            d = self.http_client.get(url, auth=(self.username, self.password))
            d.addCallback(treq.json_content)
            return d
        return self.semaphore.run(_get_queue)
示例7: PowerstripFlockerTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class PowerstripFlockerTests(TestCase):
"""
Real flocker-plugin tests against two nodes using the flocker
acceptance testing framework.
"""
# Slow builds because initial runs involve pulling some docker images
# (flocker-plugin).
timeout = 1200
def _buildDockerOnce(self):
    """
    Using blocking APIs, build docker once per test run.
    """
    # BUILD_ONCE is a module-level list used as a "ran already" flag.
    if len(BUILD_ONCE):
        return
    # Only attempt the build when the docker checkout is present.
    if path.exists(DOCKER_PATH):
        dockerCmd = ("cd %(dockerDir)s;"
                     "docker build -t custom-docker .;"
                     "docker run --privileged --rm "
                     "-e DOCKER_EXPERIMENTAL=1 "
                     "-e DOCKER_GITCOMMIT=`git log -1 --format=%%h` "
                     "-v %(dockerDir)s:/go/src/github.com/docker/docker "
                     "custom-docker hack/make.sh binary" % dict(
                         dockerDir=DOCKER_PATH))
        print "Running docker command:", dockerCmd
        # os.system: non-zero means the shell pipeline failed.
        exit = system(dockerCmd)
        if exit > 0:
            raise Exception("failed to build docker")
    # Mark done even when DOCKER_PATH is absent, so we only try once.
    BUILD_ONCE.append(1)
def _injectDockerOnce(self, ip):
    """
    Using blocking APIs, copy the docker binary from whence it was built in
    _buildDockerOnce to the given ip.
    """
    # INJECT_ONCE maps ip -> marker list; a non-empty list means done.
    if ip not in INJECT_ONCE:
        INJECT_ONCE[ip] = []
    if len(INJECT_ONCE[ip]):
        return
    if path.exists(DOCKER_PATH):
        # e.g. 1.5.0-plugins
        dockerVersion = "1.7.0-dev-experimental" # XXX Docker need to update their VERSION file open("%s/VERSION" % (DOCKER_PATH,)).read().strip()
        binaryPath = "%(dockerDir)s/bundles/%(dockerVersion)s/binary/docker-%(dockerVersion)s" % dict(
            dockerDir=DOCKER_PATH, dockerVersion=dockerVersion)
        hostBinaryPath = "/usr/bin/docker"
        key = "/home/buildslave/.ssh/id_rsa_flocker"
        # scp the freshly-built binary over the node's system docker.
        # NOTE(review): the "root@" user below was reconstructed from a
        # scrape-obfuscated address -- confirm against the original repo.
        exit = system("scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
                      "-i %(key)s %(binaryPath)s root@%(ip)s:%(hostBinaryPath)s" % dict(
                          key=key, hostBinaryPath=hostBinaryPath, binaryPath=binaryPath, ip=ip))
        if exit > 0:
            raise Exception("failed to inject docker into %(ip)s" % dict(ip=ip))
    INJECT_ONCE[ip].append(1)
def setUp(self):
"""
Ready the environment for tests which actually run docker
with flocker-plugin enabled.
* Log into each node in turn:
* Load flocker-plugin into docker
"""
self.agent = Agent(reactor) # no connectionpool
self.client = HTTPClient(self.agent)
d = get_test_cluster(self, 2)
def got_cluster(cluster):
self.cluster = cluster
self.plugins = {}
daemonReadyDeferreds = []
self.ips = [node.address for node in cluster.nodes]
# Build docker if necessary (if there's a docker submodule)
self._buildDockerOnce()
for ip in self.ips:
# cleanup after previous test runs
#run(ip, ["pkill", "-f", "flocker"])
shell(ip, "sleep 5 && initctl stop docker || true")
# Copy docker into the respective node
self._injectDockerOnce(ip)
# workaround https://github.com/calavera/docker/pull/4#issuecomment-100046383
shell(ip, "mkdir -p %s" % (PLUGIN_DIR,))
# cleanup stale sockets
shell(ip, "rm -f %s/*" % (PLUGIN_DIR,))
#shell(ip, "supervisorctl stop flocker-agent")
#shell(ip, "supervisorctl start flocker-agent")
"""
for container in ("flocker",):
try:
run(ip, ["docker", "rm", "-f", container])
except Exception:
print container, "was not running, not killed, OK."
# start flocker-plugin
FLOCKER_PLUGIN = "%s/flocker-plugin:%s" % (DOCKER_PULL_REPO, PF_VERSION)
run(ip, ["docker", "pull", FLOCKER_PLUGIN])
"""
# TODO - come up with cleaner/nicer way of flocker-plugin
#.........这里部分代码省略.........
示例8: MountResource
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class MountResource(resource.Resource):
"""
A powerstrip pre-hook for container create.
"""
isLeaf = True
def __init__(self, *args, **kw):
    # One Agent per resource; deliberately no connection pool.
    self._agent = Agent(reactor) # no connectionpool
    self.client = HTTPClient(self._agent)
    # resource.Resource is an old-style class here, hence the direct call.
    return resource.Resource.__init__(self, *args, **kw)
def render_POST(self, request):
"""
Handle a pre-hook: either create a filesystem, or move it in place.
"""
json_parsed = json.loads(request.content.read())
print ">>> called with", json_parsed
pprint.pprint(os.environ)
# BASE_URL like http://control-service/v1/ ^
self.base_url = os.environ.get("FLOCKER_CONTROL_SERVICE_BASE_URL")
self.ip = os.environ.get("MY_NETWORK_IDENTITY")
def wait_until_volume_in_place(result, fs):
"""
Called after a dataset has been created or moved in the cluster's
desired configuration. Wait until the volume shows up in the
cluster actual state on the right host (either having been created
or moved).
:return: Deferred which fires with the tuple (fs, dataset_id) --
that is, the filesystem and the corresponding flocker dataset
uuid that the docker client asked for -- firing only once the
filesystem has been created/moved and mounted (iow, exists on
the right host in the cluster state).
"""
print "wait_until_volume_in_place while processing", fs, "got result", result
dataset_id = result["dataset_id"]
def dataset_exists():
d = self.client.get(self.base_url + "/state/datasets")
d.addCallback(treq.json_content)
def check_dataset_exists(datasets):
"""
The /v1/state/datasets API seems to show the volume as
being on two hosts at once during a move. We assume
therefore that when it settles down to only show it on one
host that this means the move is complete.
"""
print "Got", self.ip, self.host_uuid, "datasets:", datasets
matching_datasets = []
for dataset in datasets:
if dataset["dataset_id"] == dataset_id:
matching_datasets.append(dataset)
if len(matching_datasets) == 1:
if matching_datasets[0]["primary"] == self.host_uuid:
return matching_datasets[0]
return False
d.addCallback(check_dataset_exists)
return d
d = loop_until(dataset_exists)
d.addCallback(lambda dataset: (fs, dataset))
return d
d = self.client.get(self.base_url + "/state/nodes")
d.addCallback(treq.json_content)
def find_my_uuid(nodes):
for node in nodes:
if node["host"] == self.ip:
self.host_uuid = node["uuid"]
break
return self.client.get(self.base_url + "/configuration/datasets")
d.addCallback(find_my_uuid)
d.addCallback(treq.json_content)
def got_dataset_configuration(configured_datasets):
# form a mapping from names onto dataset objects
configured_dataset_mapping = {}
for dataset in configured_datasets:
if dataset["metadata"].get("name"):
configured_dataset_mapping[dataset["metadata"].get("name")] = dataset
# iterate over the datasets we were asked to create by the docker client
fs_create_deferreds = []
old_binds = []
print "got json_parsed...", json_parsed
if json_parsed['Name'] is not None and json_parsed['Name'] != "":
binds = [json_parsed['Name']]
for bind in binds:
fs, remainder = bind, ""
# TODO validation
# if "/" in fs:
# raise Exception("Not allowed flocker filesystems more than one level deep")
old_binds.append((fs, remainder))
# if a dataset exists, and is in the right place, we're cool.
if fs in configured_dataset_mapping:
dataset = configured_dataset_mapping[fs]
if dataset["primary"] == self.host_uuid:
# check / wait for the state to match the desired
# configuration
fs_create_deferreds.append(wait_until_volume_in_place(dataset, fs=fs))
#.........这里部分代码省略.........
示例9: HTTPClientTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
#.........这里部分代码省略.........
def test_request_unsupported_params_combination(self):
    """Passing file-like `data` together with `files` raises ValueError."""
    kwargs = dict(data=BytesIO(b"yo"), files={"file1": BytesIO(b"hey")})
    self.assertRaises(ValueError,
                      self.client.request,
                      'POST', 'http://example.com/', **kwargs)
def test_request_dict_headers(self):
    """
    Plain-dict headers are converted to a twisted Headers object:
    scalar values are wrapped in lists, names/values are encoded to
    bytes, and an accept-encoding: gzip header is added.
    """
    self.client.request('GET', 'http://example.com/', headers={
        'User-Agent': 'treq/0.1dev',
        'Accept': ['application/json', 'text/plain']
    })
    self.agent.request.assert_called_once_with(
        b'GET', b'http://example.com/',
        Headers({b'User-Agent': [b'treq/0.1dev'],
                 b'accept-encoding': [b'gzip'],
                 b'Accept': [b'application/json', b'text/plain']}),
        None)
@with_clock
def test_request_timeout_fired(self, clock):
    """
    Verify the request is cancelled if a response is not received
    within specified timeout period.
    """
    self.agent.request.return_value = d = Deferred()
    self.client.request('GET', 'http://example.com', timeout=2)
    # simulate we haven't gotten a response within timeout seconds
    clock.advance(3)
    # a deferred should have been cancelled
    self.failureResultOf(d, CancelledError)
@with_clock
def test_request_timeout_cancelled(self, clock):
    """
    Verify timeout is cancelled if a response is received before
    timeout period elapses.
    """
    self.agent.request.return_value = d = Deferred()
    self.client.request('GET', 'http://example.com', timeout=2)
    # simulate a response
    d.callback(mock.Mock(code=200, headers=Headers({})))
    # now advance the clock but since we already got a result,
    # a cancellation timer should have been cancelled
    clock.advance(3)
    # the deferred fires with the response, not CancelledError
    self.successResultOf(d)
def test_response_is_buffered(self):
    """
    The wrapped response buffers the body: a second deliverBody call
    is served from the buffer rather than hitting the underlying
    response again.
    """
    response = mock.Mock(deliverBody=mock.Mock(),
                         headers=Headers({}))
    self.agent.request.return_value = succeed(response)
    d = self.client.get('http://www.example.com')
    result = self.successResultOf(d)
    protocol = mock.Mock(Protocol)
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
    # Delivering again must not touch the underlying response.
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
def test_response_buffering_is_disabled_with_unbufferred_arg(self):
    """With unbuffered=True the agent's response is passed straight through."""
    response = mock.Mock(headers=Headers({}))
    self.agent.request.return_value = succeed(response)
    result = self.successResultOf(
        self.client.get('http://www.example.com', unbuffered=True))
    # YOLO public attribute.
    self.assertEqual(result.original, response)
def test_request_post_redirect_denied(self):
    """A 302 answer to a POST is surfaced as a ResponseFailed failure."""
    redirect = mock.Mock(code=302, headers=Headers({'Location': ['/']}))
    self.agent.request.return_value = succeed(redirect)
    posting = self.client.post('http://www.example.com')
    self.failureResultOf(posting, ResponseFailed)
def test_request_browser_like_redirects(self):
    """
    With browser_like_redirects=True a 302 response to a POST is
    followed (via twisted's RedirectAgent._handleRedirect, patched
    here) and the final response is returned to the caller.
    """
    response = mock.Mock(code=302, headers=Headers({'Location': ['/']}))
    self.agent.request.return_value = succeed(response)
    raw = mock.Mock(return_value=[])
    final_resp = mock.Mock(code=200, headers=mock.Mock(getRawHeaders=raw))
    with mock.patch('twisted.web.client.RedirectAgent._handleRedirect',
                    return_value=final_resp):
        d = self.client.post('http://www.google.com',
                             browser_like_redirects=True,
                             unbuffered=True)
        self.assertEqual(self.successResultOf(d).original, final_resp)
示例10: HTTPClientTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
#.........这里部分代码省略.........
self.client.request(
"POST", "http://example.com/", data={"key": "a", "key2": "b"}, files={"file1": StringIO("hey")}
)
self.agent.request.assert_called_once_with(
"POST",
"http://example.com/",
headers=Headers({"Content-Type": ["multipart/form-data; boundary=heyDavid"]}),
bodyProducer=self.MultiPartProducer.return_value,
)
FP = self.FileBodyProducer.return_value
self.assertEqual(
mock.call(
[("key", "a"), ("key2", "b"), ("file1", (None, "application/octet-stream", FP))], boundary="heyDavid"
),
self.MultiPartProducer.call_args,
)
def test_request_unsupported_params_combination(self):
    """File-like `data` combined with `files` must raise ValueError."""
    kwargs = dict(data=StringIO("yo"), files={"file1": StringIO("hey")})
    self.assertRaises(
        ValueError, self.client.request,
        "POST", "http://example.com/", **kwargs)
def test_request_dict_headers(self):
    """
    Dict headers are converted to a twisted Headers object; scalar
    values are wrapped in single-element lists.
    """
    self.client.request(
        "GET",
        "http://example.com/",
        headers={"User-Agent": "treq/0.1dev", "Accept": ["application/json", "text/plain"]},
    )
    self.agent.request.assert_called_once_with(
        "GET",
        "http://example.com/",
        headers=Headers({"User-Agent": ["treq/0.1dev"], "Accept": ["application/json", "text/plain"]}),
        bodyProducer=None,
    )
@with_clock
def test_request_timeout_fired(self, clock):
    """
    Verify the request is cancelled if a response is not received
    within specified timeout period.
    """
    self.client.request("GET", "http://example.com", timeout=2)
    # simulate we haven't gotten a response within timeout seconds
    clock.advance(3)
    deferred = self.agent.request.return_value
    # a deferred should have been cancelled
    self.assertTrue(deferred.cancel.called)
@with_clock
def test_request_timeout_cancelled(self, clock):
    """
    Verify timeout is cancelled if a response is received before
    timeout period elapses.
    """
    self.client.request("GET", "http://example.com", timeout=2)
    # simulate a response
    deferred = self.agent.request.return_value
    # the client registered its result handler via addBoth; fire it
    # directly to mimic the response arriving.
    gotResult = deferred.addBoth.call_args[0][0]
    gotResult("result")
    # now advance the clock but since we already got a result,
    # a cancellation timer should have been cancelled
    clock.advance(3)
    self.assertFalse(deferred.cancel.called)
def test_response_is_buffered(self):
    """
    The wrapped response buffers the body: a second deliverBody call
    does not reach the underlying response again.
    """
    response = mock.Mock(deliverBody=mock.Mock())
    self.agent.request.return_value = succeed(response)
    d = self.client.get("http://www.example.com")
    result = self.successResultOf(d)
    protocol = mock.Mock(Protocol)
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
    # A repeat delivery is served from the buffer.
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
def test_response_buffering_is_disabled_with_unbufferred_arg(self):
    """With unbuffered=True the agent's response is handed back unchanged."""
    response = mock.Mock()
    self.agent.request.return_value = succeed(response)
    result = self.successResultOf(
        self.client.get("http://www.example.com", unbuffered=True))
    self.assertEqual(result, response)
示例11: HTTPClientTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
#.........这里部分代码省略.........
def test_request_mixed_params_dict(self):
    """
    Dict `data` combined with `files` produces a multipart/form-data
    request: the MultiPartProducer receives the form fields followed by
    the file entry, using the (deterministic) 'heyDavid' boundary.
    """
    self.client.request(
        'POST', 'http://example.com/',
        data={"key": "a", "key2": "b"},
        files={"file1": StringIO("hey")})
    self.agent.request.assert_called_once_with(
        'POST', 'http://example.com/',
        headers=Headers({
            'Content-Type': [
                'multipart/form-data; boundary=heyDavid']}),
        bodyProducer=self.MultiPartProducer.return_value)
    FP = self.FileBodyProducer.return_value
    # The file entry defaults to (no filename, application/octet-stream).
    self.assertEqual(
        mock.call([
            ('key', 'a'),
            ('key2', 'b'),
            ('file1', (None, 'application/octet-stream', FP))],
            boundary='heyDavid'),
        self.MultiPartProducer.call_args)
def test_request_unsupported_params_combination(self):
    """File-like `data` combined with `files` must raise ValueError."""
    self.assertRaises(
        ValueError, self.client.request,
        'POST', 'http://example.com/',
        data=StringIO("yo"), files={"file1": StringIO("hey")})
def test_request_dict_headers(self):
    """
    Dict headers are converted to a twisted Headers object; scalar
    values become single-element lists.
    """
    self.client.request('GET', 'http://example.com/', headers={
        'User-Agent': 'treq/0.1dev',
        'Accept': ['application/json', 'text/plain']
    })
    self.agent.request.assert_called_once_with(
        'GET', 'http://example.com/',
        headers=Headers({'User-Agent': ['treq/0.1dev'],
                         'Accept': ['application/json', 'text/plain']}),
        bodyProducer=None)
@with_clock
def test_request_timeout_fired(self, clock):
    """
    Verify the request is cancelled if a response is not received
    within specified timeout period.
    """
    self.client.request('GET', 'http://example.com', timeout=2)
    # simulate we haven't gotten a response within timeout seconds
    clock.advance(3)
    deferred = self.agent.request.return_value
    # a deferred should have been cancelled
    self.assertTrue(deferred.cancel.called)
@with_clock
def test_request_timeout_cancelled(self, clock):
    """
    Verify timeout is cancelled if a response is received before
    timeout period elapses.
    """
    self.client.request('GET', 'http://example.com', timeout=2)
    # simulate a response
    deferred = self.agent.request.return_value
    # the client registered its result handler via addBoth; fire it
    # directly to mimic the response arriving.
    gotResult = deferred.addBoth.call_args[0][0]
    gotResult('result')
    # now advance the clock but since we already got a result,
    # a cancellation timer should have been cancelled
    clock.advance(3)
    self.assertFalse(deferred.cancel.called)
def test_response_is_buffered(self):
    """
    The wrapped response buffers the body: a second deliverBody call
    does not reach the underlying response again.
    """
    response = mock.Mock(deliverBody=mock.Mock())
    self.agent.request.return_value = succeed(response)
    d = self.client.get('http://www.example.com')
    result = self.successResultOf(d)
    protocol = mock.Mock(Protocol)
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
    # A repeat delivery is served from the buffer.
    result.deliverBody(protocol)
    self.assertEqual(response.deliverBody.call_count, 1)
def test_response_buffering_is_disabled_with_unbufferred_arg(self):
    """With unbuffered=True the wrapper exposes the original response."""
    response = mock.Mock()
    self.agent.request.return_value = succeed(response)
    result = self.successResultOf(
        self.client.get('http://www.example.com', unbuffered=True))
    # YOLO public attribute.
    self.assertEqual(result.original, response)
示例12: PowerstripFlockerTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class PowerstripFlockerTests(TestCase):
"""
Real powerstrip-flocker tests against two nodes using the flocker
acceptance testing framework.
"""
# Slow builds because initial runs involve pulling some docker images
# (powerstrip, and powerstrip-flocker).
timeout = 1200
def setUp(self):
    """
    Ready the environment for tests which actually run docker against
    powerstrip with powerstrip-flocker enabled.
    * Log into each node in turn:
    * Run powerstrip-flocker in docker
    * Run powerstrip in docker
    """
    self.agent = Agent(reactor) # no connectionpool
    self.client = HTTPClient(self.agent)
    d = get_test_cluster(self, 2)
    def got_cluster(cluster):
        self.cluster = cluster
        self.powerstripflockers = {}
        self.powerstrips = {}
        daemonReadyDeferreds = []
        self.ips = [node.address for node in cluster.nodes]
        for ip in self.ips:
            # cleanup after previous test runs
            #run(ip, ["pkill", "-f", "flocker"])
            for proc in ("powerstrip", "powerstrip-flocker"):
                try:
                    run(ip, ["docker", "rm", "-f", proc])
                except Exception:
                    print proc, "was not running, not killed, OK."
            # put a powerstrip config in place
            run(ip, ["mkdir", "-p", "/root/powerstrip-config"])
            # NOTE(review): the YAML indentation below was reconstructed
            # from a whitespace-stripped source -- confirm it matches the
            # powerstrip adapters.yml schema.
            run(ip, ["sh", "-c", "cat > /root/powerstrip-config/adapters.yml"], """
version: 1
endpoints:
  "POST /*/containers/create":
    pre: [flocker]
adapters:
  flocker: http://powerstrip-flocker/flocker-adapter
""")
            # start powerstrip-flocker
            POWERSTRIP_FLOCKER = "%s/powerstrip-flocker:latest" % (DOCKER_PULL_REPO,)
            run(ip, ["docker", "pull", POWERSTRIP_FLOCKER])
            # TODO - come up with cleaner/nicer way of powerstrip-flocker
            # being able to establish its own host uuid (or volume
            # mountpoints), such as API calls.
            host_uuid = run(ip, ["python", "-c", "import json; "
                "print json.load(open('/etc/flocker/volume.json'))['uuid']"]).strip()
            self.powerstripflockers[ip] = remote_service_for_test(self, ip,
                ["docker", "run", "--name=powerstrip-flocker",
                 "--expose", "80",
                 "-p", "9999:80", # so that we can detect it being up
                 "-e", "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,),
                 "-e", "MY_NETWORK_IDENTITY=%s" % (ip,),
                 "-e", "MY_HOST_UUID=%s" % (host_uuid,),
                 POWERSTRIP_FLOCKER])
            print "Waiting for powerstrip-flocker to show up on", ip, "..."
            daemonReadyDeferreds.append(wait_for_socket(ip, 9999))
            # start powerstrip
            # TODO - use the new unix-socket powerstrip approach.
            POWERSTRIP = "clusterhq/powerstrip:latest"
            run(ip, ["docker", "pull", POWERSTRIP])
            self.powerstrips[ip] = remote_service_for_test(self, ip,
                ["docker", "run", "--name=powerstrip",
                 "-p", "2375:2375",
                 "-v", "/var/run/docker.sock:/var/run/docker.sock",
                 "-v", "/root/powerstrip-config/adapters.yml:"
                       "/etc/powerstrip/adapters.yml",
                 "--link", "powerstrip-flocker:powerstrip-flocker",
                 POWERSTRIP])
            print "Waiting for powerstrip to show up on", ip, "..."
            daemonReadyDeferreds.append(wait_for_socket(ip, 2375))
        # Only ready once every powerstrip/powerstrip-flocker is listening.
        d = defer.gatherResults(daemonReadyDeferreds)
        # def debug():
        #     services
        #     import pdb; pdb.set_trace()
        # d.addCallback(lambda ignored: deferLater(reactor, 1, debug))
        return d
    d.addCallback(got_cluster)
    return d
def test_create_a_dataset(self):
"""
Running a docker container specifying a dataset name which has never
been created before creates it in the API.
"""
node1, node2 = sorted(self.ips)
fsName = "test001"
powerstrip(node1, "docker run "
"-v /flocker/%s:/data busybox "
"sh -c 'echo 1 > /data/file'" % (fsName,))
url = self.cluster.base_url + "/configuration/datasets"
#.........这里部分代码省略.........
示例13: ProxyTests
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class ProxyTests(TestCase, GenerallyUsefulPowerstripTestMixin):
def setUp(self):
    """
    Construct a fake "Docker daemon" (one which does much less than the
    actual Docker daemon) and a Proxy instance.
    Pre- and post-hook API servers are provided by the individual tests.
    """
    self.agent = Agent(reactor) # no connectionpool
    self.client = HTTPClient(self.agent)
def tearDown(self):
    """Stop every listener the test started; adder servers are optional."""
    shutdowns = [
        self.dockerServer.stopListening(),
        self.proxyServer.stopListening(),
    ]
    # The adder servers only exist for tests that called _getAdder /
    # _getAdderTwo.
    for attr in ('adderServer', 'adderTwoServer'):
        if hasattr(self, attr):
            shutdowns.append(getattr(self, attr).stopListening())
    return defer.gatherResults(shutdowns)
def test_empty_endpoints(self):
    """
    The proxy passes through requests when no endpoints are specified.
    In particular, when POST to the /towel endpoint on the *proxy*, we get
    to see that we were seen by the (admittedly fake) Docker daemon.
    """
    self._configure("endpoints: {}\nadapters: {}")
    d = self.client.post('http://127.0.0.1:%d/towel' % (self.proxyPort,),
                         json.dumps({"hiding": "things"}),
                         headers={'Content-Type': ['application/json']})
    d.addCallback(treq.json_content)
    def verify(response):
        # The fake daemon annotates the echoed body, proving passthrough.
        self.assertEqual(response,
                         {"hiding": "things", "SeenByFakeDocker": 42})
    d.addCallback(verify)
    return d
def test_empty_endpoints_socket(self):
    """
    The proxy is able to connect to Docker on a UNIX socket.
    """
    # Same passthrough scenario as test_empty_endpoints, but the fake
    # daemon listens on a UNIX socket instead of TCP.
    self._configure("endpoints: {}\nadapters: {}", dockerOnSocket=True)
    d = self.client.post('http://127.0.0.1:%d/towel' % (self.proxyPort,),
                         json.dumps({"hiding": "things"}),
                         headers={'Content-Type': ['application/json']})
    d.addCallback(treq.json_content)
    def verify(response):
        self.assertEqual(response,
                         {"hiding": "things", "SeenByFakeDocker": 42})
    d.addCallback(verify)
    return d
def test_endpoint_and_empty_hooks(self):
    """
    An endpoint is specified, but no pre-or post hooks are added to it.
    Requests to the endpoint are proxied.
    """
    endpoint = "/towel"
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-stripped source -- verify against the powerstrip
    # config schema.
    self._configure("""endpoints:
  "POST %s":
    pre: []
    post: []
adapters: {}""" % (endpoint,))
    d = self.client.post('http://127.0.0.1:%d%s' % (self.proxyPort, endpoint),
                         json.dumps({"hiding": "things"}),
                         headers={'Content-Type': ['application/json']})
    d.addCallback(treq.json_content)
    def verify(response):
        self.assertEqual(response,
                         {"hiding": "things", "SeenByFakeDocker": 42})
    d.addCallback(verify)
    return d
def _getAdder(self, *args, **kw):
    """Start an AdderPlugin hook server on an ephemeral port."""
    self.adderAPI = TrafficLoggingFactory(AdderPlugin(*args, **kw), "adder-")
    # Port 0 lets the OS pick; record the actual port for the tests.
    self.adderServer = reactor.listenTCP(0, self.adderAPI)
    self.adderPort = self.adderServer.getHost().port
def _getAdderTwo(self, *args, **kw):
    """Start a second AdderPlugin server, configured to increment by 2."""
    kw["incrementBy"] = 2
    self.adderTwoAPI = TrafficLoggingFactory(AdderPlugin(*args, **kw), "adder2-")
    # Port 0 lets the OS pick; record the actual port for the tests.
    self.adderTwoServer = reactor.listenTCP(0, self.adderTwoAPI)
    self.adderTwoPort = self.adderTwoServer.getHost().port
def _hookTest(self, config_yml, adderArgs=dict(pre=True), adderTwoArgs=dict(pre=True)):
"""
Generalised version of a pre-hook test.
"""
self._getAdder(**adderArgs)
self._getAdderTwo(**adderTwoArgs)
self.dockerEndpoint = "/towel"
self.adapterEndpoint = "/adapter"
self.args = dict(dockerEndpoint=self.dockerEndpoint,
adapterEndpoint=self.adapterEndpoint,
adderPort=self.adderPort,
adderTwoPort=self.adderTwoPort)
self._configure(config_yml % self.args)
#.........这里部分代码省略.........
示例14: AdapterResource
# 需要导入模块: from treq.client import HTTPClient [as 别名]
# 或者: from treq.client.HTTPClient import get [as 别名]
class AdapterResource(resource.Resource):
"""
A powerstrip pre-hook for container create.
"""
isLeaf = True
def __init__(self, *args, **kw):
    # One Agent per resource; deliberately no connection pool.
    self._agent = Agent(reactor) # no connectionpool
    self.client = HTTPClient(self._agent)
    # resource.Resource is an old-style class here, hence the direct call.
    return resource.Resource.__init__(self, *args, **kw)
def render_POST(self, request):
"""
Handle a pre-hook: either create a filesystem, or move it in place.
"""
requestJson = json.loads(request.content.read())
if requestJson["Type"] != "pre-hook":
raise Exception("unsupported hook type %s" %
(requestJson["Type"],))
pprint.pprint(os.environ)
# BASE_URL like http://control-service/v1/ ^
json_payload = requestJson["ClientRequest"]["Body"]
json_parsed = json.loads(json_payload)
self.base_url = os.environ.get("FLOCKER_CONTROL_SERVICE_BASE_URL")
self.ip = os.environ.get("MY_NETWORK_IDENTITY")
self.host_uuid = os.environ.get("MY_HOST_UUID")
def wait_until_volume_in_place(result, fs):
"""
Called after a dataset has been created or moved in the cluster's
desired configuration. Wait until the volume shows up in the
cluster actual state on the right host (either having been created
or moved).
:return: Deferred which fires with the tuple (fs, dataset_id) --
that is, the filesystem and the corresponding flocker dataset
uuid that the docker client asked for -- firing only once the
filesystem has been created/moved and mounted (iow, exists on
the right host in the cluster state).
"""
dataset_id = result["dataset_id"]
def dataset_exists():
d = self.client.get(self.base_url + "/state/datasets")
d.addCallback(treq.json_content)
def check_dataset_exists(datasets):
"""
The /v1/state/datasets API seems to show the volume as
being on two hosts at once during a move. We assume
therefore that when it settles down to only show it on one
host that this means the move is complete.
"""
print "Got", self.ip, "datasets:", datasets
matching_datasets = []
for dataset in datasets:
if dataset["dataset_id"] == dataset_id:
matching_datasets.append(dataset)
if len(matching_datasets) == 1:
if matching_datasets[0]["primary"] == self.ip:
return True
return False
d.addCallback(check_dataset_exists)
return d
d = loop_until(dataset_exists)
d.addCallback(lambda ignored: (fs, dataset_id))
return d
d = self.client.get(self.base_url + "/configuration/datasets")
d.addCallback(treq.json_content)
def got_dataset_configuration(configured_datasets):
# form a mapping from names onto dataset objects
configured_dataset_mapping = {}
for dataset in configured_datasets:
if dataset["metadata"].get("name"):
configured_dataset_mapping[dataset["metadata"].get("name")] = dataset
# iterate over the datasets we were asked to create by the docker client
fs_create_deferreds = []
old_binds = []
if json_parsed['HostConfig']['Binds'] is not None:
for bind in json_parsed['HostConfig']['Binds']:
host_path, remainder = bind.split(":", 1)
# TODO validation
# if "/" in fs:
# raise Exception("Not allowed flocker filesystems more than one level deep")
if host_path.startswith("/flocker/"):
fs = host_path[len("/flocker/"):]
old_binds.append((fs, remainder))
# if a dataset exists, and is in the right place, we're cool.
if fs in configured_dataset_mapping:
dataset = configured_dataset_mapping[fs]
if dataset["primary"] == self.ip:
# simulate "immediate success"
fs_create_deferreds.append(defer.succeed((fs, dataset["dataset_id"])))
else:
# if a dataset exists, but is on the wrong server [TODO
# and is not being used], then move it in place.
d = self.client.post(
self.base_url + "/configuration/datasets/%s" % (
#.........这里部分代码省略.........