This article collects typical usage examples of the Python class app.util.url_builder.UrlBuilder. If you are unsure what the UrlBuilder class does or how to use it, the curated class examples below may help.
The following presents 15 code examples of the UrlBuilder class, ordered by popularity by default.
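For orientation, here is a minimal sketch of what a class like UrlBuilder might look like, inferred from the examples below (in particular the 'http://{host}/v1/...' format asserted in Example 2). It is not the actual implementation from app.util.url_builder, and the constructor signature and default version are assumptions.

class UrlBuilder(object):
    """Builds API URLs of the form http://<host>/<version>/<path segments> (sketch only)."""

    def __init__(self, host, version='v1'):
        self._host = host            # e.g. 'master:9000'
        self._version = version      # API version segment of the URL path

    def url(self, *args):
        # Join all path segments, converting non-string values (e.g. integer build ids) to strings.
        path = '/'.join(str(arg) for arg in args)
        return 'http://{}/{}/{}'.format(self._host, self._version, path)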
Example 1: _validate_successful_deployment
def _validate_successful_deployment(self, master_service_url, slaves_to_validate):
    """
    Poll the master's /slaves endpoint until either timeout or until all of the slaves have registered with
    the master.

    Throws exception upon timeout or API response error.

    :param master_service_url: the hostname:port for the running master service
    :type master_service_url: str
    :param slaves_to_validate: the list of slave hostnames (no ports) to deploy to
    :type slaves_to_validate: list[str]
    """
    master_api = UrlBuilder(master_service_url, BuildRunner.API_VERSION)
    slave_api_url = master_api.url('slave')
    network = Network()

    def all_slaves_registered():
        return len(self._non_registered_slaves(slave_api_url, slaves_to_validate, network)) == 0

    if not wait_for(
            boolean_predicate=all_slaves_registered,
            timeout_seconds=self._SLAVE_REGISTRY_TIMEOUT_SEC,
            poll_period=1,
            exceptions_to_swallow=(requests.RequestException, requests.ConnectionError)
    ):
        try:
            non_registered_slaves = self._non_registered_slaves(slave_api_url, slaves_to_validate, network)
        except ConnectionError:
            self._logger.error('Error contacting {} on the master.'.format(slave_api_url))
            raise SystemExit(1)

        self._logger.error('Slave registration timed out after {} sec, with slaves {} missing.'.format(
            self._SLAVE_REGISTRY_TIMEOUT_SEC, ','.join(non_registered_slaves)))
        raise SystemExit(1)
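The _non_registered_slaves helper used above is not shown on this page. Purely to illustrate the idea, here is a hypothetical standalone version: the /slave response schema and field names are assumptions, and the real helper is a method that also takes the project's Network wrapper rather than calling requests directly.

import requests

def non_registered_slaves(slave_api_url, slaves_to_validate):
    """Return the subset of slaves_to_validate that the master does not yet list as registered (sketch only)."""
    response = requests.get(slave_api_url)
    response.raise_for_status()
    # Assumed response shape: {'slaves': [{'url': 'hostname:port', ...}, ...]}
    registered_hosts = {slave['url'].split(':')[0] for slave in response.json().get('slaves', [])}
    return [host for host in slaves_to_validate if host not in registered_hosts]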
Example 2: test_url_should_generate_correct_url
def test_url_should_generate_correct_url(self):
    host = 'master:9000'
    first, second, third = 'first', 'second', 'third'
    builder = UrlBuilder(host)

    url = builder.url(first, second, third)

    self.assertEqual('http://{}/v1/{}/{}/{}'.format(host, first, second, third), url,
                     'Url generated did not match expectation')
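The test above only exercises the default API version. Judging from constructor calls elsewhere on this page (e.g. UrlBuilder(slave_url, self.API_VERSION) with API_VERSION = 'v1' in Example 13), the second constructor argument selects the version segment of the path. A complementary test might look like the following sketch; the exact behaviour for a non-default version is an assumption, and 'v2' is purely illustrative.

def test_url_should_use_given_api_version(self):
    host = 'master:9000'
    builder = UrlBuilder(host, 'v2')   # hypothetical non-default version string

    url = builder.url('build', 1, 'artifacts.zip')

    self.assertEqual('http://{}/v2/build/1/artifacts.zip'.format(host), url,
                     'Url generated did not match expectation')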
Example 3: get
def get(self, build_id, subjob_id, atom_id):
    """
    :type build_id: int
    :type subjob_id: int
    :type atom_id: int
    """
    max_lines = int(self.get_query_argument('max_lines', 50))
    offset_line = self.get_query_argument('offset_line', None)
    if offset_line is not None:
        offset_line = int(offset_line)

    try:
        response = self._cluster_master.get_console_output(
            build_id,
            subjob_id,
            atom_id,
            Configuration['results_directory'],
            max_lines,
            offset_line
        )
        self.write(response)
        return
    except ItemNotFoundError as e:
        # If the master doesn't have the atom's console output, it's possible it's currently being worked on,
        # in which case the slave that is working on it may be able to provide the in-progress console output.
        build = self._cluster_master.get_build(int(build_id))
        subjob = build.subjob(int(subjob_id))
        slave = subjob.slave
        if slave is None:
            raise e

        api_url_builder = UrlBuilder(slave.url)
        slave_console_url = api_url_builder.url('build', build_id, 'subjob', subjob_id, 'atom', atom_id, 'console')
        query = {'max_lines': max_lines}
        if offset_line is not None:
            query['offset_line'] = offset_line
        query_string = urllib.parse.urlencode(query)
        self.redirect('{}?{}'.format(slave_console_url, query_string))
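As a standalone illustration of the redirect URL the handler above ends up building (the slave host and the ids below are hypothetical values, not from the source):

import urllib.parse

slave_console_url = 'http://slave-host:43001/v1/build/1/subjob/2/atom/3/console'
query_string = urllib.parse.urlencode({'max_lines': 50, 'offset_line': 100})
print('{}?{}'.format(slave_console_url, query_string))
# -> http://slave-host:43001/v1/build/1/subjob/2/atom/3/console?max_lines=50&offset_line=100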
Example 4: __init__
def __init__(self, slave_url, num_executors):
    """
    :type slave_url: str
    :type num_executors: int
    """
    self.url = slave_url
    self.num_executors = num_executors
    self.id = self._slave_id_counter.increment()
    self._num_executors_in_use = Counter()
    self._network = Network(min_connection_poolsize=num_executors)
    self.current_build_id = None
    self.is_alive = True
    self._slave_api = UrlBuilder(slave_url, app.master.cluster_master.ClusterMaster.API_VERSION)
Example 5: block_until_build_queue_empty
def block_until_build_queue_empty(self, timeout=60):
    """
    This blocks until the master's build queue is empty. This data is exposed via the /queue endpoint and contains
    any jobs that are currently building or not yet started. If the queue is not empty before the timeout, this
    method raises an exception.

    :param timeout: The maximum number of seconds to block before raising an exception.
    :type timeout: int
    """
    master_api = UrlBuilder(self._master_url)
    queue_url = master_api.url('queue')

    def is_queue_empty():
        queue_resp = requests.get(queue_url)
        if queue_resp and queue_resp.ok:
            queue_data = queue_resp.json()
            if 'queue' in queue_data and len(queue_data['queue']) == 0:
                return True
        return False

    if not poll.wait_for(is_queue_empty, timeout, 0.5):
        raise Exception('Master service did not become idle before timeout.')
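Examples 1 and 5 both rely on a wait_for polling helper (called positionally here and with keyword arguments in Example 1). The real implementation lives in the project's app.util.poll module and is not shown on this page; the following is only a sketch of what such a helper could look like, based on how it is called above.

import time

def wait_for(boolean_predicate, timeout_seconds, poll_period=0.5, exceptions_to_swallow=()):
    """Poll boolean_predicate until it returns True or timeout_seconds elapses (sketch only).

    :return: True if the predicate succeeded before the timeout, False otherwise
    """
    end_time = time.time() + timeout_seconds
    while time.time() < end_time:
        try:
            if boolean_predicate():
                return True
        except exceptions_to_swallow:
            pass  # treat a swallowed exception like a False result and keep polling
        time.sleep(poll_period)
    return False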
Example 6: __init__
def __init__(self, slave_url, num_executors):
    """
    :type slave_url: str
    :type num_executors: int
    """
    self.url = slave_url
    self.num_executors = num_executors
    self.id = self._slave_id_counter.increment()
    self._num_executors_in_use = Counter()
    self._network = Network(min_connection_poolsize=num_executors)
    self.current_build_id = None
    self._is_alive = True
    self._slave_api = UrlBuilder(slave_url, self.API_VERSION)
    self._logger = log.get_logger(__name__)
Example 7: _download_and_extract_zip_results
def _download_and_extract_zip_results(self, master_api: UrlBuilder, build_id: int, download_dir: str):
    """Download the artifacts.zip from the master and extract it."""
    download_artifacts_url = master_api.url('build', build_id, 'artifacts.zip')
    download_filepath = os.path.join(download_dir, BuildArtifact.ARTIFACT_ZIPFILE_NAME)
    response = self._network.get(download_artifacts_url)

    if response.status_code == http.client.OK:
        # save file to disk, decompress, and delete
        with open(download_filepath, 'wb') as file:
            chunk_size = 500 * 1024
            for chunk in response.iter_content(chunk_size):
                file.write(chunk)
        fs.unzip_directory(download_filepath, delete=True)
Example 8: connect_to_master
def connect_to_master(self, master_url=None):
    """
    Notify the master that this slave exists.

    :param master_url: The URL of the master service. If none specified, defaults to localhost:43000.
    :type master_url: str
    """
    self._master_url = master_url or 'localhost:43000'
    self._master_api = UrlBuilder(self._master_url)
    connect_url = self._master_api.url('slave')
    data = {
        'slave': '{}:{}'.format(self.host, self.port),
        'num_executors': self._num_executors,
    }
    response = self._network.post(connect_url, data)

    self._slave_id = int(response.json().get('slave_id'))
    self._logger.info('Slave {}:{} connected to master on {}.', self.host, self.port, self._master_url)
Example 9: __init__
def __init__(self, master_url, request_params, secret):
    """
    :param master_url: The URL of the master on which the build will be executed
    :type master_url: str
    :param request_params: A dict of request params that will be json-encoded and sent in the build request
    :type request_params: dict
    :type secret: str
    """
    self._master_url = self._ensure_url_has_scheme(master_url)
    self._request_params = request_params
    self._secret = secret

    self._build_id = None
    self._network = Network()
    self._logger = get_logger(__name__)
    self._last_build_status_details = None
    self._master_api = UrlBuilder(master_url, self.API_VERSION)
    self._cluster_master_api_client = ClusterMasterAPIClient(master_url)
Example 10: __init__
def __init__(self, slave_url, num_executors, slave_session_id=None):
    """
    :type slave_url: str
    :type num_executors: int
    :type slave_session_id: str
    """
    self.url = slave_url
    self.num_executors = num_executors
    self.id = self._slave_id_counter.increment()
    self._num_executors_in_use = Counter()
    self._network = Network(min_connection_poolsize=num_executors)
    self.current_build_id = None
    self._last_heartbeat_time = datetime.now()
    self._is_alive = True
    self._is_in_shutdown_mode = False
    self._slave_api = UrlBuilder(slave_url, self.API_VERSION)
    self._session_id = slave_session_id
    self._logger = log.get_logger(__name__)
Example 11: connect_to_master
def connect_to_master(self, master_url=None):
    """
    Notify the master that this slave exists.

    :param master_url: The URL of the master service. If none specified, defaults to localhost:43000.
    :type master_url: str | None
    """
    self.is_alive = True
    self._master_url = master_url or "localhost:43000"
    self._master_api = UrlBuilder(self._master_url)
    connect_url = self._master_api.url("slave")
    data = {"slave": "{}:{}".format(self.host, self.port), "num_executors": self._num_executors}
    response = self._network.post(connect_url, data=data)
    self._slave_id = int(response.json().get("slave_id"))
    self._logger.info("Slave {}:{} connected to master on {}.", self.host, self.port, self._master_url)

    # We disconnect from the master before build_teardown so that the master stops sending subjobs. (Teardown
    # callbacks are executed in the reverse order that they're added, so we add the build_teardown callback first.)
    UnhandledExceptionHandler.singleton().add_teardown_callback(self._do_build_teardown_and_reset, timeout=30)
    UnhandledExceptionHandler.singleton().add_teardown_callback(self._disconnect_from_master)
Example 12: Slave
class Slave(object):

    _slave_id_counter = Counter()

    def __init__(self, slave_url, num_executors):
        """
        :type slave_url: str
        :type num_executors: int
        """
        self.url = slave_url
        self.num_executors = num_executors
        self.id = self._slave_id_counter.increment()
        self._num_executors_in_use = Counter()
        self._network = Network(min_connection_poolsize=num_executors)
        self.current_build_id = None
        self.is_alive = True
        self._slave_api = UrlBuilder(slave_url, app.master.cluster_master.ClusterMaster.API_VERSION)

    def api_representation(self):
        return {
            'url': self.url,
            'id': self.id,
            'num_executors': self.num_executors,
            'num_executors_in_use': self.num_executors_in_use(),
            'current_build_id': self.current_build(),
        }

    def mark_as_idle(self):
        """
        Do bookkeeping when this slave becomes idle. Error if the slave cannot be idle.
        """
        if self._num_executors_in_use.value() != 0:
            raise Exception('Trying to mark slave idle while {} executors still in use.'.format(
                self._num_executors_in_use.value()))

        self.current_build_id = None

    def setup(self, build_id, project_type_params):
        """
        Execute a setup command on the slave for the specified build. The command is executed asynchronously from the
        perspective of this method, but any subjobs will block until the slave finishes executing the setup command.

        :param build_id: The build id that this setup command is for.
        :type build_id: int
        :param project_type_params: The parameters that define the project type this build will execute in
        :type project_type_params: dict
        """
        setup_url = self._slave_api.url('build', build_id, 'setup')
        post_data = {
            'project_type_params': project_type_params,
        }
        self._network.post_with_digest(setup_url, post_data, Secret.get())

    def teardown(self):
        """
        Tell the slave to run the build teardown
        """
        teardown_url = self._slave_api.url('build', self.current_build_id, 'teardown')
        self._network.post(teardown_url)

    def start_subjob(self, subjob):
        """
        :type subjob: Subjob
        """
        if not self.is_alive:
            raise RuntimeError('Tried to start a subjob on a dead slave! ({}, id: {})'.format(self.url, self.id))

        SafeThread(target=self._async_start_subjob, args=(subjob,)).start()
        self.current_build_id = subjob.build_id()

    def _async_start_subjob(self, subjob):
        """
        :type subjob: Subjob
        """
        execution_url = self._slave_api.url('build', subjob.build_id(), 'subjob', subjob.subjob_id())
        post_data = {
            'subjob_artifact_dir': subjob.artifact_dir(),
            'atomic_commands': subjob.atomic_commands(),
        }
        response = self._network.post_with_digest(execution_url, post_data, Secret.get(), error_on_failure=True)

        subjob_executor_id = response.json().get('executor_id')
        analytics.record_event(analytics.MASTER_TRIGGERED_SUBJOB, executor_id=subjob_executor_id,
                               build_id=subjob.build_id(), subjob_id=subjob.subjob_id(), slave_url=self.url)

    def num_executors_in_use(self):
        return self._num_executors_in_use.value()

    def claim_executor(self):
        new_count = self._num_executors_in_use.increment()
        if new_count > self.num_executors:
            raise Exception('Cannot claim executor on slave {}. No executors left.'.format(self.url))
        return new_count

    def free_executor(self):
        new_count = self._num_executors_in_use.decrement()
        if new_count < 0:
            raise Exception('Cannot free executor on slave {}. All are free.'.format(self.url))
        return new_count

#......... (some code omitted here) .........
Example 13: Slave
class Slave(object):
    API_VERSION = 'v1'

    _slave_id_counter = Counter()

    def __init__(self, slave_url, num_executors):
        """
        :type slave_url: str
        :type num_executors: int
        """
        self.url = slave_url
        self.num_executors = num_executors
        self.id = self._slave_id_counter.increment()
        self._num_executors_in_use = Counter()
        self._network = Network(min_connection_poolsize=num_executors)
        self.current_build_id = None
        self._is_alive = True
        self._slave_api = UrlBuilder(slave_url, self.API_VERSION)
        self._logger = log.get_logger(__name__)

    def api_representation(self):
        return {
            'url': self.url,
            'id': self.id,
            'num_executors': self.num_executors,
            'num_executors_in_use': self.num_executors_in_use(),
            'current_build_id': self.current_build_id,
            'is_alive': self.is_alive(),
        }

    def mark_as_idle(self):
        """
        Do bookkeeping when this slave becomes idle. Error if the slave cannot be idle.
        """
        if self._num_executors_in_use.value() != 0:
            raise Exception('Trying to mark slave idle while {} executors still in use.'.format(
                self._num_executors_in_use.value()))

        self.current_build_id = None

    def setup(self, build):
        """
        Execute a setup command on the slave for the specified build. The setup process executes asynchronously on the
        slave and the slave will alert the master when setup is complete and it is ready to start working on subjobs.

        :param build: The build to set up this slave to work on
        :type build: Build
        """
        slave_project_type_params = build.build_request.build_parameters().copy()
        slave_project_type_params.update(build.project_type.slave_param_overrides())

        setup_url = self._slave_api.url('build', build.build_id(), 'setup')
        post_data = {
            'project_type_params': slave_project_type_params,
            'build_executor_start_index': build.num_executors_allocated,
        }
        self._network.post_with_digest(setup_url, post_data, Secret.get())
        self.current_build_id = build.build_id()

    def teardown(self):
        """
        Tell the slave to run the build teardown
        """
        if self.is_alive():
            teardown_url = self._slave_api.url('build', self.current_build_id, 'teardown')
            self._network.post(teardown_url)
        else:
            self._logger.notice('Teardown request to slave {} was not sent since slave is disconnected.', self.url)

    def start_subjob(self, subjob):
        """
        :type subjob: Subjob
        """
        if not self.is_alive():
            raise RuntimeError('Tried to start a subjob on a dead slave! ({}, id: {})'.format(self.url, self.id))

        SafeThread(target=self._async_start_subjob, args=(subjob,)).start()

    def _async_start_subjob(self, subjob):
        """
        :type subjob: Subjob
        """
        execution_url = self._slave_api.url('build', subjob.build_id(), 'subjob', subjob.subjob_id())
        post_data = {
            'subjob_artifact_dir': subjob.artifact_dir(),
            'atomic_commands': subjob.atomic_commands(),
        }
        response = self._network.post_with_digest(execution_url, post_data, Secret.get(), error_on_failure=True)

        subjob_executor_id = response.json().get('executor_id')
        analytics.record_event(analytics.MASTER_TRIGGERED_SUBJOB, executor_id=subjob_executor_id,
                               build_id=subjob.build_id(), subjob_id=subjob.subjob_id(), slave_id=self.id)

    def num_executors_in_use(self):
        return self._num_executors_in_use.value()

    def claim_executor(self):
        new_count = self._num_executors_in_use.increment()
        if new_count > self.num_executors:
            raise Exception('Cannot claim executor on slave {}. No executors left.'.format(self.url))

#......... (some code omitted here) .........
Example 14: ClusterSlave
#......... (some code omitted here) .........
        Called from teardown_build(). Do asynchronous teardown for the build so that we can make the call to
        teardown_build() non-blocking. Also take care of posting back to the master when teardown is complete.
        """
        if self._project_type:
            self._project_type.teardown_build()
            self._logger.info('Build teardown complete for build {}.', self._current_build_id)

        self._current_build_id = None
        self._project_type = None

        if not should_disconnect_from_master:
            # report back to master that this slave is finished with teardown and ready for a new build
            self._logger.info('Notifying master that this slave is ready for new builds.')
            idle_url = self._master_api.url('slave', self._slave_id, 'idle')
            response = self._network.post(idle_url)
            if response.status_code != http.client.OK:
                raise RuntimeError("Could not post teardown completion to master at {}".format(idle_url))

        elif self._is_master_responsive():
            # report back to master that this slave is shutting down and should not receive new builds
            self._logger.info('Notifying master to disconnect this slave.')
            disconnect_url = self._master_api.url('slave', self._slave_id, 'disconnect')
            response = self._network.post(disconnect_url)
            if response.status_code != http.client.OK:
                self._logger.error('Could not post disconnect notification to master at {}'.format(disconnect_url))

    def connect_to_master(self, master_url=None):
        """
        Notify the master that this slave exists.

        :param master_url: The URL of the master service. If none specified, defaults to localhost:43000.
        :type master_url: str
        """
        self._master_url = master_url or 'localhost:43000'
        self._master_api = UrlBuilder(self._master_url)
        connect_url = self._master_api.url('slave')
        data = {
            'slave': '{}:{}'.format(self.host, self.port),
            'num_executors': self._num_executors,
        }
        response = self._network.post(connect_url, data)

        self._slave_id = int(response.json().get('slave_id'))
        self._logger.info('Slave {}:{} connected to master on {}.', self.host, self.port, self._master_url)

    def _is_master_responsive(self):
        """
        Ping the master to check if it is still alive. Code using this method should treat the return value as a
        *probable* truth since the state of the master can change at any time. This method is not a replacement for
        error handling.

        :return: Whether the master is responsive or not
        :rtype: bool
        """
        # todo: This method repeats some logic we have in the deployment code (checking a service). We should DRY it up.
        is_responsive = True
        try:
            self._network.get(self._master_api.url())
        except requests.ConnectionError:
            is_responsive = False

        return is_responsive

    def start_working_on_subjob(self, build_id, subjob_id, subjob_artifact_dir, atomic_commands):
        """
        Begin working on a subjob with the given build id and subjob id. This just starts the subjob execution
        asynchronously on a separate thread.
Example 15: ClusterSlave
#......... (some code omitted here) .........
    def _send_master_idle_notification(self):
        if not self._is_master_responsive():
            self._logger.notice('Could not post idle notification to master because master is unresponsive.')
            return

        # Notify master that this slave is finished with teardown and ready for a new build.
        self._logger.info('Notifying master that this slave is ready for new builds.')
        self._notify_master_of_state_change(SlaveState.IDLE)

    def _disconnect_from_master(self):
        """
        Perform internal bookkeeping, as well as notify the master, that this slave is disconnecting itself
        from the slave pool.
        """
        self.is_alive = False

        if not self._is_master_responsive():
            self._logger.notice('Could not post disconnect notification to master because master is unresponsive.')
            return

        # Notify master that this slave is shutting down and should not receive new builds.
        self._logger.info('Notifying master that this slave is disconnecting.')
        self._notify_master_of_state_change(SlaveState.DISCONNECTED)

    def connect_to_master(self, master_url=None):
        """
        Notify the master that this slave exists.

        :param master_url: The URL of the master service. If none specified, defaults to localhost:43000.
        :type master_url: str | None
        """
        self.is_alive = True
        self._master_url = master_url or 'localhost:43000'
        self._master_api = UrlBuilder(self._master_url)
        connect_url = self._master_api.url('slave')
        data = {
            'slave': '{}:{}'.format(self.host, self.port),
            'num_executors': self._num_executors,
        }
        response = self._network.post(connect_url, data=data)
        self._slave_id = int(response.json().get('slave_id'))
        self._logger.info('Slave {}:{} connected to master on {}.', self.host, self.port, self._master_url)

        # We disconnect from the master before build_teardown so that the master stops sending subjobs. (Teardown
        # callbacks are executed in the reverse order that they're added, so we add the build_teardown callback first.)
        UnhandledExceptionHandler.singleton().add_teardown_callback(self._do_build_teardown_and_reset, timeout=30)
        UnhandledExceptionHandler.singleton().add_teardown_callback(self._disconnect_from_master)

    def _is_master_responsive(self):
        """
        Ping the master to check if it is still alive. Code using this method should treat the return value as a
        *probable* truth since the state of the master can change at any time. This method is not a replacement for
        error handling.

        :return: Whether the master is responsive or not
        :rtype: bool
        """
        # todo: This method repeats some logic we have in the deployment code (checking a service). We should DRY it up.
        is_responsive = True
        try:
            self._network.get(self._master_api.url())
        except requests.ConnectionError:
            is_responsive = False

        return is_responsive