本文整理汇总了Python中resource_management.libraries.script.script.Script.get_config方法的典型用法代码示例。如果您正苦于以下问题:Python Script.get_config方法的具体用法?Python Script.get_config怎么用?Python Script.get_config使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类resource_management.libraries.script.script.Script的用法示例。
在下文中一共展示了Script.get_config方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _install_lzo_support_if_needed
# 需要导入模块: from resource_management.libraries.script.script import Script [as 别名]
# 或者: from resource_management.libraries.script.script.Script import get_config [as 别名]
def _install_lzo_support_if_needed(self, params):
    """Stage the hadoop-lzo jar into the Tez classpath location, if one is referenced.

    Expands the configured classpath-prefix template against tez-site, and when the
    result names a hadoop-lzo jar, downloads it from the Ambari server's resource
    cache and copies it to the expected destination.
    """
    expanded_prefix = self._expand_hadoop_classpath_prefix(
        params.hadoop_classpath_prefix_template,
        params.config['configurations']['tez-site'])
    lzo_dest = extract_path_component(expanded_prefix, "hadoop-lzo-")
    # Guard clause: no hadoop-lzo component in the classpath means nothing to install.
    if not lzo_dest:
        return
    jar_name = os.path.split(lzo_dest)[1]
    cfg = Script.get_config()
    jar_url = urlparse.urljoin(cfg['hostLevelParams']['jdk_location'], jar_name)
    cached_jar = os.path.join(cfg["hostLevelParams"]["agentCacheDir"], jar_name)
    download_file(jar_url, cached_jar)
    # This is for protection against configuration changes. It will infect every new
    # destination with the lzo jar, but since the classpath points to the jar
    # directly we're getting away with it.
    if not os.path.exists(lzo_dest):
        copy_file(cached_jar, lzo_dest)
示例2: 服务参数模块(params)
# 需要导入模块: from resource_management.libraries.script.script import Script [as 别名]
# 或者: from resource_management.libraries.script.script.Script import get_config [as 别名]
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
# Ambari "params" module: evaluated once per agent command to expose cluster
# configuration as module-level constants consumed by the service scripts.
import status_params
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
# server configurations
config = Script.get_config()  # full command JSON pushed by the Ambari server
tmp_dir = Script.get_tmp_dir()  # agent-local scratch directory
# Raw stack version string (e.g. "2.3"), normalized via format_hdp_stack_version.
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
# default() returns None when the JSON path is absent from the command.
stack_name = default("/hostLevelParams/stack_name", None)
current_version = default("/hostLevelParams/current_version", None)
component_directory = status_params.component_directory
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
# default parameters
# NOTE(review): the HDP stack version is hard-coded here; presumably these paths
# should be derived from hdp_stack_version/version instead — confirm.
zk_home = "/usr/hdp/2.3.2.0-2950/zookeeper"
zk_bin = "/usr/hdp/2.3.2.0-2950/zookeeper/bin"
示例3: Cassandra 服务参数模块(params)
# 需要导入模块: from resource_management.libraries.script.script import Script [as 别名]
# 或者: from resource_management.libraries.script.script.Script import get_config [as 别名]
from resource_management.libraries.script.script import Script

# Command context pushed by the Ambari server for the current execution.
CONFIG = Script.get_config()
CONFIGS = CONFIG['configurations']  # per-service configuration dictionaries
HOST_INFO = CONFIG['clusterHostInfo']  # component-to-host placement info
CASSANDRA_CONF = CONFIGS['cassandra-conf']  # the "cassandra-conf" config type
# Agent-local install conventions (not sourced from the server-side config).
cassandra_conf_dir = '/etc/cassandra/conf'
cassandra_user = 'cassandra'
config_defaults = {
'cross_node_timeout': False,
'inter_dc_tcp_nodelay': False,
'rpc_server_type': 'sync',
'disk_failure_policy': 'stop',
'authorizer': 'AllowAllAuthorizer',
'tombstone_warn_threshold': 1000,
'internode_compression': 'all',
'truncate_request_timeout_in_ms': 60000,
'cluster_name': 'Test Cluster',
'read_request_timeout_in_ms': 5000,
'ssl_storage_port': 7001,
'listen_address': 'localhost',
'request_scheduler': 'org.apache.cassandra.scheduler.NoScheduler',
'range_request_timeout_in_ms': 10000,
'hinted_handoff_enabled': True,
'max_hint_window_in_ms': 10800000,
'authenticator': 'AllowAllAuthenticator',
'tombstone_failure_threshold': 100000,
'commitlog_directory': '/var/lib/cassandra/commitlog',
'column_index_size_in_kb': 64,