

Python Script.is_hdp_stack_greater_or_equal Method Code Examples

This article collects typical usage examples of the Python method resource_management.libraries.script.script.Script.is_hdp_stack_greater_or_equal. If you have been wondering what Script.is_hdp_stack_greater_or_equal does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the containing class, resource_management.libraries.script.script.Script.


Seven code examples of Script.is_hdp_stack_greater_or_equal are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: get_hdfs_binary

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
def get_hdfs_binary(distro_component_name):
  """
  Get the hdfs binary to use depending on the stack and version.
  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
  :return: The hdfs binary to use
  """
  import params
  hdfs_binary = "hdfs"
  if params.stack_name == "HDP":
    # This was used in HDP 2.1 and earlier
    hdfs_binary = "hdfs"
    if Script.is_hdp_stack_greater_or_equal("2.2"):
      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)

  return hdfs_binary
Developer: OpenPOWER-BigData, Project: HDP-ambari, Lines: 17, Source: utils.py
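
A minimal sketch of how this helper might be invoked from a service script. The component name and the dfsadmin command below are illustrative assumptions, not taken from the original source:

# Resolve the hdfs binary for the NameNode component; the name follows the
# distro_component_name convention described in the docstring above.
hdfs_binary = get_hdfs_binary("hadoop-hdfs-namenode")

# Yields "hdfs dfsadmin -safemode get" on HDP 2.1 and earlier, or
# "/usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs dfsadmin -safemode get" on 2.2+
safemode_cmd = "{0} dfsadmin -safemode get".format(hdfs_binary)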

Example 2: get_hadoop_dir

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
def get_hadoop_dir(target, force_latest_on_upgrade=False):
  """
  Return the shared hadoop directory, resolved in the following override order:
  1. Use the default for HDP 2.1 and lower.
  2. On HDP 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}.
  3. On HDP 2.2 and higher AND during an upgrade, use /usr/hdp/<version>/hadoop/{target}.
     However, if the upgrade has not yet invoked hdp-select, use the component's
     current version instead.
  :param target: the target directory
  :param force_latest_on_upgrade: if True, return the "current" directory without
  the HDP version built into the path, such as /usr/hdp/current/hadoop-client
  """

  if target not in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  if Script.is_hdp_stack_greater_or_equal("2.2"):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)

    # if we are not forcing "current" for HDP 2.2, then attempt to determine
    # if the exact version needs to be returned in the directory
    if not force_latest_on_upgrade:
      stack_info = _get_upgrade_stack()

      if stack_info is not None:
        stack_version = stack_info[1]

        # determine if hdp-select has been run and if not, then use the current
        # hdp version until this component is upgraded
        current_hdp_version = get_role_component_current_hdp_version()
        if current_hdp_version is not None and stack_version != current_hdp_version:
          stack_version = current_hdp_version

        if target == "home":
          # home uses a different template
          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
        else:
          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)

  return hadoop_dir
Developer: OpenPOWER-BigData, Project: HDP-ambari, Lines: 48, Source: hdp_select.py
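
For context, a plausible shape of the module-level constants this function relies on; this is a sketch under assumed values, since the actual definitions live elsewhere in hdp_select.py and may differ:

# Assumed templates/defaults referenced above (illustrative only):
HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"    # version, component, target
HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"   # version, component
HADOOP_DIR_DEFAULTS = {
  "home": "/usr/lib/hadoop",
  "libexec": "/usr/lib/hadoop/libexec",
  "sbin": "/usr/lib/hadoop/sbin",
  "bin": "/usr/bin",
  "lib": "/usr/lib/hadoop/lib"
}

# Typical call, as seen in Example 5's params_linux.py:
hadoop_bin_dir = get_hadoop_dir("bin")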

Example 3: default

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
stack_name = default("/hostLevelParams/stack_name", None)
current_version = default("/hostLevelParams/current_version", None)
component_directory = status_params.component_directory

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# default parameters
zk_home = "/usr/hdp/2.3.2.0-2950/zookeeper"
zk_bin = "/usr/hdp/2.3.2.0-2950/zookeeper/bin"
zk_cli_shell = "/usr/hdp/2.3.2.0-2950/zookeeper/bin/zkCli.sh"
config_dir = "/etc/zookeeper/conf"

# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
  zk_home = format("/usr/hdp/current/{component_directory}")
  zk_bin = format("/usr/hdp/current/{component_directory}/bin")
  zk_cli_shell = format("/usr/hdp/current/{component_directory}/bin/zkCli.sh")
  config_dir = status_params.config_dir


zk_user = config['configurations']['zookeeper-env']['zk_user']
hostname = config['hostname']
user_group = config['configurations']['cluster-env']['user_group']
zk_env_sh_template = config['configurations']['zookeeper-env']['content']

zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
zk_pid_dir = status_params.zk_pid_dir
zk_pid_file = status_params.zk_pid_file
Developer: andreysabitov, Project: ambari-mantl, Lines: 32, Source: params_linux.py
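
Note that format here is resource_management's helper, which (unlike plain str.format) interpolates {component_directory} from variables visible in the calling scope. A rough plain-Python equivalent of the 2.2+ branch, assuming a component_directory value of "zookeeper-server":

component_directory = "zookeeper-server"  # assumed example value
zk_home = "/usr/hdp/current/{0}".format(component_directory)
zk_bin = "/usr/hdp/current/{0}/bin".format(component_directory)
zk_cli_shell = "/usr/hdp/current/{0}/bin/zkCli.sh".format(component_directory)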

Example 4: default

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
from resource_management.libraries import functions

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)

# node hostname
hostname = config["hostname"]

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config["hostLevelParams"]["stack_version"])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
stack_is_hdp21 = Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.2")

# this is not available on INSTALL action because hdp-select is not available
hdp_stack_version = functions.get_hdp_version("hive-server2")

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

# current host stack version
current_version = default("/hostLevelParams/current_version", None)

# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
Developer: andreysabitov, Project: ambari-mantl, Lines: 32, Source: params_linux.py
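
The stack_is_hdp21 flag above is a bounded-range check: the stack qualifies when it is at least 2.0 but strictly below 2.2. A self-contained sketch of the same comparison logic, assuming simple dotted version strings without build numbers:

def version_tuple(v):
  # "2.1" -> (2, 1); illustrative only, the real helpers also handle build numbers
  return tuple(int(part) for part in v.split("."))

def in_stack_range(stack, lower_inclusive, upper_exclusive):
  return version_tuple(lower_inclusive) <= version_tuple(stack) < version_tuple(upper_exclusive)

in_stack_range("2.1", "2.0", "2.2")  # True  -> the HDP 2.0/2.1 code path
in_stack_range("2.2", "2.0", "2.2")  # False -> the 2.2+ code path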

Example 5:

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False

# hadoop default parameters
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_home = hdp_select.get_hadoop_dir("home")
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")

# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

  if not security_enabled:
    hadoop_secure_dn_user = '""'
  else:
    dfs_dn_port = utils.get_port(dfs_dn_addr)
    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
    # Avoid situations where the DataNode cannot start as a plain (non-root) user because it would need to bind root-owned privileged ports
    if dfs_http_policy == "HTTPS_ONLY":
      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
    elif dfs_http_policy == "HTTP_AND_HTTPS":
      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
Developer: OpenPOWER-BigData, Project: HDP-ambari, Lines: 33, Source: params_linux.py
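
The get_port and is_secure_port helpers are not shown in this snippet. A plausible sketch of their behavior, assuming "secure" means a privileged port below 1024 (which only root may bind on Linux):

def get_port(address):
  # "0.0.0.0:50010" -> 50010; None when the address has no port component
  if address is None or ":" not in address:
    return None
  try:
    return int(address.split(":")[-1])
  except ValueError:
    return None

def is_secure_port(port):
  # Ports below 1024 require root privileges to bind, hence "secure" here
  return port is not None and port < 1024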

Example 6: str

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
# zeppelin-env.sh
zeppelin_env_content = config["configurations"]["zeppelin-env"]["content"]

# detect HS2 details and java home
master_configs = config["clusterHostInfo"]
hive_server_host = str(master_configs["hive_server_host"][0])
hive_metastore_host = str(master_configs["hive_metastore_host"][0])
hive_metastore_port = str(get_port_from_url(config["configurations"]["hive-site"]["hive.metastore.uris"]))

java64_home = config["hostLevelParams"]["java_home"]


# e.g. 2.3
stack_version_unformatted = str(config["hostLevelParams"]["stack_version"])

# e.g. 2.3.0.0
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

if hasattr(Script, "is_hdp_stack_greater_or_equal") and Script.is_hdp_stack_greater_or_equal("2.3"):
    mvn_spark_tag = "spark-1.3"
else:
    mvn_spark_tag = "spark-1.2"

# e.g. 2.3.0.0-2130
full_version = default("/commandParams/version", None)
hdp_version = full_version

if hasattr(functions, "get_hdp_version"):
    spark_version = functions.get_hdp_version("spark-client")
Developer: ameetp, Project: ambari-zeppelin-service, Lines: 31, Source: params.py
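
The hasattr guards make this service definition tolerant of older Ambari releases whose Script and functions modules predate these helpers. A minimal sketch of the same defensive pattern, using a hypothetical safe_call helper that is not part of the original source:

def safe_call(obj, method_name, *args, **kwargs):
  # Call obj.method_name(*args) when it exists; otherwise return the fallback.
  fn = getattr(obj, method_name, None)
  return fn(*args) if callable(fn) else kwargs.get("fallback")

# Equivalent to the hasattr guard above:
on_hdp23 = safe_call(Script, "is_hdp_stack_greater_or_equal", "2.3", fallback=False)
mvn_spark_tag = "spark-1.3" if on_hdp23 else "spark-1.2"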

Example 7: get_hadoop_conf_dir

# Required import: from resource_management.libraries.script.script import Script [as alias]
# Or: from resource_management.libraries.script.script.Script import is_hdp_stack_greater_or_equal [as alias]
def get_hadoop_conf_dir(force_latest_on_upgrade=False):
  """
  Gets the shared hadoop conf directory using:
  1.  Start with /etc/hadoop/conf
  2.  When the stack is HDP 2.2 or higher, use /usr/hdp/current/hadoop-client/conf
  3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
      by conf-select.  This is in the form /usr/hdp/VERSION/hadoop/conf to make sure
      the configs are written in the correct place. However, if the component itself has
      not yet been upgraded, it should use the hadoop configs from the prior version.
      This will perform an hdp-select status to determine which version to use.
  :param force_latest_on_upgrade:  if True, then force the returned path to always
  be that of the upgrade target version, even if hdp-select has not been called. This
  is primarily used by hooks like before-ANY to ensure that hadoop environment
  configurations are written to the correct location since they are written out
  before the hdp-select/conf-select would have been called.
  """
  hadoop_conf_dir = "/etc/hadoop/conf"
  stack_name = None
  version = None
  allow_setting_conf_select_symlink = False

  if not Script.in_stack_upgrade():
    # During normal operation, use the client conf symlink on HDP 2.2 and higher
    if Script.is_hdp_stack_greater_or_equal("2.2"):
      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"

    if Script.is_hdp_stack_greater_or_equal("2.3"):
      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
      stack_name = default("/hostLevelParams/stack_name", None)
      version = default("/commandParams/version", None)

      if stack_name and version:
        version = str(version)
        allow_setting_conf_select_symlink = True
  else:
    # During an upgrade/downgrade (Rolling or Express), the conf dir must be calculated from the target version
    '''
    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir

    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to /usr/hdp/current/hadoop-client/conf
    EU    | 2.1    | 2.3    | Upgrade               | Use versioned /usr/hdp/current/hadoop-client/conf
          |        |        | No Downgrade Allowed  | Invalid
    EU/RU | 2.2    | 2.2.*  | Any                   | Use /usr/hdp/current/hadoop-client/conf
    EU/RU | 2.2    | 2.3    | Upgrade               | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
          |        |        | Downgrade             | Use /usr/hdp/current/hadoop-client/conf
    EU/RU | 2.3    | 2.3.*  | Any                   | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
    '''

    # The method "is_hdp_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
    # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
    # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
    if Script.is_hdp_stack_greater_or_equal("2.2"):
      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"

      # This contains the "version", including the build number, that is actually used during a stack upgrade and
      # is the version upgrading/downgrading to.
      stack_info = hdp_select._get_upgrade_stack()

      if stack_info is not None:
        stack_name = stack_info[0]
        version = stack_info[1]
      else:
        raise Fail("Unable to get parameter 'version'")
      
      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
      # This is the version either upgrading or downgrading to.
      if compare_versions(format_hdp_stack_version(version), "2.3.0.0") >= 0:
        # Determine if hdp-select has been run and if not, then use the current
        # hdp version until this component is upgraded.
        if not force_latest_on_upgrade:
          current_hdp_version = hdp_select.get_role_component_current_hdp_version()
          if current_hdp_version is not None and version != current_hdp_version:
            version = current_hdp_version
            Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_hdp_version))

        # Only change the hadoop_conf_dir path, don't conf-select this older version
        hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(version)
        Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))

        allow_setting_conf_select_symlink = True

  if allow_setting_conf_select_symlink:
    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
    # symlink for /etc/hadoop/conf.
    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
    # Therefore, any calls to conf-select will fail.
    # For that reason, if the hadoop conf directory exists, then make sure it is set.
    if os.path.exists(hadoop_conf_dir):
      Logger.info("The hadoop conf dir {0} exists, will call conf-select on it for version {1}".format(hadoop_conf_dir, version))
      select(stack_name, "hadoop", version)

  Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
  return hadoop_conf_dir
Developer: OpenPOWER-BigData, Project: HDP-ambari, Lines: 100, Source: conf_select.py
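
A typical use, as in Example 5's params_linux.py, is to resolve the conf dir once when parameters are evaluated; the before-ANY hook case described in the docstring would pass force_latest_on_upgrade=True. Both calls below are sketches grounded in the signature shown above:

# Normal service scripts: honor hdp-select state during an upgrade
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

# before-ANY hook: configs must land in the upgrade target's directory even
# though hdp-select/conf-select have not yet run for this component
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)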


Note: the resource_management.libraries.script.script.Script.is_hdp_stack_greater_or_equal method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs; the code snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors; consult each project's License before redistributing or using it. Do not reproduce without permission.