

Python Logger.error Method Code Examples

This article collects typical usage examples of the Python method resource_management.core.logger.Logger.error. If you have been wondering how Logger.error is used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, resource_management.core.logger.Logger.


The sections below present 15 code examples of the Logger.error method, ordered by popularity by default.
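Nearly every example follows the same pattern: log the failure with Logger.error, then raise an exception the Ambari agent understands. Here is a minimal, self-contained sketch of that pattern (the import paths are the real resource_management ones; the check itself is a placeholder):

from resource_management.core.logger import Logger
from resource_management.core.exceptions import ComponentIsNotRunning

def example_status_check(is_running):
  # The recurring pattern: report the error, then raise so the Ambari agent
  # marks the component as not running.
  if not is_running:
    Logger.error("service is not running")
    raise ComponentIsNotRunning()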

Example 1: check_postgre_running

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
 def check_postgre_running(self, statusCmd):
   Logger.info("check service by: {0}".format(statusCmd))
   cmd = "{0} | grep -E 'is running'".format(statusCmd)
   result = self.exe1(cmd)
   # grep prints nothing when 'is running' is absent, so an empty result means the service is down
   if result == "":
     Logger.error("service is not running")
     raise ComponentIsNotRunning()
Author: chinpeng, Project: ambari, Lines: 9, Source: utils.py

Example 2: check_url

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
 def check_url(self, url):
   Logger.info("check url: {0}".format(url))
   cmd = "curl -I \"" + url + "\" 2> /dev/null | awk 'NR==1{print}' | awk '{print $2}'"
   result = self.exe(cmd)
   if result != "200":
     Logger.error("service did not return HTTP 200")
     raise ComponentIsNotRunning()
Author: chinpeng, Project: ambari, Lines: 9, Source: utils.py

Example 3: bootstrap_standby_namenode

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def bootstrap_standby_namenode(params, use_path=False):

  bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""

  try:
    iterations = 50
    bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
    # Blueprint-based deployments start both NameNodes in parallel, and occasionally
    # the first attempt to bootstrap fails. Depending on how it fails, the
    # second attempt may not succeed either (e.g. it may find the folder and decide
    # that bootstrap succeeded). The solution is to pass the -force option, but only
    # during the initial start.
    if params.command_phase == "INITIAL_START":
      bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
    Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd))
    for i in range(iterations):
      Logger.info('Try %d out of %d' % (i+1, iterations))
      code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
      if code == 0:
        Logger.info("Standby namenode bootstrapped successfully")
        return True
      elif code == 5:
        Logger.info("Standby namenode already bootstrapped")
        return True
      else:
        Logger.warning('Bootstrap standby namenode failed with error code %d; will retry' % (code))
  except Exception as ex:
    Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
  return False
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 31, Source: hdfs_namenode.py
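
A hedged sketch of a typical call site for this helper (the HA guard is an assumption; the real NameNode start flow decides when to bootstrap):

# Hypothetical call site; params is the same module the function imports, and
# the guard is a placeholder for the script's real HA / standby detection.
import params
from resource_management.core.exceptions import Fail

if params.dfs_ha_enabled:
  if not bootstrap_standby_namenode(params, use_path=True):
    raise Fail("Could not bootstrap standby namenode")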

Example 4: wait_for_safemode_off

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
  """
  During NonRolling (aka Express Upgrade), after starting the NameNode, which is still in safemode, and then starting
  all of the DataNodes, we need the NameNode to receive all of the block reports and leave safemode.
  If HA is present, then this command will run individually on each NameNode, which checks for its own address.
  """
  import params

  Logger.info("Wait to leafe safemode since must transition from ON to OFF.")

  if params.security_enabled and execute_kinit:
    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
    Execute(kinit_command, user=params.hdfs_user, logoutput=True)

  try:
    # Note, this fails if namenode_address isn't prefixed with "params."

    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
    is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"

    # Wait up to 30 mins
    Execute(is_namenode_safe_mode_off,
            tries=115,
            try_sleep=10,
            user=params.hdfs_user,
            logoutput=True
            )

    # Wait a bit more since YARN still depends on block reports coming in.
    # Also saw intermittent errors with HBASE service check if it was done too soon.
    time.sleep(afterwait_sleep)
  except Fail:
    Logger.error("NameNode is still in safemode, please be careful with commands that need safemode OFF.")
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 35, Source: hdfs_namenode.py
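
For clarity, a hedged sketch of what the assembled probe expands to once get_dfsadmin_base_command() has filled in the binary and, with use_specific_namenode=True on an HA cluster, this NameNode's own address (host and port are placeholders):

# Assumed expansion of the safemode probe built above; the address is a placeholder.
is_namenode_safe_mode_off = (
  "hdfs dfsadmin -fs hdfs://nn1.example.com:8020 -safemode get"
  " | grep 'Safe mode is OFF'"
)
# Execute() reruns the probe until grep exits 0 (the line was found) or all
# 115 tries are exhausted.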

Example 5: write_actual_version_to_history_file

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def write_actual_version_to_history_file(repository_version, actual_version):
  """
  Save the tuple of repository_version,actual_version to the repo version history file if the repository_version
  doesn't already exist
  :param repository_version: normalized repo version (without build number) as received from the server
  :param actual_version: Repo version with the build number, as determined using hdp-select
  :returns Return True if appended the values to the file, otherwise, return False.
  """
  wrote_value = False
  if repository_version is None or actual_version is None:
    return False

  if repository_version == "" or actual_version == "":
    return False

  value = repository_version + "," + actual_version
  key_exists = False
  try:
    if os.path.isfile(REPO_VERSION_HISTORY_FILE):
      with open(REPO_VERSION_HISTORY_FILE, "r") as f:
        for line in f.readlines():
          line_parts = line.split(",")
          if line_parts and len(line_parts) == 2 and line_parts[0] == repository_version and line_parts[1] == actual_version:
            key_exists = True
            break

    if not key_exists:
      with open(REPO_VERSION_HISTORY_FILE, "a") as f:
        f.write(repository_version + "," + actual_version + "\n")
        wrote_value = True
    if wrote_value:
      Logger.info("Appended value \"{0}\" to file {1} to track this as a new version.".format(value, REPO_VERSION_HISTORY_FILE))
  except Exception, err:
    Logger.error("Failed to write the value {1} to file {0}. Error: {2}".format(REPO_VERSION_HISTORY_FILE, value, str(err)))
  return wrote_value
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 36, Source: repo_version_history.py
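
A hypothetical usage of the function above (the version strings are placeholders):

# Hypothetical usage; the version strings below are placeholders.
write_actual_version_to_history_file("2.3.0.0", "2.3.0.0-2557")
# The first call appends the line "2.3.0.0,2.3.0.0-2557" to REPO_VERSION_HISTORY_FILE;
# calling again with the same pair is a no-op because the key already exists.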

Example 6: prepare_upgrade_check_for_previous_dir

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def prepare_upgrade_check_for_previous_dir():
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
  Check that there is no "previous" folder inside the NameNode Name Dir.
  """
  import params

  if params.dfs_ha_enabled:
    namenode_ha = NamenodeHAState()
    if namenode_ha.is_active(params.hostname):
      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")

      problematic_previous_namenode_dirs = set()
      nn_name_dirs = params.dfs_name_dir.split(',')
      for nn_dir in nn_name_dirs:
        if os.path.isdir(nn_dir):
          # Check for a previous folder, which is not allowed.
          previous_dir = os.path.join(nn_dir, "previous")
          if os.path.isdir(previous_dir):
            problematic_previous_namenode_dirs.add(previous_dir)

      if len(problematic_previous_namenode_dirs) > 0:
        message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
                  'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
                  'NameNode Name Dir(s): {0}\n' \
                  '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
        Logger.error(message)
        raise Fail(message)
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 30, Source: namenode_upgrade.py

Example 7: check_command

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
 def check_command(command, keyword):
   cmd = "{0} | grep -E '{1}'".format(command, keyword)
   Logger.info("check command: {0}".format(command))
   output = Toolkit.exe(cmd)
   if output == "":
     Logger.error("command '{0}' did not match keyword '{1}'".format(command, keyword))
     raise ComponentIsNotRunning()
Author: fanzhidongyzby, Project: ambari, Lines: 9, Source: toolkit.py

Example 8: _copy_files

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def _copy_files(source_and_dest_pairs, component_user, file_owner, group_owner, kinit_if_needed):
  """
  :param source_and_dest_pairs: List of tuples (x, y), where x is the source file in the local file system,
  and y is the destination file path in HDFS
  :param component_user:  User that will execute the Hadoop commands, usually smokeuser
  :param file_owner: Owner to set for the file copied to HDFS (typically hdfs account)
  :param group_owner: Owning group to set for the file copied to HDFS (typically hadoop group)
  :param kinit_if_needed: kinit command if it is needed, otherwise an empty string
  :return: Returns 0 if at least one file was copied and no exceptions occurred, and 1 otherwise.

  Must kinit before calling this function.
  """
  import params

  return_value = 1
  if source_and_dest_pairs and len(source_and_dest_pairs) > 0:
    return_value = 0
    for (source, destination) in source_and_dest_pairs:
      try:
        destination_dir = os.path.dirname(destination)

        params.HdfsDirectory(destination_dir,
                             action="create",
                             owner=file_owner,
                             hdfs_user=params.hdfs_user,   # this will be the user to run the commands as
                             mode=0555
        )

        # Because CopyFromLocal does not guarantee synchronization, it's possible for two processes to first attempt to
        # copy the file to a temporary location, then process 2 fails because the temporary file was already created by
        # process 1, so process 2 tries to clean up by deleting the temporary file, and then process 1
        # cannot finish the copy to the final destination, and both fail!
        # For this reason, the file name on the destination must be unique, and we then rename it to the intended value.
        # The rename operation is synchronized by the Namenode.
        orig_dest_file_name = os.path.split(destination)[1]
        unique_string = str(uuid.uuid4())[:8]
        new_dest_file_name = orig_dest_file_name + "." + unique_string
        new_destination = os.path.join(destination_dir, new_dest_file_name)
        CopyFromLocal(source,
                      mode=0444,
                      owner=file_owner,
                      group=group_owner,
                      user=params.hdfs_user,               # this will be the user to run the commands as
                      dest_dir=destination_dir,
                      dest_file=new_dest_file_name,
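                      # NB: 'kinnit_if_needed' below is the parameter's actual spelling here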
                      kinnit_if_needed=kinit_if_needed,
                      hdfs_user=params.hdfs_user,
                      hadoop_bin_dir=params.hadoop_bin_dir,
                      hadoop_conf_dir=params.hadoop_conf_dir
        )

        mv_command = format("fs -mv {new_destination} {destination}")
        ExecuteHadoop(mv_command,
                      user=params.hdfs_user,
                      bin_dir=params.hadoop_bin_dir,
                      conf_dir=params.hadoop_conf_dir
        )
      except Exception, e:
        Logger.error("Failed to copy file. Source: %s, Destination: %s. Error: %s" % (source, destination, e.message))
        return_value = 1
Author: maduhu, Project: HDP2.5-ambari, Lines: 62, Source: dynamic_variable_interpretation.py
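
The race-avoidance trick described in the comments above is worth isolating. A minimal sketch of the same copy-to-unique-name-then-rename pattern, with the HDFS operations shown as comments (paths are placeholders):

# Minimal sketch of the copy-then-rename pattern used above; paths are placeholders.
import uuid

destination = "/apps/tez/tez.tar.gz"
temp_destination = destination + "." + str(uuid.uuid4())[:8]
# 1. hadoop fs -copyFromLocal /tmp/tez.tar.gz <temp_destination>
#    Collisions cannot occur: every process picks its own unique suffix.
# 2. hadoop fs -mv <temp_destination> <destination>
#    The rename is serialized by the NameNode, so concurrent copies cannot
#    clobber each other's work on the final path.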

Example 9: check_service

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
 def check_service(service, keyword = "running|active|运行"):
   cmd = "service {0} status | grep -E '{1}'".format(service, keyword)
   Logger.info("check service: {0}".format(service))
   output = Toolkit.exe(cmd)
   if output == "":
     Logger.error("service {0} is not running".format(service))
     raise ComponentIsNotRunning()
Author: fanzhidongyzby, Project: ambari, Lines: 9, Source: toolkit.py

Example 10: get_hdp_version

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def get_hdp_version():
  try:
    command = 'hdp-select status hadoop-client'
    return_code, hdp_output = shell.call(command, timeout=20)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hdp-select command to retrieve the version.')
Author: fanzhidongyzby, Project: ambari, Lines: 9, Source: setup_spark.py

Example 11: pre_rolling_upgrade_shutdown

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def pre_rolling_upgrade_shutdown(hdfs_binary):
  """
  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
  DataNode in preparation for an upgrade. This will then periodically check
  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
  This function will obtain the Kerberos ticket if security is enabled.
  :param hdfs_binary: name/path of the HDFS binary to use
  :return: True if the command ran OK (even with errors), and False if the DataNode needs to be stopped forcefully.
  """
  import params

  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
  if params.security_enabled:
    Execute(params.dn_kinit_cmd, user = params.hdfs_user)

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')

  code, output = shell.call(command, user=params.hdfs_user)
  if code == 0:
    # verify that the datanode is down
    _check_datanode_shutdown(hdfs_binary)
  else:
    # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
    if output is not None and re.search("Shutdown already in progress", output):
      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
      return False
  return True
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 30, Source: datanode_upgrade.py
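
For reference, a hedged sketch of the two dfsadmin invocations this flow relies on (the IPC address is a placeholder):

# Assumed expansion of the commands used in this upgrade flow.
shutdown_cmd = "hdfs dfsadmin -shutdownDatanode 0.0.0.0:8010 upgrade"
probe_cmd = "hdfs dfsadmin -getDatanodeInfo 0.0.0.0:8010"
# _check_datanode_shutdown() polls probe_cmd until it stops succeeding, i.e.
# until the DataNode no longer answers on its IPC address.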

Example 12: service_check

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
    def service_check(self, env):
        import params
        env.set_params(params)

        if not os.path.isfile(params.solr_config_pid_file):
            Logger.error(format("PID file {solr_config_pid_file} does not exist"))
            exit(1)

        if not params.solr_collection_sample_create:
            Logger.info("Create sample collection unchecked, skipping ...")
            return

        if exists_collection(params.solr_collection_name):
            Logger.warning(format("Collection {solr_collection_name} already exists, skipping ..."))
            return

        if not params.solr_cloud_mode:
            Execute(
                    format(
                            '{solr_config_bin_dir}/solr create_core -c {solr_collection_name}' +
                            ' -d {solr_collection_config_dir} -p {solr_config_port} >> {solr_config_service_log_file} 2>&1'
                    ),
                    environment={'JAVA_HOME': params.java64_home},
                    user=params.solr_config_user
            )
        else:
            Execute(format(
                    '{solr_config_bin_dir}/solr create_collection -c {solr_collection_name}' +
                    ' -d {solr_collection_config_dir} -p {solr_config_port}' +
                    ' -s {solr_collection_shards} -rf {solr_collection_replicas}' +
                    ' >> {solr_config_service_log_file} 2>&1'),
                    environment={'JAVA_HOME': params.java64_home},
                    user=params.solr_config_user
            )
Author: hortonworks, Project: solr-stack, Lines: 36, Source: service_check.py

Example 13: check_service_status

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
 def check_service_status(self, service, keyword):
   cmd = "service {0} status | grep -E '{1}'".format(service, keyword)
   Logger.info("running service check on {0}".format(service))
   (status, output) = commands.getstatusoutput(cmd)
   if output == "":
     Logger.error("service {0} is not running".format(service))
     raise ComponentIsNotRunning()
Author: fanzhidongyzby, Project: ambari, Lines: 9, Source: utils.py

Example 14: syncCluster

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
  def syncCluster(self, coors, datanodes, coord_port, datanode_port, host, sql_str):
    for coor in coors:
      name = "coordinator_" + coor.replace(".", "_")
      sql = "CREATE NODE {0} WITH (TYPE = '{1}', HOST = '{2}', PORT = {3});".format(name, "coordinator", coor, coord_port)
      cmd = sql_str.format(host, coord_port, sql)
      Logger.info(cmd)
      (ret, out) = commands.getstatusoutput(cmd)
      if (ret == 0 and out.rfind("CREATE NODE") != -1):
        # insert record
        Logger.info("coordinator {0} create success on coordinator {1}".format(name, host))
      elif (ret != 0 and out.rfind(name) != -1):
        # update record
        Logger.info("coordinator {0} exist on coordinator {1}, updating coordinator ...".format(name, host))
        update_cmd = cmd.replace("CREATE", "ALTER")
        Logger.info(update_cmd)
        (ret, out) = commands.getstatusoutput(update_cmd)
        if (ret == 0 and out.rfind("ALTER NODE") != -1):
          Logger.info("coordinator {0} update success on coordinator {1}".format(name, host))
        elif (ret != 0 and out.rfind(name) != -1):
          # update coordinator self
          # update pgxc_node set node_host = 'local', node_port = 5000 WHERE node_name = 'datanode_10_151_0_123';
          self_sql = "update pgxc_node set node_host = '{0}', node_port = {1} WHERE node_name = '{2}';".format(host, datanode_port, name)
          self_cmd = sql_str.format(coor, coord_port, self_sql)
          Logger.info(self_cmd)
          (ret, out) = commands.getstatusoutput(self_cmd)
          if (ret == 0 and out.rfind("UPDATE 1") != -1):
            Logger.info("coordinator {0} update success on coordinator {1} itself".format(name, host))
          else:
            Logger.error("coordinator {0} update failed on coordinator {1} itself".format(name, host))
            raise Fail()
        else:
          Logger.error("coordinator {0} update failed on coordinator {1}".format(name, host))
          raise Fail()
      else:
        Logger.error("coordinator {0} create failed on coordinator {1}".format(name, host))
        raise Fail()

    for datanode in datanodes:
      name = "datanode_" + datanode.replace(".", "_")
      sql = "CREATE NODE {0} WITH (TYPE = '{1}', HOST = '{2}', PORT = {3});".format(name, "datanode", datanode, datanode_port)
      cmd = sql_str.format(host, coord_port, sql)
      Logger.info(cmd)
      (ret, out) = commands.getstatusoutput(cmd)
      if (ret == 0 and out.rfind("CREATE NODE") != -1):
        # insert record
        Logger.info("datanode {0} create success on coordinator {1}".format(name, host))
      elif (ret != 0 and out.rfind(name) != -1):
        # update record
        Logger.info("datanode {0} exist on coordinator {1}, updating datanode ...".format(name, host))
        update_cmd = cmd.replace("CREATE", "ALTER")
        Logger.info(update_cmd)
        (ret, out) = commands.getstatusoutput(update_cmd)
        if (ret == 0 and out.rfind("ALTER NODE") != -1):
          Logger.info("datannode {0} update success on coordinator {1}".format(name, host))
        else:
          Logger.error("datanode {0} update failed on coordinator {1}".format(name, host))
          raise Fail()
      else:
        Logger.error("datanode {0} create failed on coordinator {1}".format(name, host))
        raise Fail()
Author: fanzhidongyzby, Project: ambari, Lines: 62, Source: utils.py
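
The method receives sql_str already formatted by its caller. A plausible, entirely hypothetical shape, assuming a psql wrapper for a Postgres-XC style cluster:

# Hypothetical shape of the sql_str template this method expects; the real
# value comes from the caller and may differ.
sql_str = "psql -h {0} -p {1} -U postgres postgres -c \"{2}\""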

Example 15: service_check

# Required module: from resource_management.core.logger import Logger [as alias]
# Alternatively: from resource_management.core.logger.Logger import error [as alias]
def service_check(cmd, user, label):
    """
    Executes a service check command that adheres to LSB-compliant
    return codes.  The return codes are interpreted as defined
    by the LSB.

    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
    for more information.

    :param cmd: The service check command to execute.
    :param user: The user to execute the command as.
    :param label: The name of the service.
    """
    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)

    if len(err) > 0:
      Logger.error(err)

    if rc in [1, 2, 3]:
      # if return code in [1, 2, 3], then 'program is not running' or 'program is dead'
      Logger.info("{0} is not running".format(label))
      raise ComponentIsNotRunning()

    elif rc == 0:
      # if return code = 0, then 'program is running or service is OK'
      Logger.info("{0} is running".format(label))

    else:
      # else service state is unknown
      err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
      Logger.error(err_msg)
      raise ExecutionFailed(err_msg, rc, out, err)
Author: JonZeolla, Project: incubator-metron, Lines: 34, Source: common.py
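
As a quick reference for the branches above, these are the LSB status exit codes the function interprets (per the spec linked in the docstring):

# LSB "status" action exit codes, per the spec linked in the docstring.
LSB_STATUS_CODES = {
    0: "program is running or service is OK",
    1: "program is dead and /var/run pid file exists",
    2: "program is dead and /var/lock lock file exists",
    3: "program is not running",
    4: "program or service status is unknown",
}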


Note: The resource_management.core.logger.Logger.error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by the community; copyright in the source code belongs to the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.