

Python logger.info Function Code Examples

This article collects typical usage examples of the Python function execo_engine.logger.info. If you are wondering what the info function does, how to call it, or what it looks like in real code, the curated examples below should help.


The sections below present 15 code examples of the info function, ordered by popularity.
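Before the examples, a minimal sketch of how this logger is typically obtained and used may help: execo_engine exposes a standard-library logging.Logger instance named logger, so the usual logging API applies (the message text below is illustrative).

    # Minimal sketch: execo_engine's `logger` is a standard logging.Logger.
    from execo_engine import logger

    logger.setLevel('INFO')                                # string level names work
    logger.info("Deployment finished on %s", "node-1")     # printf-style arguments
    logger.info("Combination done: " + str({'nodes': 4}))  # concatenation, as in the examples below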

Example 1: prepare_dataset

    def prepare_dataset(self, comb):
        """Prepare the dataset to be used in the next set of experiments.

        Args:
          comb (dict): The combination containing the dataset's parameters.

        Returns:
          dict: The dataset parameters.

        """

        # Create ds_comb
        (ds_class_name, ds_params) = self.comb_manager.get_ds_class_params(comb)

        local_path = ds_params["local_path"]
        remote_path = os.path.join(self.div_p2p.remote_dir,
                                   os.path.basename(local_path))

        ds_comb = {"ds.class.path": remote_path, "ds.class": ds_class_name}

        # Copy dataset to host
        logger.info(self._th_prefix() + "Prepare dataset with combination " +
                    str(self.comb_manager.get_ds_parameters(comb)))

        copy_code = TaktukPut([self.div_p2p.host], [local_path], remote_path)
        copy_code.run()

        # Notify stats manager
        self.stats_manager.add_ds(self.ds_id, comb)

        return ds_comb
Developer: mliroz, Project: diversity_p2p, Lines: 31, Source: test_thread.py

Example 2: change_conf

    def change_conf(self, params, conf_file=None, default_file=MR_CONF_FILE):
        """Modify Hadoop configuration. This method copies the configuration
        files from the first host of each g5k cluster conf dir into a local
        temporary dir, do all the changes in place and broadcast the new
        configuration files to all hosts.
        
        Args:
          params (dict of str:str):
            The parameters to be changed in the form key:value.
          conf_file (str, optional):
            The file where parameters should be set. If not specified, all
            files are checked for the parameter name and the parameter is set
            in the file where the property is found. If not found, the
            parameter is set in the default file.
          default_file (str, optional): The default conf file where to set the
            parameter if not found. Only applies when conf_file is not set.
        """

        for cluster in self.hw.get_clusters():
            hosts = cluster.get_hosts()

            # Copy conf files from first host in the cluster
            action = Remote("ls " + self.conf_dir + "/*.xml", [hosts[0]])
            action.run()
            output = action.processes[0].stdout

            remote_conf_files = []
            for f in output.split():
                remote_conf_files.append(os.path.join(self.conf_dir, f))

            tmp_dir = "/tmp/mliroz_temp_hadoop/"
            if not os.path.exists(tmp_dir):
                os.makedirs(tmp_dir)

            action = Get([hosts[0]], remote_conf_files, tmp_dir)
            action.run()

            # Do replacements in temp file
            if conf_file:
                f = os.path.join(tmp_dir, conf_file)
                for name, value in params.iteritems():
                    replace_in_xml_file(f, name, value, True)
            else:
                temp_conf_files = [os.path.join(tmp_dir, f) for f in
                                   os.listdir(tmp_dir)]

                for name, value in params.iteritems():
                    for f in temp_conf_files:
                        if replace_in_xml_file(f, name, value):
                            break
                    else:
                        # Property not found - set it in the default file
                        logger.info("Parameter with name " + name + " has not "
                                    "been found in any conf file. Setting it "
                                    "in " + default_file)
                        f = os.path.join(tmp_dir, default_file)
                        replace_in_xml_file(f, name, value, True)

            # Copy back the files to all hosts
            self._copy_conf(tmp_dir, hosts)
Developer: mliroz, Project: bigdata_dpy, Lines: 60, Source: cluster.py
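A note on the for ... else construct in this example: the else branch runs only when the inner loop finishes without hitting break, i.e. when no configuration file contained the parameter. A minimal standalone illustration (the file names are made up):

    # for/else: the `else` block executes only if the loop did not `break`.
    conf_files = ["core-site.xml", "hdfs-site.xml"]
    for f in conf_files:
        if f == "mapred-site.xml":  # never matches in this toy example
            break
    else:
        print("parameter not found in any conf file; using the default file")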

Example 3: initial_state

    def initial_state(self, outdir=None):
        """ Convert the dict given from parameters to Numpy array """
        logger.info(style.log_header('Initial boxes configuration\n') +
                    ''.ljust(8) +
                    ''.join([style.emph(box.rjust(10))
                             for box in self.Boxes.iterkeys()]) +
                    style.object_repr('\n' + 'Delta'.ljust(8)) +
                    ''.join([str(box['Delta']).rjust(10)
                             for box in self.Boxes.itervalues()]) +
                    style.object_repr('\n' + 'Mass'.ljust(8)) +
                    ''.join([str(box['Mass']).rjust(10)
                             for box in self.Boxes.itervalues()])
                    )
        if outdir is None:
            outdir = self.result_dir + '/'
        self.plot_state(self.Boxes.keys(),
                        array([box['Delta']
                               for box in self.Boxes.itervalues()]),
                        name='_initial', outdir=outdir)

        self._Mass = array([box['Mass']
                            for box in self.Boxes.itervalues()])
        self._Flux = array([box.values()
                            for box in self.Flux.values()])
        self._Partcoeff = array([box.values()
                                 for box in self.Partcoeff.values()])

        f = open(outdir + '/Delta.initial', 'w')
        for box, value in self.Boxes.iteritems():
            f.write(box + ' ' + str(value['Delta']) + '\n')
        f.close()
        return [box['Delta'] for box in self.Boxes.itervalues()]
Developer: lpouillo, Project: boxmodel, Lines: 32, Source: IsotopicBoxModel.py
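As an aside, the open/write/close block at the end of this example is usually written with a context manager, which closes the file even if a write raises. A behavior-equivalent sketch (assuming the same self.Boxes structure and Python 2, as in the original):

    # Same output as the explicit open()/close() above, but exception-safe.
    with open(outdir + '/Delta.initial', 'w') as f:
        for box, value in self.Boxes.iteritems():
            f.write(box + ' ' + str(value['Delta']) + '\n')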

Example 4: run

    def run(self):
        # Go to the result folder before everything
        os.chdir(self.result_dir)

        # jobs = [(_jobID, _site)]
        # Get nodes
        # nodes = get_oar_job_nodes(_jobID, _site)

        try:
            # logger.info("Creating hostfiles for all combinations...")
            # for nbr_node in _nbrNodes:
            #     hostfile_filename = self.result_dir + '/' + 'hostfile-' + nbr_node
            #     with open(hostfile_filename, 'w') as hostfile:
            #         for node in nodes[:int(nbr_node)]:
            #             print>>hostfile, node.address

            spack_command = 'spack install -v [email protected]+starpu+fxt ^[email protected]+fxt'
            # spack_process = Remote(spack_command, nodes)
            logger.info("Starting StarPU installation...")
            spack_process = Process(spack_command).start()            

            spack_process.wait()
            logger.info("StarPU installation DONE...")
            if not spack_process.ok:
                logger.info("Error : " + spack_process.error_reason)
            else:
                logger.info("spack stdout: {}".format(spack_process.stdout))
            spack_process.kill()

            # Pilotage
        except:
            traceback.print_exc()
        finally:
            logger.info("Fin...")
Developer: FlorianPO, Project: Performance-Regression-Testing, Lines: 34, Source: execo_local_script.py

Example 5: _exec_on_node

    def _exec_on_node(self, command, machine, log):
        logger.info(log)
        rem = ex.action.Remote(command, machine,
                               connection_params={'user': 'ci'}).run()
        if rem.ok:
            logger.info("Success")
        else:
            logger.error("Failure")
Developer: Marie-Donnie, Project: misc, Lines: 7, Source: os-distri-db.py

Example 6: _initialize_conf

    def _initialize_conf(self):
        """Merge locally-specified configuration files with default files
        from the distribution."""

        if os.path.exists(self.local_base_conf_dir):
            base_conf_files = [os.path.join(self.local_base_conf_dir, f)
                               for f in os.listdir(self.local_base_conf_dir)]
            for f in base_conf_files:
                shutil.copy(f, self.init_conf_dir)
        else:
            logger.warn(
                "Local conf dir does not exist. Using default configuration")
            base_conf_files = []

        missing_conf_files = self.conf_mandatory_files
        for f in base_conf_files:
            f_base_name = os.path.basename(f)
            if f_base_name in missing_conf_files:
                missing_conf_files.remove(f_base_name)

        logger.info("Copying missing conf files from master: " + str(
            missing_conf_files))

        remote_missing_files = [os.path.join(self.conf_dir, f)
                                for f in missing_conf_files]

        action = Get([self.master], remote_missing_files, self.init_conf_dir)
        action.run()
Developer: mliroz, Project: bigdata_dpy, Lines: 28, Source: cluster.py

Example 7: _copy_xp_output

    def _copy_xp_output(self):
        """Copy experiment's output."""

        if self.output_path:
            remote_path = self.macro_manager.test_macros["xp.output"]  # TODO: what happens if not specified?
            local_path = os.path.join(self.output_path, str(self.comb_id))
            logger.info("Copying output to " + local_path)

            tmp_dir = "/tmp"

            # Remove file in tmp dir if exists
            proc = SshProcess("rm -rf " +
                              os.path.join(tmp_dir, os.path.basename(remote_path)),
                              self.hc.master)
            proc.run()

            # Get files in master
            self.hc.execute("fs -get " + remote_path + " " + tmp_dir,
                            verbose=False)

            # Copy files from master
            action = Get([self.hc.master],
                         [os.path.join(tmp_dir, os.path.basename(remote_path))],
                         local_path)
            action.run()
Developer: djamelinfo, Project: hadoop_g5k, Lines: 25, Source: engine.py

Example 8: __init__

    def __init__(self, hosts, topo_list=None):
        """Create a Hadoop topology object assigning each host to the
        corresponding rack.

        Args:
          hosts (list of Host):
            The hosts to be assigned a topology.
          topo_list (list of str, optional):
            The racks to be assigned to each host. len(hosts) should be equal to
            len(topo_list).
        """

        if topo_list:
            if len(hosts) == len(topo_list):
                self.topology = topo_list
                return
            else:
                logger.warn("hosts and topology have not the same length.")

        logger.info("Discovering topology automatically")
        self.topology = {}
        for h in hosts:
            nw_adapters = get_host_attributes(h)[u'network_adapters']
            for nwa in nw_adapters:
                if (u'network_address' in nwa and
                            nwa[u'network_address'] == h.address):
                    self.topology[h] = "/" + nwa[u'switch']
                    break
Developer: djamelinfo, Project: hadoop_g5k, Lines: 28, Source: objects.py

Example 9: plot_state

    def plot_state(self, boxes, deltas, name='', outdir=None):
        """ Make a graph of a given state """
        graph = Dot(graph_type='digraph', fontname="Verdana", size="10, 5",
                    fixedsize=True)
        i_box = 0
        for box in boxes:
            textcolor = 'white' if sum(
                [self.color_chars.index(col)
                 for col in self.plots_conf[box]['color'].split('#')[1]]
            ) < 35 else 'black'
            node_box = Node(box, style="filled",
                            label='<<font POINT-SIZE="10" color="' + textcolor +
                                  '">' + box + '<br/> ' +
                                  "%.7f" % round(deltas[i_box], 7) + '</font>>',
                            fillcolor=self.plots_conf[box]['color'],
                            shape=self.plots_conf[box]['shape'])
            i_box += 1
            graph.add_node(node_box)

        for box_from, boxes_to in self.Flux.iteritems():
            for box_to, flux in boxes_to.iteritems():
                if flux != 0:
                    if flux > 0:
                        edge = Edge(box_from, box_to,
                                    label='<<font POINT-SIZE="10">' +
                                          str(flux) + '</font>>')
                    elif flux < 0:
                        edge = Edge(box_to, box_from,
                                    label='<<font POINT-SIZE="10">' +
                                          str(flux) + '</font>>')
                    graph.add_edge(edge)

        if outdir is None:
            outdir = self.result_dir

        outfile = outdir + '/state' + name + '.png'
        graph.write_png(outfile)
        logger.info('State has been saved to ' + set_style(outfile, 'emph'))
Developer: lpouillo, Project: boxmodel, Lines: 27, Source: BoxModel.py

Example 10: __init__

    def __init__(self):
        """ Add options for the number of measures, migration bandwidth,
        number of nodes, walltime, env_file or env_name, stress, and clusters,
        and initialize the engine """
        super(BoxModel, self).__init__()
        self.init_plots()
        logger.setLevel('INFO')
        logger.info(set_style('\n\n                 Welcome to the human isotopic Box Model\n',
                              'log_header'))
Developer: lpouillo, Project: boxmodel, Lines: 7, Source: BoxModel.py

Example 11: start_spark

    def start_spark(self):
        """Start spark processes.
        In STANDALONE mode it starts the master and slaves. In YARN mode it just
        checks that Hadoop is running, and starts it if not.
        """

        logger.info("Starting Spark")

        if self.running:
            logger.warn("Spark was already started")
            return

        if self.mode == STANDALONE_MODE:
            proc = SshProcess(self.sbin_dir + "/start-master.sh;" +
                              self.sbin_dir + "/start-slaves.sh;",
                              self.master)
            proc.run()
            if not proc.finished_ok:
                logger.warn("Error while starting Spark")
                return
        elif self.mode == YARN_MODE:
            if not self.hc.running:
                logger.warn("YARN services must be started first")
                self.hc.start_and_wait()

        self.running = True
Developer: djamelinfo, Project: hadoop_g5k, Lines: 26, Source: spark.py

Example 12: serialize_cluster

def serialize_cluster(cluster_type, cid, cluster_object):
    """Serialize the cluster object. Replace also the linked Hadoop cluster if
    it exists.

    Args:
      cluster_type (str):
        The type of cluster to serialize.
      cid (int):
        The id of the cluster.
      cluster_object:
        The cluster to serialize.
    """

    fname = __get_cluster_file(cluster_type, cid)

    logger.info("Serialize cluster (" + cluster_type + ") in " + fname)

    c_file = open(fname, 'wb')
    pickle.dump(cluster_object, c_file)
    c_file.close()

    if cluster_type != HadoopCluster.get_cluster_type():
        hc_link_fname = __get_hc_link_file(cluster_type, cid)
        if os.path.exists(hc_link_fname):
            with open(hc_link_fname) as link_file:
                hc_id = int(link_file.readline())
            serialize_cluster(HadoopCluster.get_cluster_type(), hc_id,
                              cluster_object.hc)
Developer: sarlam, Project: hadoop_g5k, Lines: 27, Source: serialization.py
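For reference, loading a cluster serialized this way is the usual pickle round-trip. A minimal sketch (the helper name is illustrative, not taken from the project; real code would obtain fname via __get_cluster_file):

    import pickle

    def load_cluster(fname):
        # Inverse of the pickle.dump() call above.
        with open(fname, 'rb') as c_file:
            return pickle.load(c_file)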

Example 13: load

    def load(self):
        """Load the configuration file"""

        # Load the configuration file
        try:
            with open(self.config_path) as config_file:
                config = yaml.load(config_file)
        except:
            logger.error("Error reading configuration file %s" %
                         self.config_path)
            t, value, tb = sys.exc_info()
            print("%s %s" % (str(t), str(value)))
            sys.exit(23)

        # Load g5k networks
        with open(NETWORK_FILE) as network_file:
            self.networks = yaml.load(network_file)

        self.config = {}
        self.config.update(DEFAULT_CONFIG)
        self.config.update(config)

        logger.info("Configuration file loaded : %s" % self.config_path)
        logger.info(pf(self.config))

        return self.config
Developer: BeyondTheClouds, Project: kolla-g5k, Lines: 27, Source: g5k_engine.py

Example 14: _copy_base_conf

    def _copy_base_conf(self):
        """Copy base configuration files to tmp dir."""

        self.temp_conf_dir = tempfile.mkdtemp("", "hadoop-", "/tmp")
        if os.path.exists(self.local_base_conf_dir):
            base_conf_files = [os.path.join(self.local_base_conf_dir, f)
                               for f in os.listdir(self.local_base_conf_dir)]
            for f in base_conf_files:
                shutil.copy(f, self.temp_conf_dir)
        else:
            logger.warn(
                "Local conf dir does not exist. Using default configuration")
            base_conf_files = []

        mandatory_files = [CORE_CONF_FILE, HDFS_CONF_FILE, MR_CONF_FILE]

        missing_conf_files = mandatory_files
        for f in base_conf_files:
            f_base_name = os.path.basename(f)
            if f_base_name in missing_conf_files:
                missing_conf_files.remove(f_base_name)

        logger.info("Copying missing conf files from master: " + str(
            missing_conf_files))

        remote_missing_files = [os.path.join(self.conf_dir, f)
                                for f in missing_conf_files]

        action = Get([self.master], remote_missing_files, self.temp_conf_dir)
        action.run()
Developer: lmolina, Project: hadoop_g5k, Lines: 30, Source: cluster.py

Example 15: __force_clean

    def __force_clean(self):
        """Stop previous Hive processes (if any) and remove all remote files
        created by it."""

        hive_processes = []

        force_kill = False
        for h in self.hosts:
            proc = SshProcess("jps", self.master)
            proc.run()

            ids_to_kill = []
            for line in proc.stdout.splitlines():
                field = line.split()
                if field[1] in hive_processes:
                    ids_to_kill.append(field[0])

            if ids_to_kill:
                force_kill = True
                ids_to_kill_str = ""
                for pid in ids_to_kill:
                    ids_to_kill_str += " " + pid

                proc = SshProcess("kill -9" + ids_to_kill_str, h)
                proc.run()

        if force_kill:
            logger.info(
                "Processes from previous hadoop deployments had to be killed")

        self.clean_logs()
Developer: djamelinfo, Project: hadoop_g5k, Lines: 31, Source: hive.py


Note: The execo_engine.logger.info examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; the source code remains under its original authors' copyright, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.