

Python datalogger.DataLogger Class Code Examples

This article collects typical usage examples of the Python datalogger.DataLogger class. If you are wondering what the DataLogger class is used for, how to use it, or are looking for concrete DataLogger examples, the selected class code examples below should help.


The following shows 15 code examples of the DataLogger class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: get_longtime_data

    def get_longtime_data(self, args):
        """
        get values from RAW Archive

        parameters:
        /<str>project/<str>tablename/<str>datestring/<str>key/<str>value_keys

        keys are given as keyids, e.g. hostname:srvszp2orb.tilak.cc;
        this is only useful if keyids are unique

        return data like this:
        [
            {
                name: "name of this series" usually this is the counter name
                data : [[ts, value], ...]
            },
            ...
        ]
        """
        assert len(args) == 5
        project, tablename, monthstring, keys_str, value_key = args
        if len(monthstring) > 7:
            return "monthstring, has to be in YYYY-MM format"
        # key_str should be a tuple string, convert to unicode tuple
        keys = tuple([unicode(key_value) for key_value in eval(base64.b64decode(keys_str))])
        logging.info("project : %s", project)
        logging.info("tablename : %s", tablename)
        logging.info("monthstring : %s", monthstring)
        logging.info("keys : %s", keys)
        logging.info("value_keys : %s", value_key)
        datalogger = DataLogger(basedir, project, tablename)
        data = datalogger.get_tsastats_longtime_hc(monthstring, keys, value_key)
        #logging.info("got data: %s", data)
        hc_data = [{"name" : funcname, "data" : data[funcname]} for funcname in data.keys()]
        return json.dumps(hc_data)
Developer: gunny26, Project: datalogger, Lines of code: 35, Source: code.py
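
A minimal client-side sketch of how the keys_str argument could be built: the handler eval()s a base64-encoded repr() of a tuple of key values, so the caller has to produce exactly that encoding. Project, table and value names below are made up for illustration (Python 2, matching the handler):

import base64

# hypothetical index key value, following the keyid format from the docstring
keys = (u"srvszp2orb.tilak.cc", )
keys_str = base64.b64encode(repr(keys))  # the handler does eval(base64.b64decode(keys_str))

# args as unpacked by get_longtime_data:
# project, tablename, monthstring, keys_str, value_key
args = ("sanportperf", "fcIfC3AccountingTable", "2015-07", keys_str, "fcIfC3InOctets")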

Example 2: get_quantile_web

    def get_quantile_web(self, args):
        """
        return exported QuantileArray json formatted, special
        version for use in webpages to render with tablesorter

        in contrast to get_quantile, the value_keyname has to be given

        ex: Datalogger/get_quantile_web/{projectname}/{tablename}/{datestring}/{value_keyname}

        [
            dict of index_keys : dict of quantile,
            list of index_keys,
            list of value_names,
        ]

        returns:
        json(quantile_dict)
        """
        project, tablename, datestring, value_keyname = args[:4]
        datalogger = DataLogger(basedir, project, tablename)
        qa = datalogger.load_quantile(datestring)
        ret_data = []
        # build header
        ret_data.append(list(datalogger.index_keynames) + ["Q0", "Q1", "Q2", "Q3", "Q4"])
        # data part
        for k, v  in qa[value_keyname].quantile.items():
            ret_data.append(list(k) + v.values())
        return json.dumps(ret_data)
Developer: gunny26, Project: datalogger, Lines of code: 28, Source: code.py
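
The returned JSON is a list of rows: the first row is the header (the index key names followed by Q0 to Q4), every further row is an index key plus its five quantile values. A rough sketch of how a consumer might unpack such a response (variable names are illustrative):

import json

rows = json.loads(ret_json)              # ret_json as returned by get_quantile_web
header, data_rows = rows[0], rows[1:]
for row in data_rows:
    index_key = tuple(row[:-5])          # leading columns: the index key
    q0, q1, q2, q3, q4 = row[-5:]        # trailing columns: the quantiles
    print(index_key, q2)                 # e.g. print the median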

Example 3: get_quantile

    def get_quantile(self, project, tablename, datestring, args):
        """
        return exported QuantileArray json formatted

        ex: Datalogger/get_quantile/{projectname}/{tablename}/{datestring}

        [
            dict of index_keys : dict of quantile,
            list of index_keys,
            list of value_names,
        ]

        returns:
        json(quantile_dict)
        """
        logging.info("optional arguments received: %s", args)
        datalogger = DataLogger(basedir, project, tablename)
        quantile = datalogger.load_quantile(datestring)
        if len(args) > 0:
            value_keyname = args[0]
            ret_data = []
            # build header
            ret_data.append(list(datalogger.index_keynames) + ["Q0", "Q1", "Q2", "Q3", "Q4"])
            # data part
            for k, v  in quantile[value_keyname].quantile.items():
                ret_data.append(list(k) + v.values())
            return json.dumps(ret_data)
        return quantile.to_json()
Developer: gunny26, Project: datalogger, Lines of code: 28, Source: code2.py

Example 4: sr_hrstorage_unused

 def sr_hrstorage_unused(args):
     """
     special report listing unused SNMP Host Storage
     works only with snmp/hrStorageTable
     """
     datestring, storage_type = args[:2]
     datalogger = DataLogger(basedir, "snmp", "hrStorageTable")
     tsastat = datalogger.load_tsastats(datestring)
     data = []
     data.append(("hostname", "hrStorageDescr", "hrStorageSizeKb", "hrStorageUsedKb", "hrStorageNotUsedKbMin", "hrStorageNotUsedPct"))
     for index_key in tsastat.keys():
         # (u'srvcacdbp1.tilak.cc', u'Physical Memory',
         # u'HOST-RESOURCES-TYPES::hrStorageRam')
         if (u"HOST-RESOURCES-TYPES::%s" % storage_type) not in index_key:
             del tsastat[index_key]
         if index_key[1][:4] in (u"/run", u"/dev", u"/sys"):
             del tsastat[index_key]
     for key, tsstat in tsastat.items():
         sizekb = tsstat["hrStorageSize"]["min"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
         usedkb = tsstat["hrStorageUsed"]["max"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
         notused = sizekb - usedkb
         notused_pct = 0.0
         try:
             notused_pct = 100.0 *  notused / sizekb
         except ZeroDivisionError:
             pass
         data.append((key[0], key[1], "%0.2f" % sizekb, "%0.2f" % usedkb, "%0.2f" % notused, "%0.2f" % notused_pct))
     return json.dumps(data)
Developer: gunny26, Project: datalogger, Lines of code: 28, Source: code.py
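
A hedged usage sketch: storage_type is interpolated into u"HOST-RESOURCES-TYPES::%s", so it is passed as the bare type name, e.g. hrStorageRam from the comment in the code above; the datestring below is made up:

import json

report = sr_hrstorage_unused(("2015-07-01", "hrStorageRam"))
rows = json.loads(report)
# rows[0] is the header row, the rest are
# (hostname, hrStorageDescr, size kB, used kB, unused kB, unused %)
for row in rows[1:]:
    print(row)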

Example 5: get_monthly_ts

    def get_monthly_ts(self, project, tablename, monthstring, args):
        """
        get monthly statistical values

        TODO: should be combined with get_lt_ts
        """
        index_key_enc = None
        value_keyname = None
        stat_func_name = "avg"
        if len(args) == 2:
            index_key_enc, value_keyname = args
        else:
            index_key_enc, value_keyname, stat_func_name = args
        if len(monthstring) != 7:
            web.internalerror()
            return "monthstring, has to be in YYYY-MM format"
        # key_str should be a tuple string, convert to unicode tuple
        index_key = tuple([unicode(key_value) for key_value in eval(base64.b64decode(index_key_enc))])
        logging.info("index_key : %s", index_key)
        logging.info("value_keyname : %s", value_keyname)
        logging.info("stat_func_name: %s", stat_func_name)
        datalogger = DataLogger(basedir, project, tablename)
        filterkeys = dict(zip(datalogger.index_keynames, index_key))
        ret_data = []
        for datestring in datalogger.monthwalker(monthstring):
            logging.debug("getting tsatstats for %s", monthstring)
            try:
                tsastats = datalogger.load_tsastats(datestring, filterkeys=filterkeys)
                ret_data.append([datestring, tsastats[index_key][value_keyname][stat_func_name]])
            except DataLoggerRawFileMissing as exc:
                logging.error("No Input File for datestring %s found, skipping this date", datestring)
            except DataLoggerLiveDataError as exc:
                logging.error("Reading from live data is not allowed, skipping this data, and ending loop")
                break
        return json.dumps(ret_data)
Developer: gunny26, Project: datalogger, Lines of code: 35, Source: code2.py
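
The optional third argument selects the statistical function and defaults to "avg"; the result is one [datestring, value] pair per day of the month. A sketch of the two call forms, reusing the base64 tuple encoding from Example 1 (handler stands in for the web handler instance, all project/table/value names are hypothetical):

import base64, json

index_key_enc = base64.b64encode(repr((u"srvszp2orb.tilak.cc", )))
# two arguments: stat_func_name falls back to "avg"
ret = handler.get_monthly_ts("sanportperf", "fcIfC3AccountingTable", "2015-07",
                             (index_key_enc, "fcIfC3InOctets"))
# three arguments: explicit statistical function
ret = handler.get_monthly_ts("sanportperf", "fcIfC3AccountingTable", "2015-07",
                             (index_key_enc, "fcIfC3InOctets", "max"))
for datestring, value in json.loads(ret):
    print(datestring, value)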

Example 6: archive

def archive(project, tablename, datestring):
    datalogger = DataLogger(basedir, project, tablename)
    caches = datalogger.get_caches(datestring)
    suffix = "%s/%s/%s\t" % (datestring, project, tablename)
    if caches["tsa"]["raw"] is None:
        logging.debug("%s RAW Data not found", suffix)
    else:
        if not os.path.isfile(caches["tsa"]["raw"]):
            logging.info("%s RAW does not exists, maybe archived or deleted", suffix)
            return
        logging.info("%s found raw file %s", suffix, caches["tsa"]["raw"])
        filebasename = os.path.basename(caches["tsa"]["raw"])
        parts = filebasename.split("_")
        filetablename = filebasename.replace("_%s" % parts[-1], "")
        filedatestring = parts[-1].split(".")[0]
        filesuffix = ".".join(parts[-1].split(".")[1:])
        logging.info("found tablename %s, datestring %s, ending %s", filetablename, filedatestring, filesuffix)
        if (filetablename != tablename) or (filedatestring != datestring):
            logging.error("the references raw file seems not to be the correct one")
        else:
            if filesuffix == "csv.gz":
                logging.info("raw file already zipped, this seems not to be the actual one")
                if (len(caches["tsa"]["keys"]) > 0) and (len(caches["tsastat"]["keys"]) > 0) and (len(caches["ts"]["keys"]) > 0) and (caches["quantile"]["exists"] is True):
                    logging.info("%s all generated archives found, raw data could be archived", suffix)
                    archivepath = os.path.join(args.archivedir, datestring, project, tablename)
                    archivefilename = os.path.join(archivepath, os.path.basename(caches["tsa"]["raw"]))
                    if not os.path.isdir(archivepath):
                        logging.info("creating directory %s", archivepath)
                        os.makedirs(archivepath)
                    logging.info("%s moving raw file to %s", suffix, archivefilename)
                    shutil.move(caches["tsa"]["raw"], archivefilename)
                else:
                    logging.info("%s not all archives available, generate them first, before archiving raw data", suffix)
    del caches
    del datalogger
Developer: gunny26, Project: datalogger, Lines of code: 35, Source: archiver.py
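
The filename parsing above assumes raw files are named <tablename>_<datestring>.<suffix>. A tiny sketch of what the split logic yields for an assumed filename:

filebasename = "hrStorageTable_2015-07-01.csv.gz"              # assumed naming scheme
parts = filebasename.split("_")
filetablename = filebasename.replace("_%s" % parts[-1], "")    # -> "hrStorageTable"
filedatestring = parts[-1].split(".")[0]                       # -> "2015-07-01"
filesuffix = ".".join(parts[-1].split(".")[1:])                # -> "csv.gz", the suffix that triggers the archive branch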

Example 7: get_scatter_data

    def get_scatter_data(args):
        """
        gets scatter plot data of two value_keys of the same tablename

        ex: Datalogger/get_scatter_data/{projectname}/{tablename}/{datestring}/{value_keyname1}/{value_keyname2}/{stat function name}

        value_keyname{1/2} has to be one of get_value_keynames
        stat function name has to be one of get_stat_func_names

        returns:
        json(highgraph data)
        """
        assert len(args) == 6
        project, tablename, datestring, value_key1, value_key2, stat_func_name = args
        logging.info("project : %s", project)
        logging.info("tablename : %s", tablename)
        logging.info("datestring : %s", datestring)
        logging.info("value_key1 : %s", value_key1)
        logging.info("value_key2 : %s", value_key2)
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        hc_scatter_data = []
        for key, tsstat in tsastats.items():
            hc_scatter_data.append({
                "name" : str(key),
                "data" : ((tsstat[value_key1]["avg"], tsstat[value_key2]["avg"]), )
            })
        return json.dumps(hc_scatter_data)
Developer: gunny26, Project: datalogger, Lines of code: 28, Source: code.py
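
Every series in the returned structure carries exactly one point: the daily averages of value_key1 and value_key2 for one index key. A brief consumption sketch (variable names are illustrative):

import json

series = json.loads(hc_json)       # hc_json as returned by get_scatter_data
for entry in series:
    (x, y), = entry["data"]        # a single [avg(value_key1), avg(value_key2)] point
    print(entry["name"], x, y)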

Example 8: get_tsastats_func

    def get_tsastats_func(self, args):
        """
        return json data to render html table from it

        parameters:
        <b>project</b> project string
        <b>tablename</b> tablename string
        <b>datestring</b> datestring in YYYY-MM-DD form
        <b>stat_func_name</b> statistical function
        """
        project, tablename, datestring, stat_func_name = args
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        return json.dumps(tsastats.to_csv(stat_func_name))
Developer: gunny26, Project: datalogger, Lines of code: 14, Source: code.py

Example 9: get_projects

    def get_projects():
        """
        get available projects for this Datalogger Server

        ex: Datalogger/get_projects/...
        there is no further argument needed

        returns:
        json(existing project names)
        """
        ret_data = {
            "projects" : DataLogger.get_projects(basedir),
            "stat_func_names" : TimeseriesStats.stat_funcs.keys(),
            "last_businessday_datestring" : DataLogger.get_last_business_day_datestring()
        }
        return json.dumps(ret_data)
Developer: gunny26, Project: datalogger, Lines of code: 16, Source: code2.py
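
A sketch of what a client receives from this endpoint; the concrete values are placeholders, only the three keys are fixed by the code above:

import json

info = json.loads(get_projects())
# e.g. {"projects": ["snmp", "vicenter", ...],
#       "stat_func_names": ["min", "max", "avg", ...],
#       "last_businessday_datestring": "2015-07-31"}
print(info["projects"])
print(info["last_businessday_datestring"])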

Example 10: sr_vicenter_unused_mem

    def sr_vicenter_unused_mem(args):
        """
        special report to find virtual machines which are not using their RAM entirely
        on these machines there is a possibility to save some virtual memory

        works only for VMware machines, specifically the vicenter/virtualMachineMemoryStats table
        """
        datestring = args[0]
        datalogger = DataLogger(basedir, "vicenter", "virtualMachineMemoryStats")
        tsastat = datalogger.load_tsastats(datestring)
        tsastat_g = datalogger.tsastat_group_by(tsastat, ("hostname", ))
        data = []
        data.append(("hostname", "avg_active_max", "avg_granted_min", "avg_notused_min"))
        for key in tsastat_g.keys():
            not_used = tsastat_g[key]["mem.granted.average"]["min"] - tsastat_g[key]["mem.active.average"]["max"]
            data.append((key[0], "%0.2f" % tsastat_g[key]["mem.active.average"]["max"], "%0.3f" % tsastat_g[key]["mem.granted.average"]["min"], "%0.2f" % not_used))
        return json.dumps(data)
Developer: gunny26, Project: datalogger, Lines of code: 17, Source: code.py

Example 11: main

def main():
    for datestring in tuple(DataLogger.datewalker(startdate, args.enddate)):
        start_ts, stop_ts = DataLogger.get_ts_for_datestring(datestring)
        logging.debug("working on datestring %s (from %s to %s)", datestring, start_ts, stop_ts)
        for project in DataLogger.get_projects(args.basedir):
            if args.project is not None:
                if project != args.project:
                    logging.debug("skipping project %s", project)
                    continue
            logging.debug("working on project %s", project)
            for tablename in DataLogger.get_tablenames(args.basedir, project):
                if args.tablename is not None:
                    if tablename != args.tablename:
                        logging.debug("skipping tablename %s", tablename)
                        continue
                    logging.debug("working on tablename %s", tablename)
                archive(project, tablename, datestring)
Developer: gunny26, Project: datalogger, Lines of code: 17, Source: archiver.py

Example 12: __init__

    def __init__(self, name):
        adc = ADC("Analog input",0)
        self.pressure = Strain_PressureSensor("Pressure (kPa)",adc)
        print "Pressure is %s kPa" % self.pressure.get_pressure_kpa()

        # put Port 8 Pin 3&4 into mode 7 (GPIO_1_6 & GPIO_1_7)
        open('/sys/kernel/debug/omap_mux/gpmc_ad6', 'wb').write("%X" % 7)
        open('/sys/kernel/debug/omap_mux/gpmc_ad7', 'wb').write("%X" % 7)
        gpio1_6 = GPIO(38,GPIO.OUTPUT) #p8_3
        gpio1_7 = GPIO(39,GPIO.OUTPUT) #p8_4
        self.vi = Valve("Inlet",gpio1_6)
        self.vo = Valve("Outlet",gpio1_7)

        # put Port 8 Pin 5 into mode 7 (GPIO_1_2)
        open('/sys/kernel/debug/omap_mux/gpmc_ad2', 'wb').write("%X" % 7)
        gpio1_2 = GPIO(34,GPIO.OUTPUT) #p8_5
        self.pump = Pump("Water cooling pump", gpio1_2)

        # put Port 9 Pin 12 into mode 7 (GPIO_1_28)
        gpio1_28 = GPIO(60,GPIO.INPUT) #p9_12
        self.waterlevel = WaterLevel("Waterlevel sensor", gpio1_28)

        sck=GPIO(49,GPIO.OUTPUT) #p9_23
        s0=GPIO(115,GPIO.INPUT)  #p9_27
        cs_t1=GPIO(48,GPIO.OUTPUT) #p9_15 GPIO1_16 48
        cs_t2=GPIO(117,GPIO.OUTPUT) #p9_25
        self.max1 = MAX31855(cs_t1, sck, s0)  #Maxim IC No 1, connected to the cartridge heater TC
        self.max2 = MAX31855(cs_t2, sck, s0)  #Maxim IC No 2, connected to the TC at the bottom of the vessel

        # PWMHeater
        self.h = PWMHeater("Cartridge heater (%)", "ehrpwm1a")
        self.h.setfrequency(3)
        
        # DS18B20
        T1="28-000003f5b1c9"
        T2="28-000003f5baa4"
        T3="28-000003f5be11"
        
        self.ds1=DS18B20("T in (C)",T1)
        self.ds2=DS18B20("T out (C)",T2)
        self.ds3=DS18B20("T Env (C)",T3)
        
        print "ds1: ",self.ds1.get_temperature()
        print "ds2: ",self.ds2.get_temperature()
        print "ds3: ",self.ds3.get_temperature()
        
        # CS5490 power meter
        self.cs=CS5490("Power In (W)")

        print "start DataLogger"
        self.log = DataLogger("my datalogger",500)
        self.log.add_sensor(("T Vessel bottom (C)","Maxim 2 Cld jnct (C)"),self.max2.get_celsius)
        self.log.add_sensor(("T Cartridge (C)","Maxim 1 Cld jnct (C)"),self.max1.get_celsius)
        self.log.add_sensor(self.ds1.get_name(),self.ds1.get_temperature)
        self.log.add_sensor(self.ds2.get_name(),self.ds2.get_temperature)
        self.log.add_sensor(self.ds3.get_name(),self.ds3.get_temperature)
        self.log.add_sensor(self.pressure.get_name(),self.pressure.get_pressure_kpa)
        self.log.add_sensor(self.cs.get_name(),self.cs.get_average_power)
Developer: fusioncatalyst, Project: peerpressure, Lines of code: 58, Source: peerpressure.py

Example 13: get_tsstat_caches

    def get_tsstat_caches(args):
        """
        DEPRECATED use get_caches instead

        get a list of all available TimeseriesStats available
        attention: there are only tsstat caches if raw data is already analyzed

        ex: Datalogger/get_tsstat_caches/{projectname}/{tablename}/{datestring}

        returns:
        json(list of all available TimeseriesStats data)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        keys = []
        for cache in datalogger.list_tsstat_caches(datestring):
            keys.append(cache[1])
        return json.dumps(keys)
Developer: gunny26, Project: datalogger, Lines of code: 18, Source: code.py

Example 14: report_group

def report_group(project, tablename, datestring1, datestring2, value_key):
    # get data, from datalogger, or dataloggerhelper
    datalogger = DataLogger(BASEDIR, project, tablename)
    dataloggerweb = DataLoggerWeb(DATALOGGER_URL)
    print "loading data"
    starttime = time.time()
    # tsa1 = datalogger.load_tsa(datestring1)
    tsa1 = dataloggerweb.get_tsa(project, tablename, datestring1)
    tsa1 = datalogger.group_by(datestring1, tsa1, ("hostname",), lambda a, b: (a + b) / 2)
    # tsa2 = datalogger.load_tsa(datestring2)
    tsa2 = dataloggerweb.get_tsa(project, tablename, datestring2)
    tsa2 = datalogger.group_by(datestring2, tsa2, ("hostname",), lambda a, b: (a + b) / 2)
    print "Duration load %f" % (time.time() - starttime)
    starttime = time.time()
    cm = CorrelationMatrixTime(tsa1, tsa2, value_key)
    print "TOP most differing keys between %s and %s" % (datestring1, datestring2)
    for key, coefficient in sorted(cm.items(), key=lambda items: items[1], reverse=True)[:20]:
        print key, coefficient
Developer: gunny26, Project: datalogger, Lines of code: 18, Source: test_anomality.py

Example 15: get_last_business_day_datestring

    def get_last_business_day_datestring(args):
        """
        get datestring of last businessday Mo.-Fr.

        ex: Datalogger/get_last_business_day_datestring/...

        returns:
        json(datestring of last businessday)
        """
        return json.dumps(DataLogger.get_last_business_day_datestring())
Developer: gunny26, Project: datalogger, Lines of code: 10, Source: code.py


Note: The datalogger.DataLogger class examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to the license of the corresponding project. Do not reproduce without permission.