

Java BatchPoints.point Method Code Examples

This article collects typical usage examples of the Java method org.influxdb.dto.BatchPoints.point. If you are unsure how BatchPoints.point is used, what it does, or what real calls to it look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.influxdb.dto.BatchPoints.


A total of 15 code examples of the BatchPoints.point method are shown below, sorted by popularity by default.
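Before the individual examples, here is a minimal sketch of the workflow the snippets below share, using the influxdb-java client: build an empty BatchPoints container, append one or more Point objects to it via BatchPoints.point, and write the whole batch in a single call. The connection URL, database name, measurement, tag, and field values in this sketch are placeholder assumptions, not taken from any particular project below.

import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class BatchPointsPointSketch {

    public static void main(String[] args) {
        // Placeholder connection settings -- adjust to your environment.
        String influxDBUrl = "http://localhost:8086";
        String dbName = "example_db";

        InfluxDB influxDB = InfluxDBFactory.connect(influxDBUrl);
        try {
            // The batch is bound to a database; tags set here apply to every point in it.
            BatchPoints batchPoints = BatchPoints
                    .database(dbName)
                    .tag("source", "example")
                    .build();

            // Build a point and append it to the batch via BatchPoints.point(...).
            Point point = Point.measurement("exampleMeasurement")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                    .addField("value", 42L)
                    .build();
            batchPoints.point(point);

            // Send the whole batch in one write request.
            influxDB.write(batchPoints);
        } finally {
            influxDB.close();
        }
    }
}

As the examples show, batch-level settings (database, retention policy, consistency level, shared tags) go on the BatchPoints builder, while per-point timestamps and fields go on each Point before it is handed to point().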

Example 1: insert

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
    @Override
    public void insert() {
        InfluxDB influxDB = null;
        try {
            influxDB = InfluxDBFactory.connect(influxDBUrl);
            if (!influxDB.databaseExists(dbName)) {
                influxDB.createDatabase(dbName);
            }
            for (OffsetInfo offsetInfo : offsetInfoList) {
                String group = offsetInfo.getGroup();
                String topic = offsetInfo.getTopic();
                Long logSize = offsetInfo.getLogSize();
                Long offsets = offsetInfo.getCommittedOffset();
                Long lag = offsetInfo.getLag();
                Long timestamp = offsetInfo.getTimestamp();

                BatchPoints batchPoints = BatchPoints
                        .database(dbName)
                        .tag("group", group)
                        .tag("topic", topic)
                        .build();
                Point point = Point.measurement("offsetsConsumer")
                        .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                        .time(timestamp, TimeUnit.MILLISECONDS)
                        .addField("logSize", logSize)
                        .addField("offsets", offsets)
                        .addField("lag", lag)
                        .build();
                batchPoints.point(point);
                influxDB.write(batchPoints);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (influxDB != null) {
                influxDB.close();
            }
        }

    }
 
Developer: dubin555, Project: Kafka-Insight, Lines of code: 41, Source file: OffsetsInfluxDBDaoImpl.java

Example 2: write

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
@Override
public void write(final String database, final RetentionPolicy retentionPolicy, final Point point) throws InflowException {
  if (this.batchEnabled.get()) {
    BatchEntry batchEntry = new BatchEntry(point, database, retentionPolicy);
    this.batchProcessor.put(batchEntry);
  } else {
    BatchPoints batchPoints = BatchPoints.database(database).retentionPolicy(retentionPolicy).build();
    batchPoints.point(point);
    this.write(batchPoints);
    this.unBatchedCount.incrementAndGet();
  }
  this.writeCount.incrementAndGet();
}
 
Developer: nkiraly, Project: influxdb-inflow, Lines of code: 14, Source file: DriverHTTP.java

Example 3: savePoint

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
private void savePoint(String message, InfluxDB influxDB) {
    try {
        System.out.println(" [x] Received '" + message + "'");
        JSONObject msg = new JSONObject(message);
        DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
        Date date = format.parse(msg.get("dateModified").toString());
        long timeMillisec = date.getTime();
        Point event2add = Point.measurement("events")
                .time(timeMillisec, TimeUnit.MILLISECONDS)
                .field("id", msg.get("id"))
                .field("status", msg.get("status"))
                .field("billingModel", msg.get("billingModel").toString())
                .field("productType", msg.get("productType").toString())
                .field("productId", msg.get("productId").toString())
                .field("instanceId", msg.get("instanceId").toString())
                .field("setupCost", msg.get("setupCost"))
                .field("periodCost", msg.get("periodCost"))
                .field("period", msg.get("period"))
                .field("providerId", msg.get("providerId").toString())
                .field("clientId", msg.get("clientId").toString())
                .field("startDate", msg.get("startDate"))
                .field("lastBillDate", msg.get("lastBillDate"))
                .field("agreementId", msg.get("agreementId").toString())
                .field("relatives", msg.get("relatives"))
                .field("renew", msg.get("renew"))
                .field("dateCreated", msg.get("dateCreated"))
                .field("priceUnit", msg.get("priceUnit"))
                .build();
        BatchPoints batchPoints = giveMeEmptyContainer();
        batchPoints.point(event2add);
        saveContainerToDB(batchPoints);
        //influxDB.write(load.configuration.get("dbName"), "default", event2add);
    } catch (Exception ex) {
        System.err.println("Caught exception in client thread: " + ex.getMessage());
    }
}
 
Developer: icclab, Project: cyclops-udr, Lines of code: 37, Source file: InfluxDBClient.java

Example 4: populateContainerWithCDRs

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
public void populateContainerWithCDRs(BatchPoints container) {
    // get indexes
    Integer timeIndex = columns.indexOf("time");
    Integer rawUsageIndex = columns.indexOf("rawusage");
    Integer projectidIndex = columns.indexOf("projectid");
    Integer accountIndex = columns.indexOf("account");

    // create UDREntries
    for (List<String> point : points) {
        UDREntry entry = new UDREntry();

        entry.setTime(point.get(timeIndex));
        entry.setRawusage(point.get(rawUsageIndex));
        entry.setProjectid(point.get(projectidIndex));
        entry.setAccount(point.get(accountIndex));
        entry.setMetername(name);

        // now save CDR to container
        CDR cdr = entry.getCDR();

        // only add if we have valid CDR
        if (cdr != null) {
            container.point(cdr.toDBPoint());
        }
    }
}
 
Developer: icclab, Project: cyclops-rc, Lines of code: 27, Source file: UDRRecord.java

Example 5: write

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
@Override
public void write(Batch batch) throws StageException {
  BatchPoints batchPoints = BatchPoints
      .database(conf.dbName)
      .retentionPolicy(conf.retentionPolicy)
      .consistency(conf.consistencyLevel)
      .build();

  Iterator<Record> recordIterator = batch.getRecords();

  while (recordIterator.hasNext()) {
    Record record = recordIterator.next();

    for (Point point : converter.getPoints(record)) {
      batchPoints.point(point);
    }
  }

  client.write(batchPoints);
}
 
Developer: streamsets, Project: datacollector, Lines of code: 21, Source file: InfluxTarget.java

Example 6: testCase

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
@Override
protected void testCase(Object[] args) {
    assertEquals(1, args.length);

    BatchPoints actual = (BatchPoints) args[0];

    Point expectedPoint = Point.measurement("fake")
            .field(InfluxDBReporter.VALUE_COLUMN, 100L)
            .tag(TagUtil.PREFIX_TAG, "influxdb.reporter.test")
            .build();

    BatchPoints expected = BatchPoints.database("database").build();
    expected.point(expectedPoint);

    assertEquals(expected.getDatabase(), actual.getDatabase());
    assertEquals(expected.getPoints().size(), actual.getPoints().size());

    Point actualPoint = actual.getPoints().get(0);

    // All the fields on Point are private
    assertTrue(actualPoint.lineProtocol().startsWith("fake"));
    assertTrue(actualPoint.lineProtocol().contains("value=100"));
    assertTrue(actualPoint.lineProtocol().contains("prefix=influxdb.reporter.test"));
}
 
Developer: etsy, Project: statsd-jvm-profiler, Lines of code: 25, Source file: InfluxDBReporterTest.java

Example 7: sendData

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
@Override
public Object sendData(Map<String, Object> data) {

    BatchPoints batchPoints = BatchPoints
            .database(databaseName)//.tag("async", "true").retentionPolicy("default").consistency(ConsistencyLevel.ALL)
            .build();

    for (Map.Entry<String, Object> entry : data.entrySet()) {
        Point point;
        if (Map.class.isAssignableFrom(entry.getValue().getClass())) {
            point = Point.measurement(entry.getKey()).fields((Map<String, Object>) entry.getValue())
                    .build();
        } else {
            point = Point.measurement(entry.getKey()).field("value", entry.getValue()).build();
        }
        batchPoints.point(point);
    }

    influxDB.write(batchPoints);
    return null;
}
 
Developer: stropa, Project: datapipes, Lines of code: 22, Source file: InfluxDBSender.java

Example 8: handleChangeBlock

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
private void handleChangeBlock(String name, ChangeBlockEvent.Break.Post event, Player player) {
    BatchPoints batch = newBatch();
    for (Transaction<BlockSnapshot> transaction : event.getTransactions()) {
        Point.Builder p = newPoint(name);
        addPlayer(p, player);
        BlockSnapshot original = transaction.getOriginal();
        p.tag("block_original", original.getState().getId());
        p.tag("block_final", transaction.getFinal().getState().getId());
        original.getLocation().ifPresent(loc -> {
            addLocation(p, loc);
        });

        batch.point(p.build());
    }

    connection.write(batch);
}
 
Developer: CubeEngine, Project: modules-extra, Lines of code: 18, Source file: Stats.java

Example 9: importReport

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
private static void importReport(FritzBoxReportMail mail) {
    final BatchPoints batchPoints = BatchPoints.database(dbName) //
            // .tag("async", "true") //
            .retentionPolicy("default") //
            .consistency(ConsistencyLevel.ALL) //
            .build();
    batchPoints.point(getDataConnectionPointYesterday(mail));
    influxDB.write(batchPoints);
}
 
Developer: kaklakariada, Project: fritzbox-report-mail, Lines of code: 10, Source file: ImportReportsToInfluxDb.java

Example 10: setMeterList

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
@Post
public String setMeterList(Representation entity) {
    counter.increment(endpoint);
    logger.trace("Received CloudStack meterList selection, saving it into DB");

    try {
        // construct object from JSON based on POJO template
        MeterList meterList = new Gson().fromJson(entity.getText(), MeterList.class);

        // get empty container
        BatchPoints container = dbClient.giveMeEmptyContainer();

        // now fill it with data
        for (Meter meter : meterList.getData()) {

            // add point to container
            container.point(meter.toDBPoint());
        }

        // save it to database
        dbClient.saveContainerToDB(container);

    } catch (IOException e) {
        logger.error("Could not parse received JSON when setting meterList: " + e.getMessage());
        throw new ResourceException(500);
    }
    return "Success";
}
 
Developer: icclab, Project: cyclops-udr, Lines of code: 29, Source file: CloudStackMeter.java

Example 11: savePointsToDb

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
/**
 * Save list of points to influxDB
 *
 * @param points list
 */
private void savePointsToDb(List<Point> points) {
    // add points to container
    if (points != null) {
        // ask for new container, we have to save each page on its own, so we don't run out of memory
        BatchPoints container = dbClient.giveMeEmptyContainer();

        for (Point point : points) {
            container.point(point);
        }

        dbClient.saveContainerToDB(container);
    }
}
 
Developer: icclab, Project: cyclops-udr, Lines of code: 19, Source file: CloudStackPuller.java

Example 12: handleMessageDelivery

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
/**
 * This is the body of message processing
 *
 * @param channel where consumer should listen
 * @return consumer object
 */
private Consumer handleMessageDelivery(Channel channel) {
    return new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
            try {
                // make sure encoding is correct
                String message = new String(body, "UTF-8");

                // now parse it
                MCNEvent mcnEvent = processMessage(message);

                if (mcnEvent != null) {
                    // request empty container
                    BatchPoints container = db.giveMeEmptyContainerForEvents(Loader.getSettings().getMcnSettings().getMCNDBEventsName());

                    // add a point
                    container.point(mcnEvent.toPoint());

                    // let the database save it
                    db.saveContainerToDB(container);
                }

            } catch (Exception ex) {
                logger.error("Caught exception in client thread: " + ex.getMessage());
            }

        }
    };
}
 
Developer: icclab, Project: cyclops-udr, Lines of code: 36, Source file: McnRabbitMQClient.java

Example 13: handleMessageDelivery

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
/**
 * This is the body of message processing
 *
 * @param channel where consumer should listen
 * @return consumer object
 */
private Consumer handleMessageDelivery(Channel channel) {
    return new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
            try {
                // make sure encoding is correct
                String message = new String(body, "UTF-8");

                // now parse it
                TnovaEvent tnovaEvent = processMessage(message);

                if (tnovaEvent != null) {
                    // request empty container
                    BatchPoints container = db.giveMeEmptyContainerForEvents(Loader.getSettings().gettNovaSettings().getTNovaDBEventsName());

                    // add a point
                    container.point(tnovaEvent.toPoint());

                    // let the database save it
                    db.saveContainerToDB(container);
                }

            } catch (Exception ex) {
                System.err.println("Caught exception in client thread: " + ex.getMessage());
            }

        }
    };
}
 
Developer: icclab, Project: cyclops-udr, Lines of code: 36, Source file: TnovaRabbitMQClient.java

Example 14: recordGaugeValues

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
/**
 * Record multiple gauge values in InfluxDB
 *
 * @param gauges A map of gauge names to values
 */
@Override
public void recordGaugeValues(Map<String, ? extends Number> gauges) {
    long time = System.currentTimeMillis();
    BatchPoints batchPoints = BatchPoints.database(database).build();
    for (Map.Entry<String, ? extends Number> gauge: gauges.entrySet()) {
        batchPoints.point(constructPoint(time, gauge.getKey(), gauge.getValue()));
    }
    client.write(batchPoints);
}
 
Developer: etsy, Project: statsd-jvm-profiler, Lines of code: 15, Source file: InfluxDBReporter.java

Example 15: insert

import org.influxdb.dto.BatchPoints; // import the package/class this method depends on
    @Override
    public void insert() {

        InfluxDB influxDB = null;
        try {
            influxDB = InfluxDBFactory.connect(influxDBUrl);
            if (!influxDB.databaseExists(dbName)) {
                influxDB.createDatabase(dbName);
            }
            for (MBeanInfo mBeanInfo : mBeanInfoList) {
                String label = mBeanInfo.getLabel();
                String topic = mBeanInfo.getTopic();
                double oneMinute = mBeanInfo.getOneMinute();
                double fiveMinute = mBeanInfo.getFiveMinute();
                double fifteenMinute = mBeanInfo.getFifteenMinute();
                double meanRate = mBeanInfo.getMeanRate();


                BatchPoints batchPoints = BatchPoints
                        .database(dbName)
                        .tag("label", label)
                        .tag("topic", topic)
                        .build();
                Point point = Point.measurement("mBeanMetric")
                        .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
//                        .time(timestamp, TimeUnit.MILLISECONDS)
                        .addField("oneMinuteRate", oneMinute)
                        .addField("fiveMinuteRate", fiveMinute)
                        .addField("fifteenMinuteRate", fifteenMinute)
                        .addField("meanRate", meanRate)
                        .build();
                batchPoints.point(point);
                influxDB.write(batchPoints);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (influxDB != null) {
                influxDB.close();
            }
        }

    }
 
Developer: dubin555, Project: Kafka-Insight, Lines of code: 44, Source file: MBeansInfluxDBDaoImpl.java


Note: The org.influxdb.dto.BatchPoints.point method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce without permission.