This article collects typical usage examples of the org.influxdb.dto.BatchPoints.point method in Java. If you are wondering what exactly BatchPoints.point does, how to call it, or what it looks like in practice, the hand-picked code samples below should help. You can also read further about the containing class, org.influxdb.dto.BatchPoints.
The following sections show 15 code examples of the BatchPoints.point method, sorted by popularity by default.
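Before diving into the collected examples, here is a minimal, self-contained sketch of the usual BatchPoints.point workflow with the influxdb-java client; the URL, database, measurement, and field names are placeholders rather than values taken from any example below:

import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class BatchPointsPointSketch {
    public static void main(String[] args) {
        // connect to a local InfluxDB instance (placeholder URL)
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086");
        try {
            // build an empty batch bound to a database;
            // tags set on the batch apply to every point it contains
            BatchPoints batchPoints = BatchPoints
                    .database("exampleDb")
                    .tag("host", "server01")
                    .build();
            // create a point and add it to the batch via point(...)
            Point point = Point.measurement("cpu")
                    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                    .addField("usage", 42.0)
                    .build();
            batchPoints.point(point);
            // write the whole batch in one request
            influxDB.write(batchPoints);
        } finally {
            influxDB.close();
        }
    }
}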
Example 1: insert
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
public void insert() {
InfluxDB influxDB = null;
try {
influxDB = InfluxDBFactory.connect(influxDBUrl);
if (!influxDB.databaseExists(dbName)) {
influxDB.createDatabase(dbName);
}
for (OffsetInfo offsetInfo : offsetInfoList) {
String group = offsetInfo.getGroup();
String topic = offsetInfo.getTopic();
Long logSize = offsetInfo.getLogSize();
Long offsets = offsetInfo.getCommittedOffset();
Long lag = offsetInfo.getLag();
Long timestamp = offsetInfo.getTimestamp();
BatchPoints batchPoints = BatchPoints
.database(dbName)
.tag("group", group)
.tag("topic", topic)
.build();
Point point = Point.measurement("offsetsConsumer")
.time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
// .time(timestamp, TimeUnit.MILLISECONDS)
.addField("logSize", logSize)
.addField("offsets", offsets)
.addField("lag", lag)
.build();
batchPoints.point(point);
influxDB.write(batchPoints);
}
} catch (Exception e) {
e.printStackTrace();
} finally {
if (influxDB != null) {
influxDB.close();
}
}
}
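Note that this example rebuilds the BatchPoints container and calls influxDB.write() once per OffsetInfo, so every write carries exactly one point and the batching goes unused. A sketch of one possible alternative that accumulates all points into a single batch (assuming group and topic are acceptable as per-point tags instead of batch-level tags):

BatchPoints batchPoints = BatchPoints.database(dbName).build();
for (OffsetInfo offsetInfo : offsetInfoList) {
    Point point = Point.measurement("offsetsConsumer")
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .tag("group", offsetInfo.getGroup())
            .tag("topic", offsetInfo.getTopic())
            .addField("logSize", offsetInfo.getLogSize())
            .addField("offsets", offsetInfo.getCommittedOffset())
            .addField("lag", offsetInfo.getLag())
            .build();
    batchPoints.point(point);   // accumulate instead of writing immediately
}
influxDB.write(batchPoints);    // one HTTP request for the whole batch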
Example 2: write
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
public void write(final String database, final RetentionPolicy retentionPolicy, final Point point) throws InflowException {
if (this.batchEnabled.get()) {
BatchEntry batchEntry = new BatchEntry(point, database, retentionPolicy);
this.batchProcessor.put(batchEntry);
} else {
BatchPoints batchPoints = BatchPoints.database(database).retentionPolicy(retentionPolicy).build();
batchPoints.point(point);
this.write(batchPoints);
this.unBatchedCount.incrementAndGet();
}
this.writeCount.incrementAndGet();
}
Example 3: savePoint
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
private void savePoint(String message, InfluxDB influxDB) {
try {
System.out.println(" [x] Received '" + message + "'");
JSONObject msg = new JSONObject(message);
DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
Date date = format.parse(msg.get("dateModified").toString());
long timeMillisec = date.getTime();
Point event2add = Point.measurement("events")
.time(timeMillisec, TimeUnit.MILLISECONDS)
.field("id", msg.get("id"))
.field("status", msg.get("status"))
.field("billingModel", msg.get("billingModel").toString())
.field("productType", msg.get("productType").toString())
.field("productId", msg.get("productId").toString())
.field("instanceId", msg.get("instanceId").toString())
.field("setupCost", msg.get("setupCost"))
.field("periodCost", msg.get("periodCost"))
.field("period", msg.get("period"))
.field("providerId", msg.get("providerId").toString())
.field("clientId", msg.get("clientId").toString())
.field("startDate", msg.get("startDate"))
.field("lastBillDate", msg.get("lastBillDate"))
.field("agreementId", msg.get("agreementId").toString())
.field("relatives", msg.get("relatives"))
.field("renew", msg.get("renew"))
.field("dateCreated", msg.get("dateCreated"))
.field("priceUnit", msg.get("priceUnit"))
.build();
BatchPoints batchPoints = giveMeEmptyContainer();
batchPoints.point(event2add);
saveContainerToDB(batchPoints);
//influxDB.write(load.configuration.get("dbName"), "default", event2add);
} catch (Exception ex) {
System.err.println("Caught exception in client thread: " + ex.getMessage());
}
}
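giveMeEmptyContainer() and saveContainerToDB() belong to the surrounding database-client class and are not shown in this snippet. A plausible minimal sketch of what they might do, assuming the client holds an InfluxDB connection and a target database name (only the method names come from the example; the bodies are assumptions):

// hypothetical reconstruction of the helpers used above
public BatchPoints giveMeEmptyContainer() {
    // empty batch bound to the configured database; callers add points via point(...)
    return BatchPoints.database(dbName).build();
}

public void saveContainerToDB(BatchPoints container) {
    // flush all accumulated points in a single write
    influxDB.write(container);
}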
Example 4: populateContainerWithCDRs
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
public void populateContainerWithCDRs(BatchPoints container) {
// get indexes
Integer timeIndex = columns.indexOf("time");
Integer rawUsageIndex = columns.indexOf("rawusage");
Integer projectidIndex = columns.indexOf("projectid");
Integer accountIndex = columns.indexOf("account");
// create UDREntries
for (List<String> point : points) {
UDREntry entry = new UDREntry();
entry.setTime(point.get(timeIndex));
entry.setRawusage(point.get(rawUsageIndex));
entry.setProjectid(point.get(projectidIndex));
entry.setAccount(point.get(accountIndex));
entry.setMetername(name);
// now save CDR to container
CDR cdr = entry.getCDR();
// only add if we have valid CDR
if (cdr != null) {
container.point(cdr.toDBPoint());
}
}
}
Example 5: write
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
public void write(Batch batch) throws StageException {
BatchPoints batchPoints = BatchPoints
.database(conf.dbName)
.retentionPolicy(conf.retentionPolicy)
.consistency(conf.consistencyLevel)
.build();
Iterator<Record> recordIterator = batch.getRecords();
while (recordIterator.hasNext()) {
Record record = recordIterator.next();
for (Point point : converter.getPoints(record)) {
batchPoints.point(point);
}
}
client.write(batchPoints);
}
Example 6: testCase
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
protected void testCase(Object[] args) {
assertEquals(1, args.length);
BatchPoints actual = (BatchPoints) args[0];
Point expectedPoint = Point.measurement("fake")
.field(InfluxDBReporter.VALUE_COLUMN, 100L)
.tag(TagUtil.PREFIX_TAG, "influxdb.reporter.test")
.build();
BatchPoints expected = BatchPoints.database("database").build();
expected.point(expectedPoint);
assertEquals(expected.getDatabase(), actual.getDatabase());
assertEquals(expected.getPoints().size(), actual.getPoints().size());
Point actualPoint = actual.getPoints().get(0);
// All the fields on Point are private
assertTrue(actualPoint.lineProtocol().startsWith("fake"));
assertTrue(actualPoint.lineProtocol().contains("value=100"));
assertTrue(actualPoint.lineProtocol().contains("prefix=influxdb.reporter.test"));
}
Example 7: sendData
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
public Object sendData(Map<String, Object> data) {
BatchPoints batchPoints = BatchPoints
.database(databaseName)//.tag("async", "true").retentionPolicy("default").consistency(ConsistencyLevel.ALL)
.build();
for (Map.Entry<String, Object> entry : data.entrySet()) {
Point point;
if (Map.class.isAssignableFrom(entry.getValue().getClass())) {
point = Point.measurement(entry.getKey()).fields((Map<String, Object>) entry.getValue())
.build();
} else {
point = Point.measurement(entry.getKey()).field("value", entry.getValue()).build();
}
batchPoints.point(point);
}
influxDB.write(batchPoints);
return null;
}
Example 8: handleChangeBlock
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
private void handleChangeBlock(String name, ChangeBlockEvent.Break.Post event, Player player) {
BatchPoints batch = newBatch();
for (Transaction<BlockSnapshot> transaction : event.getTransactions()) {
Point.Builder p = newPoint(name);
addPlayer(p, player);
BlockSnapshot original = transaction.getOriginal();
p.tag("block_original", original.getState().getId());
p.tag("block_final", transaction.getFinal().getState().getId());
original.getLocation().ifPresent(loc -> {
addLocation(p, loc);
});
batch.point(p.build());
}
connection.write(batch);
}
Example 9: importReport
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
private static void importReport(FritzBoxReportMail mail) {
final BatchPoints batchPoints = BatchPoints.database(dbName) //
// .tag("async", "true") //
.retentionPolicy("default") //
.consistency(ConsistencyLevel.ALL) //
.build();
batchPoints.point(getDataConnectionPointYesterday(mail));
influxDB.write(batchPoints);
}
Example 10: setMeterList
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Post
public String setMeterList(Representation entity) {
counter.increment(endpoint);
logger.trace("Received CloudStack meterList selection, saving it into DB");
try {
// construct object from JSON based on POJO template
MeterList meterList = new Gson().fromJson(entity.getText(), MeterList.class);
// get empty container
BatchPoints container = dbClient.giveMeEmptyContainer();
// now fill it with data
for (Meter meter : meterList.getData()) {
// add point to container
container.point(meter.toDBPoint());
}
// save it to database
dbClient.saveContainerToDB(container);
} catch (IOException e) {
logger.error("Could not parse received JSON when setting meterList: " + e.getMessage());
throw new ResourceException(500);
}
return "Success";
}
Example 11: savePointsToDb
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
/**
* Save list of points to influxDB
*
* @param points list
*/
private void savePointsToDb(List<Point> points) {
// add points to container
if (points != null) {
// ask for new container, we have to save each page on its own, so we don't run out of memory
BatchPoints container = dbClient.giveMeEmptyContainer();
for (Point point : points) {
container.point(point);
}
dbClient.saveContainerToDB(container);
}
}
Example 12: handleMessageDelivery
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
/**
* This is the body of message processing
*
* @param channel where consumer should listen
* @return consumer object
*/
private Consumer handleMessageDelivery(Channel channel) {
return new DefaultConsumer(channel) {
@Override
public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
try {
// make sure encoding is correct
String message = new String(body, "UTF-8");
// now parse it
MCNEvent mcnEvent = processMessage(message);
if (mcnEvent != null) {
// request empty container
BatchPoints container = db.giveMeEmptyContainerForEvents(Loader.getSettings().getMcnSettings().getMCNDBEventsName());
// add a point
container.point(mcnEvent.toPoint());
// let the database save it
db.saveContainerToDB(container);
}
} catch (Exception ex) {
logger.error("Caught exception in client thread: " + ex.getMessage());
}
}
};
}
Example 13: handleMessageDelivery
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
/**
* This is the body of message processing
*
* @param channel where consumer should listen
* @return consumer object
*/
private Consumer handleMessageDelivery(Channel channel) {
return new DefaultConsumer(channel) {
@Override
public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
try {
// make sure encoding is correct
String message = new String(body, "UTF-8");
// now parse it
TnovaEvent tnovaEvent = processMessage(message);
if (tnovaEvent != null) {
// request empty container
BatchPoints container = db.giveMeEmptyContainerForEvents(Loader.getSettings().gettNovaSettings().getTNovaDBEventsName());
// add a point
container.point(tnovaEvent.toPoint());
// let the database save it
db.saveContainerToDB(container);
}
} catch (Exception ex) {
System.err.println("Caught exception in client thread: " + ex.getMessage());
}
}
};
}
Example 14: recordGaugeValues
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
/**
* Record multiple gauge values in InfluxDB
*
* @param gauges A map of gauge names to values
*/
@Override
public void recordGaugeValues(Map<String, ? extends Number> gauges) {
long time = System.currentTimeMillis();
BatchPoints batchPoints = BatchPoints.database(database).build();
for (Map.Entry<String, ? extends Number> gauge: gauges.entrySet()) {
batchPoints.point(constructPoint(time, gauge.getKey(), gauge.getValue()));
}
client.write(batchPoints);
}
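constructPoint(...) is a private helper that is not part of this snippet. A minimal sketch of what it might look like, assuming each gauge maps to its own measurement with a single value field (the field name is an assumption):

// hypothetical helper matching the call above
private Point constructPoint(long time, String name, Number value) {
    return Point.measurement(name)
            .time(time, TimeUnit.MILLISECONDS)
            .addField("value", value)
            .build();
}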
Example 15: insert
import org.influxdb.dto.BatchPoints; // import the package/class the method depends on
@Override
public void insert() {
InfluxDB influxDB = null;
try {
influxDB = InfluxDBFactory.connect(influxDBUrl);
if (!influxDB.databaseExists(dbName)) {
influxDB.createDatabase(dbName);
}
for (MBeanInfo mBeanInfo : mBeanInfoList) {
String label = mBeanInfo.getLabel();
String topic = mBeanInfo.getTopic();
double oneMinute = mBeanInfo.getOneMinute();
double fiveMinute = mBeanInfo.getFiveMinute();
double fifteenMinute = mBeanInfo.getFifteenMinute();
double meanRate = mBeanInfo.getMeanRate();
BatchPoints batchPoints = BatchPoints
.database(dbName)
.tag("label", label)
.tag("topic", topic)
.build();
Point point = Point.measurement("mBeanMetric")
.time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
// .time(timestamp, TimeUnit.MILLISECONDS)
.addField("oneMinuteRate", oneMinute)
.addField("fiveMinuteRate", fiveMinute)
.addField("fifteenMinuteRate", fifteenMinute)
.addField("meanRate", meanRate)
.build();
batchPoints.point(point);
influxDB.write(batchPoints);
}
} catch (Exception e) {
e.printStackTrace();
} finally {
if (influxDB != null) {
influxDB.close();
}
}
}