This article collects typical usage examples of the Java class com.github.benmanes.caffeine.cache.Caffeine. If you are wondering what the Caffeine class does or how to use it, the curated examples below may help.
The Caffeine class belongs to the com.github.benmanes.caffeine.cache package. Fifteen code examples are shown below, sorted by popularity by default.
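By way of orientation, here is a minimal sketch of the builder API that every example below starts from; the size bound, expiry, and key/value names are illustrative only and are not taken from any of the projects quoted here:

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.concurrent.TimeUnit;

// Bounded cache whose entries expire ten minutes after the last write.
Cache<String, String> cache = Caffeine.newBuilder()
        .maximumSize(10_000)
        .expireAfterWrite(10, TimeUnit.MINUTES)
        .build();

cache.put("key", "value");                                   // insert manually
String present = cache.getIfPresent("key");                  // null if absent or evicted
String computed = cache.get("other", k -> "computed-" + k);  // compute and cache on miss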
Example 1: cacheExposesMetricsForHitMissAndEviction
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
void cacheExposesMetricsForHitMissAndEviction() throws Exception {
    // Run cleanup in the same thread, to remove async behavior with evictions
    Cache<String, String> cache = Caffeine.newBuilder()
            .maximumSize(2)
            .recordStats()
            .executor(Runnable::run)
            .build();
    CaffeineCacheMetrics.monitor(registry, cache, "c", userTags);

    cache.getIfPresent("user1");
    cache.getIfPresent("user1");
    cache.put("user1", "First User");
    cache.getIfPresent("user1");

    // Add to cache to trigger eviction.
    cache.put("user2", "Second User");
    cache.put("user3", "Third User");
    cache.put("user4", "Fourth User");

    assertThat(registry.mustFind("c.requests").tags("result", "hit").tags(userTags).functionCounter().count()).isEqualTo(1.0);
    assertThat(registry.mustFind("c.requests").tags("result", "miss").tags(userTags).functionCounter().count()).isEqualTo(2.0);
    assertThat(registry.mustFind("c.evictions").tags(userTags).functionCounter().count()).isEqualTo(2.0);
}
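The executor(Runnable::run) call is what makes the eviction assertions deterministic: by default Caffeine schedules maintenance work such as size-based eviction asynchronously on ForkJoinPool.commonPool(), so running that work on the calling thread keeps the hit, miss, and eviction counts stable by the time the test asserts on them.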
Example 2: start
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
public void start() {
    if (started) {
        throw new RuntimeException("Already started or in progress");
    }
    started = true;
    if (service == null) {
        service = newDefaultForkJoinPool(threadsCount);
    }
    cachedDispatchQueues = Caffeine.newBuilder()
            .weakValues()
            .executor(service)
            .maximumSize(queueSize)
            .build()
            .asMap();
}
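The asMap() call at the end exposes the cache as a ConcurrentMap view that is backed by the cache itself, so the weak-value and maximum-size settings configured on the builder still govern entries added through the map. A minimal sketch of the view semantics (the names here are illustrative):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;

Cache<String, Queue<Runnable>> cache = Caffeine.newBuilder().maximumSize(100).build();
ConcurrentMap<String, Queue<Runnable>> queues = cache.asMap();

// An entry created through the map view is visible through the cache, and vice versa.
queues.computeIfAbsent("queue-1", k -> new ConcurrentLinkedQueue<>()).add(() -> {});
Queue<Runnable> pending = cache.getIfPresent("queue-1");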
Example 3: filterByStream
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
public void filterByStream() throws Exception {
    Cache<String, LinkedList<ApplicationMetrics>> rawCache = Caffeine.newBuilder().build();
    ApplicationMetricsService service = new ApplicationMetricsService(rawCache);
    MetricsAggregator aggregator = new MetricsAggregator(service);
    MetricsCollectorEndpoint endpoint = new MetricsCollectorEndpoint(service);

    ApplicationMetrics app = createMetrics("httpIngest", "http", "foo", 0);
    ApplicationMetrics app1 = createMetrics("httpIngest", "http", "foobar", 1);
    ApplicationMetrics app2 = createMetrics("woodchuck", "time", "bar", 0);
    ApplicationMetrics app3 = createMetrics("twitter", "twitterstream", "bar", 0);
    aggregator.receive(app);
    aggregator.receive(app1);
    aggregator.receive(app2);
    aggregator.receive(app3);

    Assert.assertEquals(2, endpoint.fetchMetrics("httpIngest,woodchuck").getBody().getContent().size());
}
Developer: spring-cloud, Project: spring-cloud-dataflow-metrics-collector, Lines: 20, Source: MetricsAggregatorTests.java
Example 4: filterUsingInvalidDelimiter
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
public void filterUsingInvalidDelimiter() throws Exception {
    Cache<String, LinkedList<ApplicationMetrics>> rawCache = Caffeine.newBuilder().build();
    ApplicationMetricsService service = new ApplicationMetricsService(rawCache);
    MetricsAggregator aggregator = new MetricsAggregator(service);
    MetricsCollectorEndpoint endpoint = new MetricsCollectorEndpoint(service);

    ApplicationMetrics app = createMetrics("httpIngest", "http", "foo", 0);
    ApplicationMetrics app2 = createMetrics("woodchuck", "time", "bar", 0);
    ApplicationMetrics app3 = createMetrics("twitter", "twitterstream", "bar", 0);
    aggregator.receive(app);
    aggregator.receive(app2);
    aggregator.receive(app3);

    Assert.assertEquals(0, endpoint.fetchMetrics("httpIngest;woodchuck").getBody().getContent().size());
}
Developer: spring-cloud, Project: spring-cloud-dataflow-metrics-collector, Lines: 18, Source: MetricsAggregatorTests.java
Example 5: loadingCacheExposesMetricsForLoadsAndExceptions
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@SuppressWarnings("unchecked")
@Test
void loadingCacheExposesMetricsForLoadsAndExceptions() throws Exception {
    LoadingCache<Integer, String> cache = CaffeineCacheMetrics.monitor(registry, Caffeine.newBuilder()
            .recordStats()
            .build(key -> {
                if (key % 2 == 0)
                    throw new Exception("no evens!");
                return key.toString();
            }), "c", userTags);

    cache.get(1);
    cache.get(1);
    try {
        cache.get(2); // throws exception
    } catch (Exception ignored) {
    }
    cache.get(3);

    assertThat(registry.mustFind("c.requests").tags("result", "hit").tags(userTags).functionCounter().count()).isEqualTo(1.0);
    assertThat(registry.mustFind("c.requests").tags("result", "miss").tags(userTags).functionCounter().count()).isEqualTo(3.0);
    assertThat(registry.mustFind("c.load").tags("result", "failure").functionCounter().count()).isEqualTo(1.0);
    assertThat(registry.mustFind("c.load").tags("result", "success").functionCounter().count()).isEqualTo(2.0);
}
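A detail worth knowing here: when a Caffeine CacheLoader throws a checked exception, LoadingCache.get(key) rethrows it wrapped in an unchecked CompletionException, which is why the broad catch (Exception ignored) block above is sufficient while the failed load is still recorded under the c.load failure counter.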
Example 6: includeOneMetric
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
public void includeOneMetric() throws Exception {
    Long now = System.currentTimeMillis();
    Metric<Double> inputSendCount = new Metric<Double>("integration.channel.input.sendCount", 10.0, new Date(now));
    ApplicationMetrics app = createMetrics("httpIngest", "http", "foo", 0);
    app.getMetrics().add(inputSendCount);

    Cache<String, LinkedList<ApplicationMetrics>> rawCache = Caffeine.newBuilder().build();
    ApplicationMetricsService service = new ApplicationMetricsService(rawCache);
    MetricsAggregator aggregator = new MetricsAggregator(service);
    MetricsCollectorEndpoint endpoint = new MetricsCollectorEndpoint(service);
    aggregator.receive(app);
    Assert.assertEquals(1, rawCache.estimatedSize());

    StreamMetrics streamMetrics = endpoint.fetchMetrics("").getBody().iterator().next();
    Application application = streamMetrics.getApplications().get(0);
    Assert.assertNotNull(streamMetrics);
    Assert.assertEquals("http", application.getName());

    Instance instance = application.getInstances().get(0);
    Assert.assertEquals(app.getName(), instance.getKey());
    Assert.assertEquals("foo", instance.getGuid());
    Metric<Double> computed = instance.getMetrics().stream()
            .filter(metric -> metric.getName().equals("integration.channel.input.send.mean"))
            .findFirst().get();
    Assert.assertEquals(0, computed.getValue(), 0.0);
}
Developer: spring-cloud, Project: spring-cloud-dataflow-metrics-collector, Lines: 24, Source: MetricsAggregatorTests.java
Example 7: addApplication
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
public void addApplication() throws Exception {
    Cache<String, LinkedList<ApplicationMetrics>> rawCache = Caffeine.newBuilder().build();
    ApplicationMetricsService service = new ApplicationMetricsService(rawCache);
    MetricsAggregator aggregator = new MetricsAggregator(service);
    MetricsCollectorEndpoint endpoint = new MetricsCollectorEndpoint(service);

    ApplicationMetrics app = createMetrics("httpIngest", "http", "foo", 0);
    ApplicationMetrics app2 = createMetrics("httpIngest", "log", "bar", 0);
    aggregator.receive(app);
    aggregator.receive(app2);
    Assert.assertEquals(2, rawCache.estimatedSize());

    StreamMetrics streamMetrics = endpoint.fetchMetrics("").getBody().iterator().next();
    Assert.assertEquals(2, streamMetrics.getApplications().size());
}
Developer: spring-cloud, Project: spring-cloud-dataflow-metrics-collector, Lines: 19, Source: MetricsAggregatorTests.java
Example 8: addStream
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@Test
public void addStream() throws Exception {
    Cache<String, LinkedList<ApplicationMetrics>> rawCache = Caffeine.newBuilder().build();
    ApplicationMetricsService service = new ApplicationMetricsService(rawCache);
    MetricsAggregator aggregator = new MetricsAggregator(service);
    MetricsCollectorEndpoint endpoint = new MetricsCollectorEndpoint(service);

    ApplicationMetrics app = createMetrics("httpIngest", "http", "foo", 0);
    ApplicationMetrics app2 = createMetrics("woodchuck", "time", "bar", 0);
    aggregator.receive(app);
    aggregator.receive(app2);

    Assert.assertEquals(2, endpoint.fetchMetrics("").getBody().getContent().size());
}
Developer: spring-cloud, Project: spring-cloud-dataflow-metrics-collector, Lines: 17, Source: MetricsAggregatorTests.java
Example 9: getCache
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
public static Cache<String, Authentication> getCache() {
    if (-1 == sessionMaxAge) {
        throw new IllegalStateException("Cache session max age not configured.");
    }
    if (null == CACHE) {
        CACHE = Caffeine.newBuilder().expireAfterAccess(sessionMaxAge, TimeUnit.SECONDS).build();
    }
    return CACHE;
}
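Note that this lazy initialization is not synchronized: if getCache() can be reached concurrently before CACHE is assigned, two threads may each build a cache and one instance will simply be discarded. Example 10 below shows the same pattern guarded with synchronized.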
Example 10: init
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
public static synchronized void init(Configuration config) {
    if (CACHE == null) {
        long expireMinutes = config.getVisibilityCache().getExpirationMinutes();
        int initialCapacity = config.getVisibilityCache().getInitialCapacity();
        long maxCapacity = config.getVisibilityCache().getMaxCapacity();
        CACHE = Caffeine.newBuilder()
                .expireAfterAccess(expireMinutes, TimeUnit.MINUTES)
                .initialCapacity(initialCapacity)
                .maximumSize(maxCapacity)
                .build();
    }
}
Example 11: CachingGoogleAuthCodeFlow
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
private CachingGoogleAuthCodeFlow(final int authCacheTtl,
                                  final String clientId,
                                  final String clientSecret,
                                  final String organizationId,
                                  final String redirectUri) throws IOException {
    this.authCache = Caffeine.newBuilder()
            .maximumSize(4096)
            .expireAfterWrite(authCacheTtl, MILLISECONDS)
            .build(k -> this.isOrganizationMember(k, true));
    this.authFlow = new GoogleAuthorizationCodeFlow.Builder(
            HTTP_TRANSPORT,
            JSON_FACTORY,
            clientId,
            clientSecret,
            SCOPES)
            .setDataStoreFactory(DATA_STORE_FACTORY)
            .setAccessType("offline")
            .setApprovalPrompt("force")
            .build();
    this.organizationId = organizationId;
    this.redirectUri = redirectUri;
}
Example 12: PokeAPI
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
public PokeAPI(File storageDir) {
    storageDir.mkdirs();
    this.pokemonCache = Caffeine.newBuilder()
            .maximumSize(100)
            .build(k -> {
                File f = new File(storageDir, k + ".pokemon");
                if (f.exists()) {
                    // try-with-resources closes the stream even if loading fails
                    try (FileInputStream fis = new FileInputStream(f)) {
                        Pokemon p = Pokemon.load(new DataInputStream(fis));
                        return Optional.of(p);
                    }
                }
                return Optional.ofNullable(fromJSONObject(f, REQUESTER.newRequest("http://pokeapi.co/api/v2/pokemon/" + k)
                        .header("User-Agent", "Gabriel (Discord bot)")
                        .get()
                        .asObject()));
            });
}
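Returning an Optional from the loader is what makes negative results cacheable here: a Caffeine loader that returns null produces no cache entry at all, whereas Optional.empty() (or an Optional wrapping the remote result) is stored and served on subsequent lookups until it is evicted.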
Example 13: ChannelPool
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
/**
 * <p>Constructor for ChannelPool.</p>
 *
 * @param factory a {@link com.github.ibole.microservice.rpc.client.grpc.ChannelPool.ChannelFactory} object.
 * @param initialCapacity the initial capacity of the channel pool
 * @param maximumSize the maximum size of the channel pool
 */
private ChannelPool(ChannelFactory factory, int initialCapacity, int maximumSize) {
    Preconditions.checkArgument(factory != null,
            "ChannelFactory cannot be null.");
    Preconditions.checkArgument(initialCapacity > 0,
            "Channel initial capacity has to be a positive number.");
    Preconditions.checkArgument(maximumSize > 0,
            "Channel maximum size has to be a positive number.");
    Preconditions.checkArgument(maximumSize >= initialCapacity,
            "The maximum size of channel pool has to be greater than or equal to the initial capacity.");

    Caffeine<String, InstrumentedChannel> caffeine = Caffeine.newBuilder()
            .initialCapacity(initialCapacity)
            .maximumSize(maximumSize)
            .weakKeys()
            .softValues()
            .removalListener(new ChannelRemovalListener());
    this.factory = factory;
    channelPool = caffeine.build();
}
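One caveat with this configuration: weakKeys() switches key comparison to identity (==) rather than equals(), so with String keys a lookup using an equal but distinct instance will miss. A minimal sketch of the pitfall (the value here is just a placeholder):

Cache<String, Object> pool = Caffeine.newBuilder().weakKeys().build();
pool.put(new String("host-1:50051"), new Object());
Object channel = pool.getIfPresent(new String("host-1:50051")); // null: different String identity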
Example 14: init
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
@PostConstruct
public void init() {
    analyseStatsCache = Caffeine.newBuilder()
            .maximumSize(config.readLong(ConfigProperty.ANALYSIS_STATISTIC_CACHE_SIZE))
            .expireAfterWrite(config.readLong(ConfigProperty.ANALYSIS_STATISTIC_CACHE_SECONDS), TimeUnit.SECONDS)
            .build();

    dispatcher.requireAuth().blocking()
            .filter(c -> c.files().size() == 1, "You should provide only a single log file.")
            .postUpload("/gc/jvm/log/process", this::processJvmLog);
    dispatcher.blocking().requireAuth().filter(requiredWithPeriod(), periodMessage())
            .get("/gc/jvm/events", this::jvmEvents);
    dispatcher.blocking().requireAuth().filter(requiredWithPeriod(), periodMessage())
            .get("/gc/jvm/events/stream", this::jvmEventsStream);
    dispatcher.blocking().requireAuth().filter(requiredWithPeriod(), periodMessage())
            .get("/gc/jvm/events/full/stream", this::fullJvmEventsStream);
    dispatcher.blocking().requireAuth().filter(requiredWithPeriod(), periodMessage())
            .get("/gc/jvm/events/full/sample/stream", this::fullJvmSampleEventsStream);
    dispatcher.requireAuth().filter(requiredWithPeriod(), periodMessage())
            .get("/gc/jvm/events/erase", this::jvmEventsErase);
    dispatcher.requireAuth().filter(requiredWithoutPeriod(), message())
            .get("/gc/jvm/events/erase/all", this::jvmEventsEraseAll);
    dispatcher.requireAuth().filter(requiredWithPeriod(), message())
            .get("/gc/jvm/events/stats", this::jvmStats);
}
Example 15: LRUCache
import com.github.benmanes.caffeine.cache.Caffeine; // import the required package/class
/**
 * Constructs an empty <tt>LRUCache</tt> instance with the
 * specified initial capacity and maximum cache size.
 *
 * @param initialCapacity the initial capacity.
 * @param maximumCacheSize the max capacity.
 * @param stopOnEviction whether to stop the service on eviction.
 * @param soft whether to use soft references for values (a soft cache, default is false)
 * @param weak whether to use weak references for keys and values (a weak cache, default is false)
 * @param syncListener whether to invoke the eviction listener synchronously (default is false)
 * @throws IllegalArgumentException if the initial capacity is negative
 */
public LRUCache(int initialCapacity, int maximumCacheSize, boolean stopOnEviction,
                boolean soft, boolean weak, boolean syncListener) {
    Caffeine<K, V> caffeine = Caffeine.newBuilder()
            .initialCapacity(initialCapacity)
            .maximumSize(maximumCacheSize)
            .removalListener(this);
    if (soft) {
        caffeine.softValues();
    }
    if (weak) {
        caffeine.weakKeys();
        caffeine.weakValues();
    }
    if (syncListener) {
        caffeine.executor(Runnable::run);
    }
    this.cache = caffeine.build();
    this.map = cache.asMap();
    this.maxCacheSize = maximumCacheSize;
    this.stopOnEviction = stopOnEviction;
}
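For reference, a minimal sketch of how a removal listener such as the one registered above is notified, using executor(Runnable::run) in the same way as the syncListener branch so the notification arrives on the calling thread (the types and messages are illustrative only):

import com.github.benmanes.caffeine.cache.RemovalCause;

Cache<String, String> cache = Caffeine.newBuilder()
        .maximumSize(1)
        .executor(Runnable::run) // run maintenance and notifications on the caller thread
        .removalListener((String key, String value, RemovalCause cause) ->
                System.out.println("removed " + key + " (" + cause + ")"))
        .build();

cache.put("a", "1");
cache.put("b", "2"); // exceeds maximumSize(1)
cache.cleanUp();     // forces pending eviction, which invokes the listener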