From f3e324c09bc07e00731a54df1833398fc632f6a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Feb 2022 09:04:51 +0000 Subject: [PATCH 01/24] Bump postgresql from 42.0.0 to 42.2.25 in /bucket4j-postgresql Bumps [postgresql](https://github.com/pgjdbc/pgjdbc) from 42.0.0 to 42.2.25. - [Release notes](https://github.com/pgjdbc/pgjdbc/releases) - [Changelog](https://github.com/pgjdbc/pgjdbc/blob/master/CHANGELOG.md) - [Commits](https://github.com/pgjdbc/pgjdbc/compare/REL42.0.0...REL42.2.25) --- updated-dependencies: - dependency-name: org.postgresql:postgresql dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- bucket4j-postgresql/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bucket4j-postgresql/pom.xml b/bucket4j-postgresql/pom.xml index 90b05973..23b055eb 100644 --- a/bucket4j-postgresql/pom.xml +++ b/bucket4j-postgresql/pom.xml @@ -42,7 +42,7 @@ org.postgresql postgresql - 42.0.0 + 42.2.25 com.zaxxer From 90ebac0fe450bcfc378b9fb9d0348c512fe9b9aa Mon Sep 17 00:00:00 2001 From: "vladimir.bukhtoyarov" Date: Thu, 10 Feb 2022 20:32:31 +0300 Subject: [PATCH 02/24] begin new version --- README.md | 2 +- asciidoc/pom.xml | 2 +- bucket4j-benchmarks/pom.xml | 2 +- bucket4j-coherence/pom.xml | 2 +- bucket4j-core/pom.xml | 2 +- bucket4j-dynamodb-sdk-v1/pom.xml | 2 +- bucket4j-examples/pom.xml | 2 +- bucket4j-hazelcast-all/bucket4j-hazelcast-3/pom.xml | 2 +- bucket4j-hazelcast-all/bucket4j-hazelcast/pom.xml | 2 +- bucket4j-hazelcast-all/pom.xml | 2 +- bucket4j-ignite/pom.xml | 2 +- bucket4j-infinispan-all/bucket4j-infinispan-8/pom.xml | 2 +- bucket4j-infinispan-all/bucket4j-infinispan/pom.xml | 2 +- bucket4j-infinispan-all/pom.xml | 2 +- bucket4j-jcache/pom.xml | 2 +- bucket4j-mysql/pom.xml | 2 +- bucket4j-parent/pom.xml | 2 +- bucket4j-postgresql/pom.xml | 2 +- bucket4j-redis/pom.xml | 2 +- experimental/bucket4j-lua/pom.xml | 2 +- experimental/pom.xml | 2 +- lincheck-tests/pom.xml | 2 +- pom.xml | 4 ++-- 23 files changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 64531686..630961ad 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ The Bucket4j is distributed through [Maven Central](http://search.maven.org/): com.github.vladimir-bukhtoyarov bucket4j-core - 7.2.0 + 7.3.0 ``` #### You can build Bucket4j from sources diff --git a/asciidoc/pom.xml b/asciidoc/pom.xml index 360c5384..347e556d 100644 --- a/asciidoc/pom.xml +++ b/asciidoc/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent asciidoc diff --git a/bucket4j-benchmarks/pom.xml b/bucket4j-benchmarks/pom.xml index 425dc638..ba6637f5 100644 --- a/bucket4j-benchmarks/pom.xml +++ b/bucket4j-benchmarks/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent bucket4j-benchmarks diff --git a/bucket4j-coherence/pom.xml b/bucket4j-coherence/pom.xml index e3972ae6..33841a8f 100644 --- a/bucket4j-coherence/pom.xml +++ b/bucket4j-coherence/pom.xml @@ -9,7 +9,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent diff --git a/bucket4j-core/pom.xml b/bucket4j-core/pom.xml index b10bca26..62b7d4e7 100644 --- a/bucket4j-core/pom.xml +++ b/bucket4j-core/pom.xml @@ -7,7 +7,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent 4.0.0 diff --git a/bucket4j-dynamodb-sdk-v1/pom.xml b/bucket4j-dynamodb-sdk-v1/pom.xml index 0dc4c766..c7ccf4fd 100644 
--- a/bucket4j-dynamodb-sdk-v1/pom.xml +++ b/bucket4j-dynamodb-sdk-v1/pom.xml @@ -7,7 +7,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent diff --git a/bucket4j-examples/pom.xml b/bucket4j-examples/pom.xml index 99744818..434fdb41 100644 --- a/bucket4j-examples/pom.xml +++ b/bucket4j-examples/pom.xml @@ -23,7 +23,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent 4.0.0 diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast-3/pom.xml b/bucket4j-hazelcast-all/bucket4j-hazelcast-3/pom.xml index 54c0cc12..56cb86f8 100644 --- a/bucket4j-hazelcast-all/bucket4j-hazelcast-3/pom.xml +++ b/bucket4j-hazelcast-all/bucket4j-hazelcast-3/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-hazelcast-all - 7.2.0 + 7.3.0 ../../bucket4j-hazelcast-all bucket4j-hazelcast-3 diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast/pom.xml b/bucket4j-hazelcast-all/bucket4j-hazelcast/pom.xml index 4ccc9446..094c313c 100644 --- a/bucket4j-hazelcast-all/bucket4j-hazelcast/pom.xml +++ b/bucket4j-hazelcast-all/bucket4j-hazelcast/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-hazelcast-all - 7.2.0 + 7.3.0 ../../bucket4j-hazelcast-all bucket4j-hazelcast diff --git a/bucket4j-hazelcast-all/pom.xml b/bucket4j-hazelcast-all/pom.xml index 8481dae2..8d1aef09 100644 --- a/bucket4j-hazelcast-all/pom.xml +++ b/bucket4j-hazelcast-all/pom.xml @@ -5,7 +5,7 @@ bucket4j-parent com.github.vladimir-bukhtoyarov - 7.2.0 + 7.3.0 ../bucket4j-parent pom diff --git a/bucket4j-ignite/pom.xml b/bucket4j-ignite/pom.xml index 6b72a702..d8479a75 100644 --- a/bucket4j-ignite/pom.xml +++ b/bucket4j-ignite/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent bucket4j-ignite diff --git a/bucket4j-infinispan-all/bucket4j-infinispan-8/pom.xml b/bucket4j-infinispan-all/bucket4j-infinispan-8/pom.xml index 70d3770f..a4140f7a 100644 --- a/bucket4j-infinispan-all/bucket4j-infinispan-8/pom.xml +++ b/bucket4j-infinispan-all/bucket4j-infinispan-8/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-infinispan-all - 7.2.0 + 7.3.0 ../../bucket4j-infinispan-all diff --git a/bucket4j-infinispan-all/bucket4j-infinispan/pom.xml b/bucket4j-infinispan-all/bucket4j-infinispan/pom.xml index 0045480d..60b65f23 100644 --- a/bucket4j-infinispan-all/bucket4j-infinispan/pom.xml +++ b/bucket4j-infinispan-all/bucket4j-infinispan/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-infinispan-all - 7.2.0 + 7.3.0 ../../bucket4j-infinispan-all bucket4j-infinispan diff --git a/bucket4j-infinispan-all/pom.xml b/bucket4j-infinispan-all/pom.xml index a9421e35..d17ba291 100644 --- a/bucket4j-infinispan-all/pom.xml +++ b/bucket4j-infinispan-all/pom.xml @@ -6,7 +6,7 @@ bucket4j-parent com.github.vladimir-bukhtoyarov - 7.2.0 + 7.3.0 ../bucket4j-parent 4.0.0 diff --git a/bucket4j-jcache/pom.xml b/bucket4j-jcache/pom.xml index 75779df8..71498f7a 100644 --- a/bucket4j-jcache/pom.xml +++ b/bucket4j-jcache/pom.xml @@ -7,7 +7,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent 4.0.0 diff --git a/bucket4j-mysql/pom.xml b/bucket4j-mysql/pom.xml index 9b757696..4933553d 100644 --- a/bucket4j-mysql/pom.xml +++ b/bucket4j-mysql/pom.xml @@ -7,7 +7,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent diff --git a/bucket4j-parent/pom.xml b/bucket4j-parent/pom.xml index d854aaf8..4f9a5736 100644 --- a/bucket4j-parent/pom.xml +++ 
b/bucket4j-parent/pom.xml @@ -7,7 +7,7 @@ 4.0.0 com.github.vladimir-bukhtoyarov - 7.2.0 + 7.3.0 bucket4j-parent pom bucket4j-parent diff --git a/bucket4j-postgresql/pom.xml b/bucket4j-postgresql/pom.xml index 23b055eb..f751fb72 100644 --- a/bucket4j-postgresql/pom.xml +++ b/bucket4j-postgresql/pom.xml @@ -7,7 +7,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent diff --git a/bucket4j-redis/pom.xml b/bucket4j-redis/pom.xml index 4ddbb42f..edbb15c0 100644 --- a/bucket4j-redis/pom.xml +++ b/bucket4j-redis/pom.xml @@ -8,7 +8,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent bucket4j-redis diff --git a/experimental/bucket4j-lua/pom.xml b/experimental/bucket4j-lua/pom.xml index f6c75635..2fbd48ce 100644 --- a/experimental/bucket4j-lua/pom.xml +++ b/experimental/bucket4j-lua/pom.xml @@ -24,7 +24,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../../bucket4j-parent bucket4j-lua diff --git a/experimental/pom.xml b/experimental/pom.xml index fef68b78..33434106 100644 --- a/experimental/pom.xml +++ b/experimental/pom.xml @@ -6,7 +6,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent 4.0.0 diff --git a/lincheck-tests/pom.xml b/lincheck-tests/pom.xml index 1d5b46e7..a7390009 100644 --- a/lincheck-tests/pom.xml +++ b/lincheck-tests/pom.xml @@ -6,7 +6,7 @@ com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ../bucket4j-parent diff --git a/pom.xml b/pom.xml index 1593b663..4c3997d9 100644 --- a/pom.xml +++ b/pom.xml @@ -5,13 +5,13 @@ com.github.vladimir-bukhtoyarov bucket4j - 7.2.0 + 7.3.0 pom com.github.vladimir-bukhtoyarov bucket4j-parent - 7.2.0 + 7.3.0 ./bucket4j-parent From 9cf71fa88e93f02cde1f5a7147e15e15baef24ce Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 20:41:56 +0200 Subject: [PATCH 03/24] configuration-replacement.adoc --- .../advanced/configuration-replacement.adoc | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc index eb2ff1fd..249b3f41 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc @@ -1,15 +1,15 @@ [[configuration-replacement]] === On-the-fly configuration replacement -As previously mentioned in the definition for <> it is immutable object. +As previously mentioned in the definition for <> it is an immutable object. It is not possible to add, remove or change the limits for already created configuration, however, you can replace configuration of bucket via creating new configuration instance and calling `bucket.replaceConfiguration(newConfiguration, tokensInheritanceStrategy)`. ==== Why configuration replacement is not trivial? -1. The first problem of configuration replacement is making decision how to propagate available tokens from bucket with previous configuration to bucket with new configuration. If you don't care about previous bucket state then use `TokensInheritanceStrategy.RESET`. But it becomes to a tricky problem when we expect that previous consumption(that has not been compensated by refill yet) should take effect to the bucket with new configuration. In this case you need to make a choice between: +1. 
The first problem of configuration replacement is making decision on how to propagate available tokens from a bucket with a previous configuration to the bucket with a new configuration. If you don't care about previous the bucket state then use `TokensInheritanceStrategy.RESET`. But it becomes a tricky problem when we expect that previous consumption(that has not been compensated by refill yet) should take effect on the bucket with new configuration. In this case you need to make a choice between: * <> * <> * <> -2. There is another problem when you are choosing <>, <> or <> or <> and bucket has more then one bandwidth. For example how does replaceConfiguration implementation should bind bandwidths to each other in the following example? +2. There is another problem when you are choosing <>, <> or <> or <> and a bucket has more than one bandwidth. For example how does replaceConfiguration implementation bind bandwidths to each other in the following example? [source, java] ---- Bucket bucket = Bucket.builder() @@ -23,11 +23,11 @@ BucketConfiguration newConfiguration = BucketConfiguration.configurationBuilder( .build(); bucket.replaceConfiguration(newConfiguration, TokensInheritanceStrategy.AS_IS); ---- -It is obviously that simple strategy - copying tokens by bandwidth index will not work well in this case, because of it highly depends on order in which bandwidths were mentioneed in new and previous configuration. +It is obvious that a simple strategy - copying tokens by bandwidth index will not work well in this case, because it highly depends on order in which bandwidths were mentioned in new and previous configuration. ==== Taking control over replacement process via bandwidth identifiers -Instead of inventing the backward maggic Bucket4j provides to you ability to deap controll of this process by specifying identifiers for bandwidth, -so in case of multiple bandwidth configuratoin replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following: +Instead of inventing the backward magic Bucket4j provides to you ability to deap control of this process by specifying identifiers for bandwidth, +so in case of multiple bandwidth configuration replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following: [source, java] ---- Bucket bucket = Bucket.builder() @@ -44,7 +44,7 @@ Bucket bucket = Bucket.builder() .There are following rules for bandwidth identifiers: * By default bandwidth has null identifier. * null value of identifier equals to another null value if and only if there is only one bandwidth with null identifier. -* If identifier for bandwidth is specified then it must has unique in the bucket. Bucket does not allow to create several bandwidth with same ID. +* If an identifier for bandwidth is specified then it must be unique in the bucket. Bucket does not allow to create several bandwidths with the same ID. ==== TokensInheritanceStrategy explanation *TokensInheritanceStrategy* specifies the rules for inheritance of available tokens during configuration replacement process. @@ -56,15 +56,15 @@ Makes to copy available tokens proportional to bandwidth capacity by following f .PROPORTIONALLY strategy examples: ** *Example 1:* imagine bandwidth that was created by `Bandwidth.classic(100, Refill.gready(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement it was 40 available tokens. + +At the moment of config replacement there were 40 available tokens. 
+ + After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be multiplied by 2(200/100), and after replacement we will have 80 available tokens. ** *Example 2:* imagine bandwidth that was created by `Bandwidth.classic(100, Refill.gready(10, Duration.ofMinutes(1)))`. - At the moment of config replacement it was 40 available tokens. After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be multiplied by 0.2(20/100), and after replacement we will have 8 available tokens. + At the moment of config replacement there were 40 available tokens. After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be multiplied by 0.2(20/100), and after replacement we will have 8 available tokens. AS_IS:: -Instructs to copy available tokens as is, but with one exclusion: if available tokens is greater than new capacity, available tokens will be decreased to new capacity. +Instructs to copy available tokens as is, but with one exclusion: if available tokens are greater than new capacity, available tokens will be decreased to new capacity. + .AS_IS strategy examples: ** *Example 1:* imagine bandwidth that was created by `Bandwidth.classic(100, Refill.gready(10, Duration.ofMinutes(1)))`. + @@ -80,7 +80,7 @@ At the moment of config replacement it was 40 available tokens. + After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens can not be copied as is, because it is greater than new capacity, so available tokens will be reduced to 20. RESET:: -Use this mode when you want just to forget about previous bucket state. RESET just instructs to erases all previous state. Using this strategy equals to removing bucket and creating again with new configuration. +Use this mode when you want just to forget about the previous bucket state. RESET just instructs to erase all previous states. Using this strategy equals removing a bucket and creating again with a new configuration. ADDITIVE:: Instructs to copy available tokens as is, but with one exclusion: if new bandwidth capacity is greater than old capacity, available tokens will be increased by the difference between the old and the new configuration. + @@ -93,7 +93,7 @@ Instructs to copy available tokens as is, but with one exclusion: if new bandwid + At the moment of configuration replacement, it was 40 available tokens. + + -After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be copied and added to the difference between old and new configuration, and after replacement, we will have 140 available tokens. +After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be copied and added to the difference between old and new configurations, and after replacement, we will have 140 available tokens. ** *Example 2:* imagine bandwidth that was created by `Bandwidth.classic(100, Refill.gready(10, Duration.ofMinutes(1)))`. + + @@ -104,6 +104,6 @@ and after replacement we will have 20 available tokens. ** *Example 3:* imagine bandwidth that was created by `Bandwidth.classic(100, Refill.gready(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement it was 10 available tokens. 
+At the moment of config replacement, it was 10 available tokens. + -After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1))))`, and after replacement we will have 10 available tokens. +After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1))))`, and after replacement, we will have 10 available tokens. From c9431be4a863d7d9bad502224134185a8a17bef4 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 20:44:44 +0200 Subject: [PATCH 04/24] listener.adoc --- .../main/docs/asciidoc/advanced/listener.adoc | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc index 426a8de9..5f7f197c 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc @@ -3,30 +3,30 @@ ==== What can be listened .You can decorate the bucket by listener in order to track following events: -- When tokens consumed from bucket. -- When consumption requests was rejected by bucket. -- When thread was parked for wait of tokens refill in result of interaction with ``BlockingBucket``. -- When thread was interrupted during the wait of tokens refill in result of interaction with ``BlockingBucket``. -- When delayed task was submit to ``ScheduledExecutorService`` in result of interaction with ``AsyncScheduledBucket``. +- When tokens are consumed from a bucket. +- When consumption requests were rejected by the bucket. +- When thread was parked to wait for tokens refill as a result of interaction with ``BlockingBucket``. +- When thread was interrupted during the wait for tokens to be refilled as a result of interaction with ``BlockingBucket``. +- When a delayed task was submitted to ``ScheduledExecutorService`` as a result of interaction with ``AsyncScheduledBucket``. ==== Listener API - corner cases ======== -**Question:** How many listeners is need to create in case of application uses many buckets? +**Question:** How many listeners are needed to create an application that uses many buckets? **Answer:** it depends: -- If you want to have aggregated statistics for all buckets then create single listener per application and reuse this listener for all buckets. -- If you want to measure statistics independently per each bucket then use listener per bucket model. +- If you want to have aggregated statistics for all buckets then create a single listener per application and reuse this listener for all buckets. +- If you want to measure statistics independently per each bucket then use a listener per bucket model. ======== ======== -**Question:** where is methods of listener are invoking in case of distributed usage? +**Question:** where are methods the listener is invoking in case of distributed usage? -**Answer:** listener always invoked on client side, it is means that each client JVM will have own totally independent stat for same bucket. +**Answer:** listener always invoked on client side, it means that each client JVM will have its own totally independent stat for the same bucket. ======== ======== -**Question:** Why does bucket invoke the listener on client side instead of server side in case of distributed scenario? What I need to do if I need in aggregated stat across the whole cluster? +**Question:** Why does bucket invoke the listener on client side instead of server side in case of distributed scenario? 
What do I need to do if I need an aggregated stat across the whole cluster? **Answer:** Because of planned expansion to non-JVM back-ends such as Redis, MySQL, PostgreSQL. It is not possible to serialize and invoke listener on this non-java back-ends, so it was decided to invoke listener on client side, @@ -34,8 +34,8 @@ in order to avoid inconsistency between different back-ends in the future. You can do post-aggregation of monitoring statistics via features built-into your monitoring database or via mediator(like StatsD) between your application and monitoring database. ======== -==== How to attach listener to bucket? -The bucket can be decorated by listener via ``toListenable`` method. +==== How to attach a listener to a bucket? +The bucket can be decorated by the listener via the ``toListenable`` method. [source, java] ---- BucketListener listener = new MyListener(); @@ -47,17 +47,17 @@ Bucket bucket = Bucket.builder() ---- ==== Example of integration with Dropwizard metrics-core -`io.github.bucket4j.SimpleBucketListener` is simple implementation of `io.github.bucket4j.BucketListener` interface that available out of the box. Bellow the example of exposing statistics via Dropwizard Metrics(for Micrometer it should be quite similar): +`io.github.bucket4j.SimpleBucketListener` is a simple implementation of `io.github.bucket4j.BucketListener` interface that is available out of the box. Below the example of exposing statistics via Dropwizard Metrics(for Micrometer it should be quite similar): [source, java] ---- public static Bucket decorateBucketByStatListener(Bucket originalBucket, String bucketName, MetricRegistry registry) { - SimpleBucketListener stat = new SimpleBucketListener(); + SimpleBucketListener stat = new SimpleBucketListener(); registry.register(name + ".consumed", (Gauge) stat::getConsumed); registry.register(name + ".rejected", (Gauge) stat::getRejected); registry.register(name + ".parkedNanos", (Gauge) stat::getParkedNanos); registry.register(name + ".interrupted", (Gauge) stat::getInterrupted); registry.register(name + ".delayedNanos", (Gauge) stat::getDelayedNanos); - + return originalBucket.toListenable(stat); } ----- +---- \ No newline at end of file From 8e052815e659df7806cb64007d0ca4a74f81964d Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 20:45:19 +0200 Subject: [PATCH 05/24] verbose-api.adoc --- .../src/main/docs/asciidoc/advanced/verbose-api.adoc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc b/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc index 20ee58cd..0a99c9d6 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc @@ -1,6 +1,6 @@ [[verbose-api]] === Verbose API -Verbose API:: is the API which intent is in injecting low-level diagnostic information into results of any interaction with bucket. Verbose API providing the same functionality as Regular API, with one exception - result of any method always decorated by `VerboseResult` wrapper. +Verbose API:: is the API whose intent is in injecting low-level diagnostic information into results of any interaction with a bucket. Verbose API provides the same functionality as Regular API, with one exception - result of any method always decorated by `VerboseResult` wrapper. 
VerboseResult:: is the wrapper for interaction result that provides the snapshot of bucket and its configuration that was actual at the moment of interaction with bucket. @@ -35,9 +35,9 @@ AsyncVerboseBucket verboseBucket = bucket.asVerbose(); NOTE: BlockingBucket and ScheduledBucket do not provide the verbose analogs. VerboseResult has no sense for this kind of buckets because interactions with them can be followed by thread sleep or delayed execution, so VerboseResult can be absolutely stale and irrelevant to the moment of time when control over execution is being returned to your code. ==== Principles of result decoration - * void return type always decorated by `VerboseResult` - * A primitive result type like long, boolean always decorated by correspondent boxed type for example `VerboseResult` - * None primitive reult type always decorated as is, for example `VerboseResult` +* void return type always decorated by `VerboseResult` +* A primitive result type like long, boolean always decorated by correspondent boxed type for example `VerboseResult` +* Non primitive reult type always decorated as is, for example `VerboseResult` ==== Example of Verbose API usage [source, java] @@ -65,4 +65,5 @@ if (probe.isConsumed()) { httpResponse.setContentType("text/plain"); httpResponse.getWriter().append("Too many requests"); } ----- \ No newline at end of file +---- + From 01cdb9c93c51e1f7f129d5b8426581a007df6e75 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 20:55:38 +0200 Subject: [PATCH 06/24] api-reference.adoc --- .../docs/asciidoc/basic/api-reference.adoc | 100 +++++++++--------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc b/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc index 5d75af7c..f33431cc 100644 --- a/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc +++ b/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc @@ -18,7 +18,7 @@ boolean tryConsume(long numTokens); ---- /** * Consumes {@code tokens} from bucket ignoring all limits. - * In result of this operation amount of tokens in the bucket could became negative. + * As a result of this operation amount of tokens in the bucket could become negative. * * There are two possible reasons to use this method: *
    @@ -26,15 +26,15 @@ boolean tryConsume(long numTokens); *
  • You want to apply a custom blocking strategy instead of the default which is applied on {@code asScheduler().consume(tokens)}
  • *
* - * @param tokens amount of tokens that should be consumed from bucket. + * @param tokens amount of tokens that should be consumed from a bucket. * * @return - * the amount of rate limit violation in nanoseconds calculated in following way: + * the amount of rate limit violation in nanoseconds is calculated in the following way: *
    *
  • zero if rate limit was not violated. For example the bucket had 5 tokens before invocation of {@code consumeIgnoringRateLimits(2)}, * after invocation 3 tokens remain in the bucket; since limits were not violated, zero is returned as the result.
  • *
  • Positive value which describes the amount of rate limit violation in nanoseconds. - * For example bucket with limit 10 tokens per 1 second, currently has the 2 tokens available, last refill happen 100 milliseconds ago, and {@code consumeIgnoringRateLimits(6)} called. + * For example, a bucket with a limit of 10 tokens per 1 second currently has 2 tokens available, the last refill happened 100 milliseconds ago, and {@code consumeIgnoringRateLimits(6)} is called. * 300_000_000 will be returned as the result and the available tokens in the bucket will become -3, and any variation of {@code tryConsume...} will not be successful for 400 milliseconds (the time required to refill the amount of available tokens up to 1). *
  • *
@@ -50,7 +50,7 @@ boolean tryConsume(long numTokens); * * @param numTokens The number of tokens to consume from the bucket, must be a positive number. * - * @return {@link ConsumptionProbe} which describes both result of consumption and tokens remaining in the bucket after consumption. + * @return {@link ConsumptionProbe} which describes both the result of consumption and tokens remaining in the bucket after consumption. */ ConsumptionProbe tryConsumeAndReturnRemaining(long numTokens); ---- @@ -72,21 +72,21 @@ boolean tryConsume(long numTokens); [source, java] ---- /** - * Tries to consume as much tokens from this bucket as available at the moment of invocation. + * Tries to consume as many tokens from this bucket as available at the moment of invocation. * - * @return number of tokens which has been consumed, or zero if was consumed nothing. + * @return number of tokens which have been consumed, or zero if nothing was consumed. */ long tryConsumeAsMuchAsPossible(); ---- [source, java] ---- /** - * Tries to consume as much tokens from bucket as available in the bucket at the moment of invocation, - * but tokens which should be consumed is limited by {@code limit}. + * Tries to consume as much tokens from the bucket as available in the bucket at the moment of invocation, + * but tokens which should be consumed are limited by {@code limit}. * - * @param limit maximum number of tokens to consume, should be positive. + * @param limit a maximum number of tokens to consume, should be positive. * - * @return number of tokens which has been consumed, or zero if was consumed nothing. + * @return number of tokens which has been consumed, or zero if nothing was consumed. */ long tryConsumeAsMuchAsPossible(long limit); ---- @@ -98,10 +98,10 @@ boolean tryConsume(long numTokens); * Add tokensToAdd to bucket. * Resulted count of tokens are calculated by following formula: *
newTokens = Math.min(capacity, currentTokens + tokensToAdd)
- * in other words resulted number of tokens never exceeds capacity independent of tokensToAdd. + * In other words, the resulting number of tokens never exceeds the capacity, independent of tokensToAdd. * *

Example of usage

- * The "compensating transaction" is one of obvious use case, when any piece of code consumed tokens from bucket, tried to do something and failed, the "addTokens" will be helpful to return tokens back to bucket: + * The "compensating transaction" is one of the obvious use case, when any piece of code consumed tokens from a bucket, tried to do something, and failed, the "addTokens" will be helpful to return tokens back to the bucket: *
{@code
      *      Bucket wallet;
      *      ...
@@ -127,7 +127,7 @@ boolean tryConsume(long numTokens);
      * Add tokensToAdd to bucket. In contrast to {@link #addTokens(long)}, usage of this method can lead to overflowing the bucket capacity.
      *
      * 

Example of usage

- * The "compensating transaction" is one of obvious use case, when any piece of code consumed tokens from bucket, tried to do something and failed, the "addTokens" will be helpful to return tokens back to bucket: + * The "compensating transaction" is one of the obvious use case, when any piece of code consumed tokens from a bucket, tried to do something, and failed, the "addTokens" will be helpful to return tokens back to the bucket: *
{@code
      *      Bucket wallet;
      *      ...
@@ -150,9 +150,9 @@ boolean tryConsume(long numTokens);
 [source, java]
 ----
     /**
-     * Returns amount of available tokens in this bucket.
+     * Returns the amount of available tokens in this bucket.
 * 

-* Typically you should avoid using of this method for, because available tokens can be changed by concurrent transactions for case of multithreaded/multi-process environment. +* Typically you should avoid using this method for, because available tokens can be changed by concurrent transactions in the case of a multithreaded/multi-process environment. * * @return amount of available tokens */ @@ -179,13 +179,13 @@ long getAvailableTokens(); * Replaces configuration of this bucket. * *

- * The first hard problem of configuration replacement is making decision how to propagate available tokens from bucket with previous configuration to bucket with new configuration. - * If you don't care about previous bucket state then use {@link TokensInheritanceStrategy#RESET}. - * But it becomes to a tricky problem when we expect that previous consumption(that has not been compensated by refill yet) should take effect to the bucket with new configuration. - * In this case you need to make a choice between {@link TokensInheritanceStrategy#PROPORTIONALLY} and {@link TokensInheritanceStrategy#AS_IS}, read documentation about both with strong attention. + * The first hard problem of configuration replacement is making decisions on how to propagate available tokens from the bucket with the previous configuration to the bucket with a new configuration. + * If you don't care about the previous bucket state then use {@link TokensInheritanceStrategy#RESET}. + * But it becomes a tricky problem when we expect that previous consumption(that has not been compensated by refill yet) should take effect to the bucket with a new configuration. + * In this case you need to make a choice between {@link TokensInheritanceStrategy#PROPORTIONALLY} and {@link TokensInheritanceStrategy#AS_IS}, read the documentation about both with strong attention. * - *

There is another problem when you are choosing {@link TokensInheritanceStrategy#PROPORTIONALLY} and {@link TokensInheritanceStrategy#AS_IS} and bucket has more then one bandwidth. - * For example how does replaceConfiguration implementation should bind bandwidths to each other in the following example? + *

There is another problem when you are choosing {@link TokensInheritanceStrategy#PROPORTIONALLY} and {@link TokensInheritanceStrategy#AS_IS} and the bucket has more than one bandwidth. * For example, how should the replaceConfiguration implementation bind bandwidths to each other in the following example? *

      * 
      *     Bucket bucket = Bucket.builder()
@@ -200,9 +200,9 @@ long getAvailableTokens();
      *     bucket.replaceConfiguration(newConfiguration, TokensInheritanceStrategy.AS_IS);
      * 
      * 
- * It is obviously that simple strategy - copying tokens by bandwidth index will not work well in this case, because of it highly depends from order. - * Instead of inventing the backward maggic Bucket4j provides to you ability to deap controll of this process by specifying identifiers for bandwidth, - * so in case of multiple bandwidth configuratoin replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following: + * It is obvious that a simple strategy - copying tokens by bandwidth index - will not work well in this case, because it highly depends on the order. + * Instead of inventing backward magic, Bucket4j provides you the ability to keep control of this process by specifying identifiers for bandwidths, + * so in the case of multiple-bandwidth configuration replacement the code can copy available tokens by bandwidth ID. So it is better to rewrite the code above as follows: *
      * 
      * Bucket bucket = Bucket.builder()
@@ -220,7 +220,7 @@ long getAvailableTokens();
      *
      *
      * 

- * There are following rules for bandwidth identifiers: + * There are the following rules for bandwidth identifiers: *

    *
  • * By default bandwidth has a null identifier. @@ -229,11 +229,11 @@ long getAvailableTokens(); * a null value of identifier is equal to another null value if and only if there is only one bandwidth with a null identifier. *
  • *
  • - * If identifier for bandwidth is specified then it must has unique in the bucket. Bucket does not allow to create several bandwidth with same ID. + * If an identifier for bandwidth is specified then it must be unique in the bucket. Bucket does not allow creating several bandwidths with the same ID. *
  • *
  • - * {@link TokensInheritanceStrategy#RESET} strategy will be applied for tokens migration during config replacement for bandwidth which has no bound bandwidth with same ID in previous configuration, - * idependently of strategy that was requested. + * {@link TokensInheritanceStrategy#RESET} strategy will be applied for tokens migration during config replacement for bandwidth which has no bound bandwidth with the same ID in the previous configuration, + * independently of the strategy that was requested. *
  • *
* @@ -288,10 +288,10 @@ See <> section for more details. [source, java] ---- /** - * Returns new copy of this bucket instance decorated by {@code listener}. - * The created bucket will share same tokens with source bucket and vice versa. + * Returns a new copy of this bucket instance decorated by {@code listener}. + * The created bucket will share the same tokens with the source bucket and vice versa. * - * See javadocs for {@link BucketListener} in order to understand semantic of listener. + * See javadocs for {@link BucketListener} in order to understand the semantics of listener. * * @param listener the listener of bucket events. * @@ -320,20 +320,20 @@ See <> section for more details. * *
  • * If the bucket does not have enough tokens, - * but deficit can be closed in period of time less then maxWaitTimeNanos nanoseconds, + * but the deficit can be closed in a period of time less than maxWaitTimeNanos nanoseconds, * then tokens are consumed(reserved in a fair manner) from the bucket and the current thread is blocked for the time required to close the deficit, * after unblocking the method returns true. *

    * Note: If InterruptedException happens while the thread was blocked * then tokens will not be returned back to the bucket, - * but you can use {@link Bucket#addTokens(long)} to returned tokens back. + * but you can use {@link Bucket#addTokens(long)} to return the tokens back. *

  • * * * @param numTokens The number of tokens to consume from the bucket. * @param maxWaitTimeNanos limit of time(in nanoseconds) which thread can wait. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill a missed number of tokens in the bucket * * @return true if {@code numTokens} has been consumed or false when {@code numTokens} has not been consumed * @@ -350,7 +350,7 @@ See <> section for more details. * * @param numTokens The number of tokens to consume from the bucket. * @param maxWait limit of time which thread can wait. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill a missed number of tokens in the bucket * * @return true if {@code numTokens} has been consumed or false when {@code numTokens} has not been consumed * @@ -405,7 +405,7 @@ See <> section for more details. * * @param numTokens The number of tokens to consume from the bucket. * @param maxWaitTimeNanos limit of time(in nanoseconds) which thread can wait. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill missed number of tokens in the bucket * * @return true if {@code numTokens} has been consumed or false when {@code numTokens} has not been consumed * @@ -420,7 +420,7 @@ See <> section for more details. * * @param numTokens The number of tokens to consume from the bucket. * @param maxWait limit of time which thread can wait. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill a missed number of tokens in the bucket * * @return true if {@code numTokens} has been consumed or false when {@code numTokens} has not been consumed * @@ -484,7 +484,7 @@ See <> section for more details. * * * @param numTokens The number of tokens to consume from the bucket. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill a missed number of tokens in the bucket * * * @throws InterruptedException in case of current thread has been interrupted during the waiting @@ -514,7 +514,7 @@ See <> section for more details. * Has same semantic with {@link #consume(long, BlockingStrategy)} but ignores interrupts(just restores interruption flag on exit). * * @param numTokens The number of tokens to consume from the bucket. - * @param blockingStrategy specifies the way to block current thread to amount of time required to refill missed number of tokens in the bucket + * @param blockingStrategy specifies the way to block the current thread to the amount of time required to refill a missed number of tokens in the bucket * * @see #consume(long, BlockingStrategy) */ @@ -545,26 +545,26 @@ See <> section for more details. *

    * The algorithm for all types of buckets is the following: *

      -*
    • Implementation issues asynchronous request to back-end behind the bucket(for local bucket it is just a synchronous call) in way which specific for each particular back-end.
    • +*
    • Implementation issues an asynchronous request to the back-end behind the bucket(for a local bucket it is just a synchronous call) in a way which is specific to each particular back-end.
    • *
    • Then the uncompleted future is returned to the caller.
    • *
    • If the back-end provides a signal(through callback) that the asynchronous request failed, then the future is completed exceptionally.
    • *
    • When the back-end provides a signal(through callback) that the request is done(for a local bucket the response is obtained immediately), then the following post-processing rules will be applied: *
        *
      • -* If tokens were consumed then future immediately completed by true. +* If tokens were consumed then the future is immediately completed by true. *
      • *
      • -* If tokens were not consumed because were not enough tokens in the bucket and maxWaitNanos nanoseconds is not enough time to refill deficit, -* then future immediately completed by false. +* If tokens were not consumed because there were not enough tokens in the bucket and maxWaitNanos nanoseconds is not enough time to refill the deficit, +* then the future is immediately completed by false. *
      • *
      • -* If tokens were reserved(effectively consumed) then task to delayed completion will be scheduled to the scheduler via {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)}, -* when delay equals to time required to refill the deficit of tokens. After scheduler executes task the future completed by true. +* If tokens were reserved(effectively consumed) then a task for delayed completion will be scheduled to the scheduler via {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)}, +* where the delay equals the time required to refill the deficit of tokens. After the scheduler executes the task the future is completed by true. *
      • *
      *
    • *
    -* It is strongly not recommended to do any heavy work in thread which completes the future, +* It is strongly not recommended to do any heavy work in a thread which completes the future, * because typically this will be a back-end thread which handles NIO selectors, * blocking this thread will take negative performance effect to back-end throughput, * so you always should resume control flow in another executor via methods like {@link CompletableFuture#thenApplyAsync(Function, Executor)}. @@ -600,22 +600,22 @@ CompletableFuture tryConsume(long numTokens, long maxWaitNanos, Schedul *

    * The algorithm for all types of buckets is the following: *

      - *
    • Implementation issues asynchronous request to back-end behind the bucket(for local bucket it is just a synchronous call) in way which specific for each particular back-end.
    • + *
    • Implementation issues an asynchronous request to the back-end behind the bucket(for a local bucket it is just a synchronous call) in a way which is specific to each particular back-end.
    • *
    • Then the uncompleted future is returned to the caller.
    • *
    • If the back-end provides a signal(through callback) that the asynchronous request failed, then the future is completed exceptionally.
    • *
    • When the back-end provides a signal(through callback) that the request is done(for a local bucket the response is obtained immediately), then the following post-processing rules will be applied: *
        *
      • - * If tokens were consumed then future immediately completed. + * If tokens were consumed then the future is immediately completed. *
      • *
      • - * Else tokens reserved(effectively consumed) and task to delayed completion will be scheduled to the scheduler via {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)}, - * when delay equals to time required to refill the deficit of tokens. After scheduler executes task the future completed. + * Else tokens are reserved(effectively consumed) and a task for delayed completion will be scheduled to the scheduler via {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)}, + * where the delay equals the time required to refill the deficit of tokens. After the scheduler executes the task the future is completed. *
      • *
      *
    • *
    - * It is strongly not recommended to do any heavy work in thread which completes the future, + * It is strongly not recommended to do any heavy work in a thread that completes the future, * because typically this will be a back-end thread which handles NIO selectors, * blocking this thread will take negative performance effect to back-end throughput, * so you always should resume control flow in another executor via methods like {@link CompletableFuture#thenApplyAsync(Function, Executor)}. From 852309e69f09f282337c6f42a978dde337666eb4 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:17:27 +0200 Subject: [PATCH 07/24] concepts.adoc --- .../main/docs/asciidoc/basic/concepts.adoc | 58 +++++++++---------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/basic/concepts.adoc b/asciidoc/src/main/docs/asciidoc/basic/concepts.adoc index c38e0235..8ac0cd5f 100644 --- a/asciidoc/src/main/docs/asciidoc/basic/concepts.adoc +++ b/asciidoc/src/main/docs/asciidoc/basic/concepts.adoc @@ -2,14 +2,14 @@ [[bucket, Bucket]] ==== Bucket -`Bucket` is rate-limiter that is implemented on the top of ideas of well-known https://en.wikipedia.org/wiki/Token_bucket[Token Bucket algorithm]. +`Bucket` is a rate-limiter that is implemented on the top of ideas of well-known https://en.wikipedia.org/wiki/Token_bucket[Token Bucket algorithm]. In the Bucket4j library code the `Bucket` is represented by interface https://github.com/vladimir-bukhtoyarov/bucket4j/blob/{minor-number}/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java[io.github.bucket4j.Bucket]. -.Bucket aggregates following parts: -* <> specifies an immutable collection of limitation rules that is used by bucket during its work. -* <> the place where bucket stores mutable state like amount of current available tokens. +.Bucket aggregates the following parts: +* <> specifies an immutable collection of limitation rules that are used by the bucket during its work. +* <> the place where bucket stores mutable state like the amount of currently available tokens. -Bucket can be constructed via special builder API <> that is available by factory method: +A bucket can be constructed via a special builder API <> that is available by factory method: [source, java] ---- Bucket bucket = Bucket.builder() @@ -20,9 +20,9 @@ Bucket bucket = Bucket.builder() [[bucket-bonfiguration, BucketConfiguration]] ==== BucketConfiguration `BucketConfiguration` can be described as collection of <> that are used by <> during its job. Configuration -In the Bucket4j library code the `BucketConfiguration` is represented by class https://github.com/vladimir-bukhtoyarov/bucket4j/blob/{minor-number}/bucket4j-core/src/main/java/io/github/bucket4j/BucketConfiguration.java[io.github.bucket4j.BucketConfiguration]. Configuration is immutable, there is no way to add or remove a limit to already created configuration. However, you can replace configuration of bucket via creating new configuration instance and calling `bucket.replaceConfiguration(newConfiguration)`. +In the Bucket4j library code the `BucketConfiguration` is represented by class https://github.com/vladimir-bukhtoyarov/bucket4j/blob/{minor-number}/bucket4j-core/src/main/java/io/github/bucket4j/BucketConfiguration.java[io.github.bucket4j.BucketConfiguration]. Configuration is immutable, there is no way to add or remove a limit to already created configuration. 
However, you can replace the configuration of the bucket via creating a new configuration instance and calling `bucket.replaceConfiguration(newConfiguration)`. -Usually you should not create BucketConfiguration directly(excepting the case with configuration replacement) because <> does for you behind the scene, for rare cases when you need to create configuration directly you have to use `ConfigurationBuilder` that is available by factory method: +Usually, you should not create BucketConfiguration directly(excepting the case with configuration replacement) because <> does for you behind the scene, for rare cases when you need to create configuration directly you have to use `ConfigurationBuilder` that is available by factory method: [source, java] ---- BucketConfiguration configuration = BucketConfiguration.builder() @@ -30,21 +30,21 @@ BucketConfiguration configuration = BucketConfiguration.builder() .build() ---- -IMPORTANT: Mostly users configure single limit per configuration, but it is strongly recommended analyzing whether <> - can affect your application and if so then thinking about to adding more limits. +IMPORTANT: Most users configure a single limit per configuration, but it is strongly recommended to analyze whether <> +can affect your application and if so then think about adding more limits. [[bandwidth]] ==== Limitation/Bandwidth -Limitations that are used by bucket can be denoted in terms of bandwidths. Bandwidth is denoted by following terms: +Limitations that are used by bucket can be denoted in terms of bandwidths. Bandwidth is denoted by the following terms: Capacity:: -Capacity is the term that directly inherited from classic interpretation of token-bucket algorithm, this specifies how many tokens your bucket has. +Capacity is the term that is directly inherited from the classic interpretation of the token-bucket algorithm, this specifies how many tokens your bucket has. Refill:: -<> specifies how fast tokens can be refilled after it were consumed from bucket. +<> specifies how fast tokens can be refilled after it was consumed from a bucket. Initial tokens:: -Bucket4j extend token-bucket algorithm by allowing to specify initial amount of tokens for each bandwidth. By default, initial amount of tokens equals to capacity, and can be changed by `withInitialTokens` method: + +Bucket4j extends the token-bucket algorithm by allowing to specify the initial amount of tokens for each bandwidth. By default, an initial amount of tokens equal to capacity and can be changed by `withInitialTokens` method: + + [source, java] ---- @@ -53,8 +53,8 @@ Bandwidth bandwidth = Bandwidth.simple(42, Duration.ofMinutes(1)) ---- ID:: -Identifier is the optional attribute that is null by default. You may prefer to assign identifiers for bandwidths if you use on the fly configuration replacement and your buckets have more than one bandwidth per bucket, otherwise it is better to avoid using identifiers in order to preserve memory. -Identifier for a bandwidth can be specified by `withId` method: + +The identifier is the optional attribute that is null by default. You may prefer to assign identifiers for bandwidths if you use on-the-fly configuration replacement and your buckets have more than one bandwidth per bucket, otherwise, it is better to avoid using identifiers to preserve memory. 
+The Identifier for bandwidth can be specified by `withId` method: + + [source, java] ---- @@ -63,15 +63,15 @@ BucketConfiguration configuration = BucketConfiguration.builder() .addLimit(Bandwidth.simple(100, Duration.ofSeconds(1)).withId("burst-protection")) .build(); ---- -NOTE: Identifiers are critical for on the fly configuration replacement functionality because during replacement it needs to make decision about how correctly propagate information about already consumed tokens from state before config replacement to state after replacement. This is not trivial task especially when amount of limits is changing. +NOTE: Identifiers are critical for on-the-fly configuration replacement functionality because during replacement it needs to decide how correctly propagate information about already consumed tokens from the state before config replacement to the state after replacement. This is not a trivial task especially when the number of limits is changing. [[refill, Refill]] ==== Refill Specifies the speed of tokens regeneration. -.There are tree types of refill: +.There are three types of refill: Greedy:: -This type of refill regenerates tokens in greedy manner, it tries to add the tokens to bucket as soon as possible. For example refill "10 tokens per 1 second" adds 1 token per each 100 millisecond, in other words refill will not wait 1 second to regenerate whole bunch of 10 tokens. The three refills bellow do refill of tokens with same speed: + +This type of refill greedily regenerates tokens manner, it tries to add the tokens to the bucket as soon as possible. For example refill "10 tokens per 1 second" adds 1 token per every 100 milliseconds, in other words, the refill will not wait 1 second to regenerate a whole bunch of 10 tokens. The three refills below do refill of tokens with the same speed: + + [source, java] ---- @@ -80,7 +80,7 @@ Refill.greedy(10, Duration.ofSeconds(1)); Refill.greedy(1, Duration.ofMillis(100)); ---- + -`Greedy` is default type of refill that is used when you create `simple` bandwidth + +`Greedy` is the default type of refill that is used when you create `simple` bandwidth + + [source, java] ---- @@ -89,8 +89,8 @@ Bandwidth.simple(100, Duration.ofMinutes(1)) Bandwidth.classic(100, Refill.greedy(100, Duration.ofMinutes(1))) ---- -Intervally:: -This type of refill regenerates tokens in intervally manner. "Intervally" in opposite to "greedy" will wait until whole period will be elapsed before regenerate the whole amount of tokens. + +Interval:: +This type of refill regenerates tokens in an interval manner. "Interval" in opposite to "greedy" will wait until the whole period will be elapsed before regenerating the whole amount of tokens. + + .Example: + + @@ -101,7 +101,7 @@ Refill.greedy(100, Duration.ofMinutes(1)); ---- IntervallyAligned:: -This type of refill regenerates that does refill of tokens in intervally manner. Intervally" in opposite to "greedy" will wait until whole period will be elapsed before regenerate the whole amount of tokens. In additional to *Intervally* it is possible to specify the time when first refill should happen. This type can be used to configure clear interval boundary i.e. start of second, minute, hour, day. To get more details reed javadocs for `Refill#intervallyAligned` method. + +This type of refill regenerates that does refill of tokens in an interval manner. Interval" in opposite to "greedy" will wait until the whole period will be elapsed before regenerating the whole amount of tokens. 
In addition to *Interval* it is possible to specify the time when the first refill should happen. This type can be used to configure clear interval boundary i.e. start of the second, minute, hour, day. To get more details to read javadocs for `Refill#intervallyAligned` method. + + .Example: [source, java] @@ -120,7 +120,7 @@ Bandwidth.classic(400, Refill.intervallyAligned(400, Duration.ofHours(1), firstR ==== BucketState BucketState is the place where bucket stores own mutable state like: -* Amount of current available tokens. +* Amount of currently available tokens. * Timestamp when the last refill was happen. `BucketState` is represented by interface https://github.com/vladimir-bukhtoyarov/bucket4j/blob/{minor-number}/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java[io.github.bucket4j.BucketState]. Usually you never interact with this interface, excepting the cases when you want to get access to low-level diagnostic API that is described in @@ -128,13 +128,13 @@ BucketState is the place where bucket stores own mutable state like: [[local-bucket-builder, BucketBuilder]] ==== BucketBuilder -It was explicitly decided by library authors to not provide for end users to construct a library entity via direct constructors. +It was explicitly decided by library authors to not provide for end-users to construct a library entity via direct constructors. -.It were to reason to split built-time and usage-time APIs: +.It was to reason to split built-time and usage-time APIs: * To be able in the future to change internal implementations without breaking backward compatibility. -* In order to provide `Fluent Builder API` that in our minds is good modern library design pattern. +* To to provide `Fluent Builder API` that in our minds is a good modern library design pattern. -`LocalBucketBuilder` is a fluent builder that is specialized to construct the local buckets, where local bucket is the bucket that holds internal state just in memory and does not provide clustering functionality. Bellow an example of LocalBucketBuilder usage: +`LocalBucketBuilder` is a fluent builder that is specialized to construct the local buckets, where a local bucket is a bucket that holds an internal state just in memory and does not provide clustering functionality. Bellow an example of LocalBucketBuilder usage: [source, java] ---- Bucket bucket = Bucket.builder() @@ -142,6 +142,4 @@ Bucket bucket = Bucket.builder() .withNanosecondPrecision() .withSynchronizationStrategy(SynchronizationStrategy.LOCK_FREE) .build() ----- - - +---- \ No newline at end of file From 3e6469e34e86d6202bed1339d32a8ad58f4fd3e7 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:19:31 +0200 Subject: [PATCH 08/24] configuration-replacement.adoc --- .../advanced/configuration-replacement.adoc | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc index 249b3f41..f930aeff 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc @@ -1,15 +1,15 @@ [[configuration-replacement]] === On-the-fly configuration replacement As previously mentioned in the definition for <> it is an immutable object. 
-It is not possible to add, remove or change the limits for already created configuration, however, you can replace configuration of bucket via creating new configuration instance and calling `bucket.replaceConfiguration(newConfiguration, tokensInheritanceStrategy)`. +It is not possible to add, remove or change the limits for an already created configuration; however, you can replace the configuration of the bucket by creating a new configuration instance and calling `bucket.replaceConfiguration(newConfiguration, tokensInheritanceStrategy)`. ==== Why configuration replacement is not trivial? -1. The first problem of configuration replacement is making decision on how to propagate available tokens from a bucket with a previous configuration to the bucket with a new configuration. If you don't care about previous the bucket state then use `TokensInheritanceStrategy.RESET`. But it becomes a tricky problem when we expect that previous consumption(that has not been compensated by refill yet) should take effect on the bucket with new configuration. In this case you need to make a choice between: +1. The first problem of configuration replacement is deciding how to propagate available tokens from a bucket with a previous configuration to the bucket with a new configuration. If you don't care about the previous bucket state then use `TokensInheritanceStrategy.RESET`. But it becomes a tricky problem when we expect that previous consumption (that has not been compensated by refill yet) should take effect on the bucket with a new configuration. In this case, you need to choose between: * <> * <> * <> -2. There is another problem when you are choosing <>, <> or <> or <> and a bucket has more than one bandwidth. For example how does replaceConfiguration implementation bind bandwidths to each other in the following example? +2. There is another problem when you are choosing <>, <> or <> or <> and a bucket has more than one bandwidth. For example, how does the replaceConfiguration implementation bind bandwidths to each other in the following example? [source, java] ---- Bucket bucket = Bucket.builder() @@ -23,11 +23,11 @@ BucketConfiguration newConfiguration = BucketConfiguration.configurationBuilder( .build(); bucket.replaceConfiguration(newConfiguration, TokensInheritanceStrategy.AS_IS); ---- -It is obvious that a simple strategy - copying tokens by bandwidth index will not work well in this case, because it highly depends on order in which bandwidths were mentioned in new and previous configuration. +It is obvious that a simple strategy - copying tokens by bandwidth index - will not work well in this case, because it highly depends on the order in which bandwidths were mentioned in the new and previous configuration. ==== Taking control over replacement process via bandwidth identifiers -Instead of inventing the backward magic Bucket4j provides to you ability to deap control of this process by specifying identifiers for bandwidth, -so in case of multiple bandwidth configuration replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following: +Instead of inventing backward magic, Bucket4j provides you the ability to keep control of this process by specifying identifiers for bandwidths, +so in the case of multiple bandwidths the configuration replacement code can copy available tokens by bandwidth ID.
So it is better to rewrite the code above as follows: [source, java] ---- Bucket bucket = Bucket.builder() @@ -41,10 +41,10 @@ Bucket bucket = Bucket.builder() .build(); bucket.replaceConfiguration(newConfiguration, TokensInheritanceStrategy.PROPORTIONALLY); ---- -.There are following rules for bandwidth identifiers: +.There are the following rules for bandwidth identifiers: * By default bandwidth has null identifier. -* null value of identifier equals to another null value if and only if there is only one bandwidth with null identifier. -* If an identifier for bandwidth is specified then it must be unique in the bucket. Bucket does not allow to create several bandwidths with the same ID. +* A null identifier equals another null identifier if and only if there is only one bandwidth with a null identifier. +* If an identifier for a bandwidth is specified then it must be unique in the bucket. A bucket does not allow creating several bandwidths with the same ID. ==== TokensInheritanceStrategy explanation *TokensInheritanceStrategy* specifies the rules for inheritance of available tokens during the configuration replacement process. @@ -56,12 +56,12 @@ Makes to copy available tokens proportional to bandwidth capacity by following f .PROPORTIONALLY strategy examples: ** *Example 1:* imagine a bandwidth that was created by `Bandwidth.classic(100, Refill.greedy(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement there were 40 available tokens. + +At the moment of config replacement, there were 40 available tokens. + + -After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be multiplied by 2(200/100), and after replacement we will have 80 available tokens. +After replacing this bandwidth with the following `Bandwidth.classic(200, Refill.greedy(10, Duration.ofMinutes(1)))`, the 40 available tokens will be multiplied by 2 (200/100), and after replacement, we will have 80 available tokens. ** *Example 2:* imagine a bandwidth that was created by `Bandwidth.classic(100, Refill.greedy(10, Duration.ofMinutes(1)))`. - At the moment of config replacement there were 40 available tokens. After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens will be multiplied by 0.2(20/100), and after replacement we will have 8 available tokens. +At the moment of config replacement, there were 40 available tokens. After replacing this bandwidth with the following `Bandwidth.classic(20, Refill.greedy(10, Duration.ofMinutes(1)))`, the 40 available tokens will be multiplied by 0.2 (20/100), and after replacement, we will have 8 available tokens. AS_IS:: Instructs to copy available tokens as is, but with one exclusion: if available tokens are greater than the new capacity, available tokens will be decreased to the new capacity. @@ -69,15 +69,15 @@ Instructs to copy available tokens as is, but with one exclusion: if available t .AS_IS strategy examples: ** *Example 1:* imagine a bandwidth that was created by `Bandwidth.classic(100, Refill.greedy(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement it was 40 available tokens. + +At the moment of config replacement, there were 40 available tokens. + + -After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gready(10, Duration.ofMinutes(1)))}` 40 available tokens will be just copied, and after replacement we will have 40 available tokens.
+After replacing this bandwidth with the following `Bandwidth.classic(200, Refill.greedy(10, Duration.ofMinutes(1)))`, the 40 available tokens will be just copied, and after replacement, we will have 40 available tokens. ** *Example 2:* imagine a bandwidth that was created by `Bandwidth.classic(100, Refill.greedy(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement it was 40 available tokens. + +At the moment of config replacement, there were 40 available tokens. + + -After replacing this bandwidth by following `Bandwidth.classic(20, Refill.gready(10, Duration.ofMinutes(1)))` 40 available tokens can not be copied as is, because it is greater than new capacity, so available tokens will be reduced to 20. +After replacing this bandwidth with the following `Bandwidth.classic(20, Refill.greedy(10, Duration.ofMinutes(1)))`, the 40 available tokens cannot be copied as is because that is greater than the new capacity, so the available tokens will be reduced to 20. RESET:: Use this mode when you want just to forget about the previous bucket state. RESET just instructs to erase all previous states. Using this strategy equals removing a bucket and creating it again with a new configuration. @@ -97,7 +97,7 @@ After replacing this bandwidth by following `Bandwidth.classic(200, Refill.gread ** *Example 2:* imagine a bandwidth that was created by `Bandwidth.classic(100, Refill.greedy(10, Duration.ofMinutes(1)))`. + + -At the moment of config replacement it was 40 available tokens. + +At the moment of config replacement, there were 40 available tokens. + + After replacing this bandwidth with `Bandwidth.classic(20, Refill.greedy(10, Duration.ofMinutes(1)))`, we will have 20 available tokens. From d22a70fbfd101a96cf7e7abac7aad5c5fae6937a Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:20:45 +0200 Subject: [PATCH 09/24] listener.adoc --- .../main/docs/asciidoc/advanced/listener.adoc | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc index 5f7f197c..7c7d880c 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc @@ -2,11 +2,11 @@ === Listening for bucket events ==== What can be listened -.You can decorate the bucket by listener in order to track following events: +.You can decorate the bucket with a listener to track the following events: - When tokens are consumed from a bucket. - When consumption requests were rejected by the bucket. -- When thread was parked to wait for tokens refill as a result of interaction with ``BlockingBucket``. -- When thread was interrupted during the wait for tokens to be refilled as a result of interaction with ``BlockingBucket``. +- When the thread was parked to wait for tokens refill as a result of interaction with ``BlockingBucket``. +- When the thread was interrupted during the wait for tokens to be refilled as a result of interaction with ``BlockingBucket``. - When a delayed task was submitted to ``ScheduledExecutorService`` as a result of interaction with ``AsyncScheduledBucket``. ==== Listener API - corner cases @@ -16,22 +16,22 @@ **Answer:** it depends: - If you want to have aggregated statistics for all buckets then create a single listener per application and reuse this listener for all buckets. -- If you want to measure statistics independently per each bucket then use a listener per bucket model.
+- If you want to measure statistics independently per bucket then use a listener per bucket model. ======== ======== **Question:** where are the listener methods invoked in the case of distributed usage? -**Answer:** listener always invoked on client side, it means that each client JVM will have its own totally independent stat for the same bucket. +**Answer:** the listener is always invoked on the client side, which means that each client JVM will have its own independent stat for the same bucket. ======== ======== **Question:** Why does the bucket invoke the listener on the client side instead of the server side in the case of a distributed scenario? What do I need to do if I need an aggregated stat across the whole cluster? -**Answer:** Because of planned expansion to non-JVM back-ends such as Redis, MySQL, PostgreSQL. -It is not possible to serialize and invoke listener on this non-java back-ends, so it was decided to invoke listener on client side, -in order to avoid inconsistency between different back-ends in the future. -You can do post-aggregation of monitoring statistics via features built-into your monitoring database or via mediator(like StatsD) between your application and monitoring database. +**Answer:** Because of a planned expansion to non-JVM back-ends such as Redis, MySQL, PostgreSQL. +It is not possible to serialize and invoke the listener on these non-Java back-ends, so it was decided to invoke the listener on the client side, +to avoid inconsistency between different back-ends in the future. +You can do post-aggregation of monitoring statistics via features built into your monitoring database or via a mediator (like StatsD) between your application and the monitoring database. ======== ==== How to attach a listener to a bucket? @@ -47,7 +47,7 @@ Bucket bucket = Bucket.builder() ---- ==== Example of integration with Dropwizard metrics-core -`io.github.bucket4j.SimpleBucketListener` is a simple implementation of `io.github.bucket4j.BucketListener` interface that is available out of the box.
Below is an example of exposing statistics via Dropwizard Metrics (for Micrometer it should be quite similar): [source, java] ---- public static Bucket decorateBucketByStatListener(Bucket originalBucket, String bucketName, MetricRegistry registry) { @@ -60,4 +60,4 @@ public static Bucket decorateBucketByStatListener(Bucket originalBucket, String return originalBucket.toListenable(stat); } ----- \ No newline at end of file +---- From f85d21bd99066c8fe92727ce9adc54f0b4fb1acc Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:21:50 +0200 Subject: [PATCH 10/24] verbose-api.adoc --- .../src/main/docs/asciidoc/advanced/verbose-api.adoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc b/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc index 0a99c9d6..fc065a45 100644 --- a/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc +++ b/asciidoc/src/main/docs/asciidoc/advanced/verbose-api.adoc @@ -1,11 +1,11 @@ [[verbose-api]] === Verbose API -Verbose API:: is the API whose intent is in injecting low-level diagnostic information into results of any interaction with a bucket. Verbose API provides the same functionality as Regular API, with one exception - result of any method always decorated by `VerboseResult` wrapper. +Verbose API:: is the API whose intent is to inject low-level diagnostic information into the results of any interaction with a bucket. Verbose API provides the same functionality as Regular API, with one exception - the result of any method is always decorated by the `VerboseResult` wrapper. -VerboseResult:: is the wrapper for interaction result that provides the snapshot of bucket and its configuration that was actual at the moment of interaction with bucket. +VerboseResult:: is the wrapper for an interaction result that provides a snapshot of the bucket and its configuration as they were at the moment of interaction with the bucket. ==== Verbose API entry-points -The way to get access for `Verbose API` is the same for all type of buckets, just call `asVerbose()` method: +The way to get access to the `Verbose API` is the same for all types of buckets: just call the `asVerbose()` method: [source, java] ---- @@ -32,12 +32,12 @@ AsyncBucketProxy bucket = ...; AsyncVerboseBucket verboseBucket = bucket.asVerbose(); ---- -NOTE: BlockingBucket and ScheduledBucket do not provide the verbose analogs. VerboseResult has no sense for this kind of buckets because interactions with them can be followed by thread sleep or delayed execution, so VerboseResult can be absolutely stale and irrelevant to the moment of time when control over execution is being returned to your code. +NOTE: BlockingBucket and ScheduledBucket do not provide verbose analogs. VerboseResult makes no sense for these kinds of buckets because interactions with them can be followed by thread sleep or delayed execution, so the VerboseResult could be stale and irrelevant by the moment when control over execution is returned to your code.
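To make the decoration concrete, below is a minimal sketch of reading diagnostics from a `VerboseResult` (a local bucket is used for brevity; the accessors `getValue()`, `getConfiguration()` and `getDiagnostics()` reflect the 7.x API as we understand it - double-check them against the javadocs of your version):

[source, java]
----
Bucket bucket = Bucket.builder()
        .addLimit(Bandwidth.simple(100, Duration.ofMinutes(1)))
        .build();

VerboseResult<Boolean> result = bucket.asVerbose().tryConsume(1);

boolean consumed = result.getValue();                   // the regular result of tryConsume
BucketConfiguration config = result.getConfiguration(); // configuration snapshot taken atomically with the operation
long available = result.getDiagnostics().getAvailableTokens(); // low-level diagnostics
----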
==== Principles of result decoration * A void return type is always decorated by `VerboseResult` * A primitive result type like long or boolean is always decorated by the corresponding boxed type, for example `VerboseResult` -* Non primitive reult type always decorated as is, for example `VerboseResult` +* A non-primitive result type is always decorated as is, for example, `VerboseResult` ==== Example of Verbose API usage [source, java] ---- From 6669c1af8b8fca97d96fc0f3bf074e8c60954a63 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:22:39 +0200 Subject: [PATCH 11/24] limitations.adoc --- .../main/docs/asciidoc/basic/limitations.adoc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/basic/limitations.adoc b/asciidoc/src/main/docs/asciidoc/basic/limitations.adoc index 34de1ac1..9c298b61 100644 --- a/asciidoc/src/main/docs/asciidoc/basic/limitations.adoc +++ b/asciidoc/src/main/docs/asciidoc/basic/limitations.adoc @@ -1,8 +1,8 @@ === Technical limitations -In order to provide the best precision, Bucket4j uses integer arithmetic as much as possible, so any internal calculation is limited by bound ``Long.MAX_VALUE``. Library introduces several limits that described further, in order to be sure that calculations will never exceed the bound. +To provide the best precision, Bucket4j uses integer arithmetic as much as possible, so any internal calculation is limited by the bound ``Long.MAX_VALUE``. The library introduces several limits, described further, to be sure that calculations will never exceed the bound. ==== Maximum refill rate -Maximum refill rate is limited by ``1 token/ 1 nanosecond``. Following examples of API usage will raise exceptions +The maximum refill rate is limited by ``1 token / 1 nanosecond``. The following examples of API usage will raise exceptions: [source, java] ---- @@ -23,15 +23,15 @@ Bucket4j works with time intervals as the 64-bit number of nanoseconds. So maxim ---- Duration.ofNanos(Long.MAX_VALUE); ---- -Any attempt to specify period longer that limit above will fail with exception. For example the code bellow will failed +Any attempt to specify a period longer than the limit above will fail with an exception.
For example, the code below will fail: [source, java] ---- Bandwidth.simple(42, Duration.ofMinutes(153722867280912930)); Exception in thread "main" java.lang.ArithmeticException: long overflow - at java.lang.Math.multiplyExact(Math.java:892) - at java.time.Duration.toNanos(Duration.java:1186) - at io.github.bucket4j.Refill.(Refill.java:48) - at io.github.bucket4j.Refill.greedy(Refill.java:100) - at io.github.bucket4j.Bandwidth.simple(Bandwidth.java:102) ----- \ No newline at end of file + at java.lang.Math.multiplyExact(Math.java:892) + at java.time.Duration.toNanos(Duration.java:1186) + at io.github.bucket4j.Refill.(Refill.java:48) + at io.github.bucket4j.Refill.greedy(Refill.java:100) + at io.github.bucket4j.Bandwidth.simple(Bandwidth.java:102) +---- From b9a5eaa867c6d16d269a73c41abfacbbc4cf0703 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:25:12 +0200 Subject: [PATCH 12/24] production-generic-checklist.adoc --- .../basic/production-generic-checklist.adoc | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc b/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc index 286a1b16..fd13cd68 100644 --- a/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc +++ b/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc @@ -1,6 +1,6 @@ === Generic production checklist -The considerations described bellow are applicable to each solution based on the token-bucket or leaky-bucket algorithm. -You need to understand, agree and configure following points: +The considerations described below apply to each solution based on the token-bucket or leaky-bucket algorithm. +You need to understand, agree, and configure the following points: ==== Be wary of long periods When you are planning to use any solution based on token-bucket for throttling incoming requests, you need to pay close attention to the throttling time window. .Example of a dangerous configuration: * Given a bucket with a limit of 10000 tokens per 1 hour per user. -* A malicious attacker may send 9999 request in very short period, for example within 10 seconds. This would correspond to 100 request per second which could seriously impact your system. -* A skilled attacker could stop at 9999 request per hour, and repeat every hour, which would make this attack impossible to detect (because the limit would not be reached). +* A malicious attacker may send 9999 requests in a very short period, for example within 10 seconds. This would correspond to 100 requests per second which could seriously impact your system. +* A skilled attacker could stop at 9999 requests per hour, and repeat every hour, which would make this attack impossible to detect (because the limit would not be reached). -To protect from this kind attacks, you should specify multiple limits like bellow +To protect from this kind of attack, you should specify multiple limits like below: [source, java] ---- Bucket bucket = Bucket.builder() @@ -23,18 +23,18 @@ The number of limits specified per bucket does not impact the performance. [[short-timed-bursts, short-timed bursts]] ==== Be wary of short-timed bursts -Token bucket is an efficient algorithm with low and fixed memory footprint, independently of the incoming request-rate(it can be millions per second) the bucket consumes no more then 40 bytes(five longs).
-But an efficient memory footprint has its own cost - bandwidth limitation is only satisfied over a long period of time. In other words you cannot avoid short-timed bursts. +The token bucket is an efficient algorithm with a low and fixed memory footprint: independently of the incoming request rate (it can be millions per second), the bucket consumes no more than 40 bytes (five longs). +But an efficient memory footprint has its own cost - the bandwidth limitation is only satisfied over a long period. In other words, you cannot avoid short-timed bursts. .Let us describe an example of a local burst: * Given a bucket with a limit of 100 tokens/min. We start with a full bucket, i.e. with 100 tokens. * At ``T1`` 100 requests are made and thus the bucket becomes empty. -* At ``T1+1min`` the bucket is full again because tokens fully regenerated and we can immediately consume 100 tokens. +* At ``T1+1min`` the bucket is full again because the tokens are fully regenerated, and we can immediately consume 100 tokens. -* This means that between ``T1`` and ``T1+1min`` we have consumed 200 tokens. Over a long period of time there will be no more than 100 requests per min, but as shown above it is possible to burst at **twice the limit** here at 100 tokens per min. +* This means that between ``T1`` and ``T1+1min`` we have consumed 200 tokens. Over a long time, there will be no more than 100 requests per min, but as shown above, it is possible to burst at **twice the limit** here at 100 tokens per min. .These bursts are inherent to token bucket algorithms and cannot be avoided. If short-timed bursts are unacceptable, you then have three options: * Do not use Bucket4j or any other solution implemented on top of token-bucket algorithms, because token-bucket is specially designed for network traffic management devices, for which a short-living traffic spike is a regular case; trying to avoid spikes at all contradicts the nature of token-bucket. -* Since the value of burst always equals to capacity, try to reduce the capacity and speed of refill. For example if you have ***strong*** requirements ``100tokens/60seconds`` then configure bucket as ``capacity=50tokens refill=50tokens/60seconds``. It worth to mention that this way leads to following drawbacks: -- In one time you are not allowed to consume amount of tokens greater than capacity, according to example above - before capacity reducing you was able to consume 100 tokens in single request, after reducing you are able to consume 50 tokens in one request at max. -- Reducing the speed of refill leads to underconsumptions on long term periods, it is obvious that with refill ``50tokens/60seconds`` you will be able to consume 3050 tokens for 1 hour, instead of 6100(as was prior refill reducing). -- As a summary of two drawbacks above, we can say that you will pay via **underconsumption** for eliminating the risk of **overconsumption**. \ No newline at end of file +* Since the value of burst always equals the capacity, try to reduce the capacity and speed of refill. For example, if you have ***strong*** requirements ``100tokens/60seconds`` then configure the bucket as ``capacity=50tokens refill=50tokens/60seconds``.
It is worth mentioning that this way leads to the following drawbacks: +-- At one time you are not allowed to consume a number of tokens greater than the capacity; according to the example above, before reducing the capacity you were able to consume 100 tokens in a single request, after reducing you can consume at most 50 tokens in one request. +-- Reducing the speed of refill leads to underconsumption over long periods; it is obvious that with refill ``50tokens/60seconds`` you will be able to consume 3050 tokens in 1 hour, instead of 6100 (as before the refill reduction). +-- As a summary of the two drawbacks above, we can say that you will pay via **underconsumption** for eliminating the risk of **overconsumption**. From 274109d4de22b897e18ab54985ed8943c089954d Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:28:29 +0200 Subject: [PATCH 13/24] quick-start.adoc --- .../main/docs/asciidoc/basic/quick-start.adoc | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc b/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc index 134d7046..b03b6402 100644 --- a/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc +++ b/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc @@ -1,7 +1,7 @@ === Quick start examples ==== How to add Bucket4j as a dependency The Bucket4j is distributed through https://mvnrepository.com/artifact/com.github.vladimir-bukhtoyarov/bucket4j-core[Maven Central]. -You need to add dependency to your project as described bellow in order to be able to compile and run examples +You need to add the dependency to your project as described below in order to be able to compile and run the examples: .Maven dependency [source, xml, subs=attributes+] ---- ---- .Gradle dependency [source, groovy, subs=attributes+] ---- implementation 'com.github.vladimir-bukhtoyarov:bucket4j-core:{revnumber}' ---- ==== Create your first Bucket, limiting the rate of heavy work -Imagine that you have a thread-pool executor and you want to know what your threads are doing in the moment when thread-pool throws RejectedExecutionException. -Printing stacktraces of all threads in the JVM will be the best way to know where are all threads have stuck and why thread-pool is overflown. -But acquiring stacktraces is very cost operation by itself, and you want to do it not often than 1 time per 10 minutes: +Imagine that you have a thread-pool executor and you want to know what your threads are doing at the moment when the thread pool throws RejectedExecutionException. +Printing stack traces of all threads in the JVM will be the best way to know where all the threads are stuck and why the thread pool has overflown. +But acquiring stack traces is a very costly operation by itself, and you want to do it no more often than once per 10 minutes: [source, java] ---- // define the limit 1 time per 10 minute ... try { ---- ==== Using bucket as scheduler -Suppose you need to have the fresh exchange rate between dollars and euros. +Suppose you need to have a fresh exchange rate between dollars and euros.
To get the rate you continuously poll the third-party provider, -and by contract with provider you should poll not often than 100 times per 1 minute, else provider will block your ip: +and by contract with the provider you should poll no more often than 100 times per 1 minute, or the provider will block your IP: [source, java] ---- // define the limit 100 times per 1 minute ... while (true) { ... } ---- ==== Limiting the rate of access to REST API Imagine that you develop yet another social network and you want to provide REST API for third-party developers. -To protect your system from overloading you want to introduce following limitation: +To protect your system from overloading you want to introduce the following limitation: > The bucket size is 50 calls (which cannot be exceeded at any given time), with a "refill rate" of 10 calls per second that continually increases tokens in the bucket. -In other words. if client app averages 10 calls per second, it will never be throttled, -and moreover client have overdraft equals to 50 calls which can be used if average is little bit higher that 10 call/sec on short time period. +In other words, if the client app averages 10 calls per second, it will never be throttled, +and moreover, the client has an overdraft of 50 calls which can be used if the average is a little bit higher than 10 calls/sec over a short time period. -Constructing the bucket to satisfy the requirements above is little bit more complicated that for previous examples, -because we have deal with overdraft, but it is not rocket science: +Constructing the bucket to satisfy the requirements above is a little bit more complicated than for the previous examples, +because we have to deal with overdraft, but it is not rocket science: [source, java] ---- import io.github.bucket4j.Bucket4j; public class ThrottlingFilter implements javax.servlet.Filter { } ---- -If you want provide more information to end user about the state of bucket, then last fragment of code above can be rewritten in following way: +If you want to provide more information to the end-user about the state of the bucket, then the last fragment of code above can be rewritten in the following way: [source, java] ---- HttpServletResponse httpResponse = (HttpServletResponse) servletResponse; ---- ==== Example of multiple bandwidth -Imagine that you are developing load testing tool, in order to be ensure that testable system is able to dispatch 1000 requests per 1 minute. -But you do not want to randomly kill the testable system by generation all 1000 events in one second instead of 1 minute. -To solve problem you can construct following bucket: +Imagine that you are developing a load-testing tool, in order to ensure that the system under test is able to dispatch 1000 requests per 1 minute. +But you do not want to randomly kill the system under test by generating all 1000 events in one second instead of 1 minute. +To solve the problem you can construct the following bucket: [source, java] ---- static final long MAX_WAIT_NANOS = TimeUnit.HOURS.toNanos(1); ... while (true) { ... } ---- ==== Specifying initial amount of tokens -By default initial size of bucket equals to capacity. +By default, the initial size of the bucket equals its capacity.
+But sometimes, you may want to have a smaller initial size, for example, in the case of a cold start, in order to prevent denial of service: [source, java] ---- @@ -179,12 +179,12 @@ Bucket bucket = Bucket.builder() ---- ==== Turning-off the refill greediness -When bandwidth created via ``Bandwidth#simple`` method it does refill in greedy manner, because bandwidth tries to add the tokens to bucket as soon as possible. -For example bandwidth with refill "10 tokens per 1 second" will add 1 token per each 100 millisecond, -in other words refill will not wait 1 second to regenerate whole bunch of 10 tokens. +When a bandwidth is created via the ``Bandwidth#simple`` method it does refill in a greedy manner, because the bandwidth tries to add the tokens to the bucket as soon as possible. +For example, a bandwidth with refill "10 tokens per 1 second" will add 1 token every 100 milliseconds; +in other words, the refill will not wait 1 second to regenerate a whole bunch of 10 tokens. -If greediness is undesired then you should explicitly choose non-greedy refill. -For example the bandwidth bellow will refill 10 tokens per 1 second instead of 1 token per 100 milliseconds: +If greediness is undesired then you should explicitly choose a non-greedy refill. +For example, the bandwidth below will refill 10 tokens per 1 second instead of 1 token per 100 milliseconds: [source, java] ---- // When refill created via "intervally" factory method then greediness is turned-off. Refill refill = Refill.intervally(10, Duration.ofSeconds(1)); Bandwidth bandwidth = Bandwidth.classic(600, refill); ---- -Also it is possible to specify the time when first refill should happen. -This option can be used to configure clear interval boundary i.e. start of second, minute, hour, day. +Also, it is possible to specify the time when the first refill should happen. +This option can be used to configure a clear interval boundary, i.e. the start of a second, minute, hour, or day. [source, java] ---- // imagine that wall clock is 16:20, and we need to schedule the first refill to 17:00 ---- ==== Returning tokens back to bucket -The https://en.wikipedia.org/wiki/Compensating_transaction[compensating transaction] is one of obvious use case when you want to return tokens back to bucket: +The https://en.wikipedia.org/wiki/Compensating_transaction[compensating transaction] is one of the obvious use cases when you want to return tokens back to the bucket: [source, java] ---- Bucket wallet; if (wallet.tryConsume(50)) { // get 50 cents from wallet ---- ==== Customizing time measurement - choosing nanotime time resolution -By default Bucket4j uses millisecond time resolution, it is preferred time measurement strategy. -But rarely(for example benchmarking) you wish the nanosecond precision: +By default, Bucket4j uses millisecond time resolution; it is the preferred time measurement strategy. +But rarely (for example, for benchmarking) you may wish for nanosecond precision: [source, java] ---- Bucket.builder().withNanosecondPrecision() ---- Be very careful to choose this time measurement strategy, because ``System.nanoTime()`` produces inaccurate results, use this strategy only if the period of the bandwidth is so small that millisecond resolution would be undesired.
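For illustration, here is a minimal sketch of a configuration where nanosecond precision is actually justified by a sub-millisecond refill period (the numbers are illustrative, not a recommendation):

[source, java]
----
// a sub-millisecond refill granularity cannot be expressed with millisecond resolution,
// so nanosecond precision is the appropriate choice here
Bucket bucket = Bucket.builder()
        .withNanosecondPrecision()
        .addLimit(Bandwidth.simple(10, Duration.ofMillis(1)))
        .build();
----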
==== Customizing time measurement - Specify custom time measurement strategy -You can specify your custom time meter, if existing miliseconds or nanotime time meters is not enough for your purposes. -Imagine that you have a clock, which synchronizes its time with other machines in current cluster, -if you want to use time provided by this clock instead of time provided by JVM then you can write something like this: +You can specify your custom time meter if the existing millisecond or nanotime time meters are not enough for your purposes. +Imagine that you have a clock which synchronizes its time with other machines in the current cluster; +if you want to use the time provided by this clock instead of the time provided by the JVM, then you can write something like this: [source, java] ---- Bucket bucket = Bucket.builder() .withCustomTimePrecision(new ClusteredTimeMeter()) .addLimit(limit) .build(); ----- \ No newline at end of file +---- From 382aab0941e431b5d7d6b1706f232cc5fe5b9fa7 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:29:11 +0200 Subject: [PATCH 14/24] coherence.adoc --- .../main/docs/asciidoc/distributed/jcache/coherence.adoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc index 00fb0432..1ff4bea1 100644 --- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc +++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc @@ -1,7 +1,7 @@ [[bucket4j-coherence, Bucket4j-Coherence]] === Oracle Coherence integration ==== Dependencies -To use ``bucket4j-coherence`` extension you need to add following dependency: +To use the ``bucket4j-coherence`` extension you need to add the following dependency: [source, xml, subs=attributes+] ---- ==== Example of Bucket instantiation [source, java] ---- Bucket bucket = proxyManager.builder().build(configuration); ==== Configuring POF serialization for Bucket4j library classes If you configure nothing, then by default Java serialization will be used for serializing Bucket4j library classes. Java serialization can be rather slow and should be avoided in general. -``Bucket4j`` provides https://docs.oracle.com/cd/E24290_01/coh.371/e22837/api_pof.htm#COHDG1363[custom POF serializers] for all library classes that could be transferred over network. -To let Coherence know about POF serializers you should register three serializers in the POF configuration config file: +``Bucket4j`` provides https://docs.oracle.com/cd/E24290_01/coh.371/e22837/api_pof.htm#COHDG1363[custom POF serializers] for all library classes that could be transferred over the network. +To let Coherence know about the POF serializers you should register three serializers in the POF configuration file: ==== ``io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer`` for class ``io.github.bucket4j.grid.coherence.CoherenceProcessor`` ==== @@ -55,4 +55,4 @@ To let Coherence know about POF serializers you should register three serializer ---- -Double check with https://docs.oracle.com/cd/E24290_01/coh.371/e22837/api_pof.htm#COHDG5182[official Oracle Coherence documentation] in case of any questions related to ``Portable Object Format``. +Double-check with https://docs.oracle.com/cd/E24290_01/coh.371/e22837/api_pof.htm#COHDG5182[official Oracle Coherence documentation] in case of any questions related to ``Portable Object Format``.
From 61662ca5319d577c2d17075dcab93a2fa53eb0c5 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:30:34 +0200 Subject: [PATCH 15/24] hazelcast.adoc --- .../asciidoc/distributed/jcache/hazelcast.adoc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc index d5714ff2..463c06b3 100644 --- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc +++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc @@ -1,7 +1,7 @@ [[bucket4j-hazelcast, Bucket4j-Hazelcast]] === Hazelcast integration ==== Dependencies -To use Bucket4j extension for Hazelcast with ``Hazelcast 4.x`` you need to add following dependency: +To use the Bucket4j extension for Hazelcast with ``Hazelcast 4.x`` you need to add the following dependency: [source, xml, subs=attributes+] ---- {revnumber} ---- -If you are using legacy version of Hazelcast ``3.x`` then you need to add following dependency: +If you are using a legacy version of Hazelcast ``3.x`` then you need to add the following dependency: [source, xml, subs=attributes+] ---- ---- ==== General compatibility matrix principles: -* Bucket4j authors do not perform continues monitoring of new Hazelcast releases. So, there is can be case when there is no one version of Bucket4j which is compatible with newly released Hazelcast, -just log issue to https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case, adding support to new version of Hazelcast is usually easy exercise. +* Bucket4j authors do not perform continuous monitoring of new Hazelcast releases. So, there can be a case when no version of Bucket4j is compatible with a newly released Hazelcast; +just log an issue to the https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case, adding support for a new version of Hazelcast is usually an easy exercise. * Integrations with legacy versions of Hazelcast are not removed without a clear reason. Hence you are safe: even if you are working in a big enterprise company that does not update its infrastructure frequently, you still get new Bucket4j features even for legacy Hazelcast releases. ==== Example of Bucket instantiation [source, java] ---- Bucket bucket = proxyManager.builder().build(configuration); ---- ==== Configuring Custom Serialization for Bucket4j library classes If you configure nothing, then by default Java serialization will be used for serializing Bucket4j library classes. Java serialization can be rather slow and should be avoided in general. -``Bucket4j`` provides https://docs.hazelcast.org/docs/3.0/manual/html/ch03s03.html[custom serializers] for all library classes that could be transferred over network. +``Bucket4j`` provides https://docs.hazelcast.org/docs/3.0/manual/html/ch03s03.html[custom serializers] for all library classes that could be transferred over the network. To let Hazelcast know about the fast serializers you should register them programmatically in the serialization config: [source, java] ---- @@ -55,15 +55,15 @@ import io.github.bucket4j.grid.hazelcast.serialization.HazelcastSerializer; // the starting type ID number for Bucket4j classes.
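// note: these type IDs become part of the serialized payload, so every JVM in the cluster must register the same IDs (this follows general Hazelcast custom-serialization behavior, not a Bucket4j-specific rule)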
// you are free to choose any unused ID, but be aware that Bucket4j uses 2 types currently, - // and may use more types in the future, so leave enough empty space after baseTypeIdNumber + // and may use more types in the future, so leave enough empty space after baseTypeIdNumber int baseTypeIdNumber = 10000; - + HazelcastProxyManager.addCustomSerializers(serializationConfig, baseTypeIdNumber); ---- ==== Support for externally managed Hazelcast without classpath access `bucket4j-hazelcast` requires putting the Bucket4j jars to the classpath of each node of the Hazelcast cluster. -Sometimes you have no control over classpath because Hazelcast cluster is externally managed(Paas scenario). +Sometimes you have no control over the classpath because the Hazelcast cluster is externally managed (PaaS scenario). In such cases ```HazelcastProxyManager``` cannot be used because it is implemented on top of https://docs.hazelcast.com/imdg/4.2/computing/entry-processor[EntryProcessor] functionality. .Bucket4j provides two alternatives for PaaS topology: HazelcastCompareAndSwapBasedProxyManager:: is implemented on top IMap methods `g... This implementation requires 2 network hops if no contention happens, but in case of high contention on the key the amount of hops is unpredictable. .Limitations of HazelcastLockBasedProxyManager and HazelcastCompareAndSwapBasedProxyManager -* `HazelcastLockBasedProxyManager` does not provide async API because lack of `lockAsync` and `unlockAsync` methods inside IMap API. +* `HazelcastLockBasedProxyManager` does not provide an async API because of the lack of `lockAsync` and `unlockAsync` methods inside the IMap API. * `HazelcastCompareAndSwapBasedProxyManager` does not provide an async API because of the lack of `replaceAsync` and `putIfAbsentAsync` methods inside the IMap API. If you wish the async API to be supported by `HazelcastLockBasedProxyManager` and `HazelcastCompareAndSwapBasedProxyManager`, ask the Hazelcast maintainers to support the missed APIs mentioned above. From f906b442bc09459068cc0a0cc3c1c7352cc55a0b Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:31:23 +0200 Subject: [PATCH 16/24] ignite.adoc --- .../docs/asciidoc/distributed/jcache/ignite.adoc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc index 3bf3b108..eb994894 100644 --- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc +++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc @@ -1,15 +1,15 @@ [[bucket4j-ignite, Bucket4j-Ignite]] === Apache Ignite integration Before using the ``bucket4j-ignite`` module please read [bucket4j-jcache documentation](jcache-usage.md), -because ``bucket4j-ignite`` is just a follow-up of ``bucket4j-jcache``. +because ``bucket4j-ignite`` is just a follow-up of ``bucket4j-jcache``. Bucket4j supports Ignite Thin-Client as well as regular deployment scenarios. -**Question:** Bucket4j already supports JCache since version ``1.2``. Why it was needed to introduce direct support for ``Apache Ignite``? +**Question:** Bucket4j already supports JCache since version ``1.2``. Why was it needed to introduce direct support for ``Apache Ignite``?
**Answer:** Because https://www.jcp.org/en/jsr/detail?id=107[JCache API (JSR 107)] does not specify asynchronous API, developing the dedicated module ``bucket4j-ignite`` was the only way to provide asynchrony for users who use ``Bucket4j`` and ``Apache Ignite`` together. -**Question:** Should I migrate from ``bucket4j-jcache`` to ``bucketj-ignite`` If I do not need in asynchronous API? +**Question:** Should I migrate from ``bucket4j-jcache`` to ``bucket4j-ignite`` if I do not need an asynchronous API? **Answer:** No, you should not migrate to ``bucket4j-ignite`` in this case. ==== Dependencies To use the ``bucket4j-ignite`` extension you need to add the following dependency: [source, xml, subs=attributes+] ---- ---- ==== Example of Bucket instantiation via ``Thick`` Ignite client [source, java] ---- BucketConfiguration configuration = BucketConfiguration.builder() .build(); Bucket bucket = proxyManager.builder().build(key, configuration); ---- -IMPORTANT: Pay attention that IgniteProxyManager requires from all nodes in the cluster to contain bucket4j Jars in classpath. +IMPORTANT: Pay attention that IgniteProxyManager requires all nodes in the cluster to contain the bucket4j Jars in the classpath. ==== Example of Bucket instantiation via Thin Client [source, java] ---- BucketConfiguration configuration = BucketConfiguration.builder() .build(); Bucket bucket = proxyManager.builder().build(key, configuration); ---- -IMPORTANT: Pay attention that IgniteThinClientProxyManager requires from all nodes in the cluster to contain bucket4j Jars in classpath. +IMPORTANT: Pay attention that IgniteThinClientProxyManager requires all nodes in the cluster to contain the bucket4j Jars in the classpath. ==== Example of Bucket instantiation via Thin Client and IgniteThinClientCasBasedProxyManager [source, java] ---- BucketConfiguration configuration = BucketConfiguration.builder() .build(); Bucket bucket = proxyManager.builder().build(key, configuration); ---- -IMPORTANT: IgniteThinClientCasBasedProxyManager does not require from all nodes in the cluster to contain bucket4j Jars in classpath, but it operates with more latency, so choose it over IgniteThinClientProxyManager if and only if you have no control over cluster classpath. \ No newline at end of file +IMPORTANT: IgniteThinClientCasBasedProxyManager does not require all nodes in the cluster to contain the bucket4j Jars in the classpath, but it operates with more latency, so choose it over IgniteThinClientProxyManager if and only if you have no control over the cluster classpath.
From d7ba3452536cf64fb50a5e52c29a7f0931b729c7 Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:32:25 +0200 Subject: [PATCH 17/24] infinispan.adoc --- .../distributed/jcache/infinispan.adoc | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc index b64eab98..cb03f1a5 100644 --- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc +++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc @@ -10,7 +10,7 @@ To use ``bucket4j-infinispan`` with ``Infinispan 9.x, 10.x`` extension you need {revnumber} ---- -If you are using legacy version of Infinispan ``8.x`` then you need to add following dependency: +If you are using a legacy version of Infinispan ``8.x`` then you need to add the following dependency: [source, xml, subs=attributes+] ---- ---- ==== General compatibility matrix principles:: -* Bucket4j authors do not perform continues monitoring of new Infinispan releases. So, there is can be case when there is no one version of Bucket4j which is compatible with newly released Infinispan, just log issue to https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case, adding support to new version of Infinispan is usually easy exercise. +* Bucket4j authors do not perform continuous monitoring of new Infinispan releases. So, there can be a case when no version of Bucket4j is compatible with a newly released Infinispan; just log an issue to the https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case, adding support for a new version of Infinispan is usually an easy exercise. * Integrations with legacy versions of Infinispan are not removed without a clear reason. Hence, you are safe: even if you are working in a big enterprise company that does not update its infrastructure frequently, you still get new Bucket4j features even for legacy Infinispan releases. ==== Special notes for Infinispan 10.0+ -As mentioned in the https://infinispan.org/docs/dev/titles/developing/developing.html#marshalling[Infinispan Marshalling documentation], since release ``10.0.0`` Infinispan does not allow deserialization of custom payloads into Java classes. If you do not configure serialization(as described bellow), you will get the error like this on any attempt to use Bucket4j with brand new Infinispan release: +As mentioned in the https://infinispan.org/docs/dev/titles/developing/developing.html#marshalling[Infinispan Marshalling documentation], since release ``10.0.0`` Infinispan does not allow deserialization of custom payloads into Java classes. If you do not configure serialization (as described below), you will get an error like this on any attempt to use Bucket4j with a brand new Infinispan release:
If you do not configure serialization(as described below), you will get an error like this on any attempt to use Bucket4j with a brand new Infinispan release: [source, bash] ---- Jan 02, 2020 4:57:56 PM org.infinispan.marshall.persistence.impl.PersistenceMarshallerImpl objectToBuffer WARN: ISPN000559: Cannot marshall 'class io.github.bucket4j.grid.infinispan.InfinispanProcessor' java.lang.IllegalArgumentException: No marshaller registered for Java type io.github.bucket4j.grid.infinispan.SerializableFunctionAdapter - at org.infinispan.protostream.impl.SerializationContextImpl.getMarshallerDelegate(SerializationContextImpl.java:279) - at org.infinispan.protostream.WrappedMessage.writeMessage(WrappedMessage.java:240) - at org.infinispan.protostream.ProtobufUtil.toWrappedStream(ProtobufUtil.java:196) + at org.infinispan.protostream.impl.SerializationContextImpl.getMarshallerDelegate(SerializationContextImpl.java:279) + at org.infinispan.protostream.WrappedMessage.writeMessage(WrappedMessage.java:240) + at org.infinispan.protostream.ProtobufUtil.toWrappedStream(ProtobufUtil.java:196) ---- There are three options to solve this problem: -* Configure Jboss marshalling instead of default ProtoStream marshaller as described https://infinispan.org/docs/dev/titles/developing/developing.html#jboss_marshalling[there]. +* Configure Jboss marshaling instead of defaulting ProtoStream marshaller as described https://infinispan.org/docs/dev/titles/developing/developing.html#jboss_marshalling[there]. * Configure Java Serialization Marshaller instead of default ProtoStream marshaller, as described https://infinispan.org/docs/dev/titles/developing/developing.html#java_serialization_marshaller[there]. Do not forget to add ``io.github.bucket4j.*`` regexp to the whitelist if choosing this way. -* And last way(recommended) just register ``Bucket4j serialization context initializer`` in the serialization configuration. +* And last way(recommended) just register ``Bucket4j serialization context initializer`` in the serialization configuration. You can do it in both programmatically and declarative ways: .Programmatic registration of Bucket4jProtobufContextInitializer @@ -59,7 +59,7 @@ builder.serialization().addContextInitializer(new Bucket4jProtobufContextInitial ---- -And that is all. Just registering ``Bucket4jProtobufContextInitializer`` in any way is enough to make Bucket4j compatible with ProtoStream marshaller, you do not have to care about ``*.proto`` files, annotations, whitelist etc, all neccessary Protobuffers configs generated by ``Bucket4jProtobufContextInitializer`` and registerd on the fly. +And that is all. Just registering ``Bucket4jProtobufContextInitializer`` in any way is enough to make Bucket4j compatible with ProtoStream marshaller, you do not have to care about ``*.proto`` files, annotations, whitelist, etc, all necessary Protobuffers configs generated by ``Bucket4jProtobufContextInitializer`` and register on the fly. 
==== Example of Bucket instantiation [source, java] ---- BucketConfiguration configuration = BucketConfiguration.builder() .build(key, configuration); Bucket bucket = proxyManager.builder().build(configuration); ----- \ No newline at end of file +---- From 21beb6484079b3159ca3f0fc7fceb2af1f409fab Mon Sep 17 00:00:00 2001 From: mbartkov Date: Thu, 10 Feb 2022 21:34:38 +0200 Subject: [PATCH 18/24] jcache-usage.adoc --- .../distributed/jcache/jcache-usage.adoc | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc index 754e2a5c..e7c32a72 100644 --- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc +++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc @@ -2,9 +2,9 @@ === JCache integration ``Bucket4j`` supports any GRID solution which is compatible with the https://www.jcp.org/en/jsr/detail?id=107[JCache API (JSR 107)] specification. -NOTE: Do not forget to read <> before using the Bucket4j over JCache cluster. +NOTE: Do not forget to read <> before using Bucket4j over a JCache cluster. -To use JCache extension you also need to add following dependency: +To use the JCache extension you also need to add the following dependency: [source, xml, subs=attributes+] ---- ---- -JCache expects javax.cache.cache-api to be a provided dependency. Do not forget to add following dependency: +JCache expects javax.cache.cache-api to be a provided dependency. Do not forget to add the following dependency: [source, xml] ---- ---- ==== Example 1 - limiting access to HTTP server by IP address -> Imagine that you develop any Servlet based WEB application and want to limit access per IP basis. You want to use same limits for each IP - 30 requests per minute. +> Imagine that you develop any Servlet-based WEB application and want to limit access on a per-IP basis. +You want to use the same limits for each IP - 30 requests per minute. -ServletFilter would be obvious place to check limits: +ServletFilter would be the obvious place to check limits: [source, java] ---- public class IpThrottlingFilter implements javax.servlet.Filter { private static final BucketConfiguration configuration = BucketConfiguration.builder() .addLimit(Bandwidth.simple(30, Duration.ofMinutes(1))) .build(); // cache for storing token buckets, where IP is key.
@Inject private javax.cache.Cache cache; private ProxyManager buckets; @Override public void init(FilterConfig filterConfig) throws ServletException { // init bucket registry buckets = new JCacheProxyManager<>(cache); } @Override public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) servletRequest; String ip = IpHelper.getIpFromRequest(httpRequest); + + // acquire cheap proxy to the bucket Bucket bucket = proxyManager.builder().build(key, configuration); // tryConsume returns false immediately if no tokens available with the bucket @@ -74,12 +74,12 @@ public class IpThrottlingFilter implements javax.servlet.Filter { ---- ==== Example 2 - limiting access to service by contract agreements -> Imagine that you provide paid language translation service via HTTP. Each user has unique agreement which differs from each other. Details of each agreement is stored in relational database, and takes significant time to fetch(for example 100ms). The example above will not work fine in this case, because time to create/fetch configuration of bucket from database will be 100 times slower than limit-checking itself. Bucket4j solves this problem via lazy configuration suppliers which are called if and only if bucket was not yet stored in grid, thus it is possible to implement solution that will read agreement from database once per each user. +> Imagine that you provide a paid language translation service via HTTP. Each user has a unique agreement that differs from the others. +Details of each agreement are stored in a relational database and take significant time to fetch (for example, 100ms). +The example above will not work fine in this case, because the time to create/fetch the configuration of the bucket from the database will be 100 times slower than the limit-checking itself. +Bucket4j solves this problem via lazy configuration suppliers which are called if and only if the bucket was not yet stored in the grid, +thus it is possible to implement a solution that will read the agreement from the database once per user. [source, java] ---- public class IpThrottlingFilter implements javax.servlet.Filter { // service to provide per user limits @Inject private LimitProvider limitProvider; // cache for storing token buckets, where IP is key. @Inject private javax.cache.Cache cache; private ProxyManager buckets; @Override public void init(FilterConfig filterConfig) throws ServletException { // init bucket registry buckets = new JCacheProxyManager<>(cache); } @Override public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) servletRequest; String userId = AutentificationHelper.getUserIdFromRequest(httpRequest); + + // prepare configuration supplier which will be called (on the first interaction with the proxy) if the bucket was not saved yet previously.
         Supplier<BucketConfiguration> configurationLazySupplier = getConfigSupplierForUser(userId);
-
-        // acquire cheap proxy to bucket
+
+        // acquire the cheap proxy to the bucket
         Bucket bucket = buckets.builder().build(userId, configurationLazySupplier);
 
         // tryConsume returns false immediately if no tokens are available in the bucket
@@ -124,7 +124,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
             httpResponse.getWriter().append("Too many requests");
         }
     }
-
+
     private Supplier<BucketConfiguration> getConfigSupplierForUser(String userId) {
         return () -> {
             long translationsPerDay = limitProvider.readPerDayLimitFromAgreementsDatabase(userId);
@@ -138,7 +138,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
 }
 ----
 
 ==== Why the JCache specification is not enough in modern stacks, and why since 3.0 dedicated modules were introduced for Infinispan, Hazelcast, Coherence, and Ignite
-Asynchronous processing is very important for high-throughput applications, but JCache specification does not specify asynchronous API, because two early attempts to bring this kind functionality at spec level https://github.com/jsr107/jsr107spec/issues/307[307], https://github.com/jsr107/jsr107spec/issues/312[312] were failed in absence of consensus.
+Asynchronous processing is very important for high-throughput applications, but the JCache specification does not define an asynchronous API, because the two early attempts to bring this kind of functionality to the spec level, https://github.com/jsr107/jsr107spec/issues/307[307] and https://github.com/jsr107/jsr107spec/issues/312[312], failed in the absence of consensus.
 
 .Sad, but true: if you need an asynchronous API, then the JCache extension is useless, and you need to choose from the following extensions:
 * <>
 * <>
 * <>
 * <>
 
-Also, implementation the asynchronous support for any other JCache provider outside from the list above should be easy exercise, so feel free to return back the pull request addressed to cover your favorite JCache provider.
+Also, implementing asynchronous support for any other JCache provider outside of the list above should be an easy exercise, so feel free to contribute a pull request that covers your favorite JCache provider.
 
-==== Verification of compatibility with particular JCache provider is your responsibility
-IMPORTANT: Keep in mind that there are many non-certified implementations of JCache specification on the market.
+==== Verification of compatibility with a particular JCache provider is your responsibility
+IMPORTANT: Keep in mind that there are many non-certified implementations of the JCache specification on the market.
 Many of them want to increase their popularity by declaring support for the JCache API, but often only the API is supported and the semantics of JCache are totally ignored.
-Usage Bucket4j with this kind of libraries should be completely avoided.
+Using Bucket4j with this kind of library should be completely avoided.
 
-Bucket4j is only compatible with implementations which obey the JCache specification rules(especially related to EntryProcessor execution). Oracle Coherence, Apache Ignite, Hazelcast are good examples of safe implementations of JCache.
+Bucket4j is only compatible with implementations that obey the JCache specification rules (especially those related to EntryProcessor execution). Oracle Coherence, Apache Ignite, and Hazelcast are good examples of safe implementations of JCache.
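To make the EntryProcessor requirement concrete, the probe below shows the kind of atomic read-modify-write that Bucket4j performs through ``Cache.invoke()``. This is a minimal illustrative sketch (the class name ``EntryProcessorProbe`` is ours, not part of Bucket4j): a provider is only safe if concurrent invocations of such a processor never lose updates.

[source, java]
----
import javax.cache.Cache;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

public class EntryProcessorProbe {

    // Atomically increments a counter stored in the cache. A spec-compliant
    // provider must execute the processor atomically per key, so no two
    // concurrent invocations may observe the same old value.
    private static final EntryProcessor<String, Integer, Integer> INCREMENT =
            (MutableEntry<String, Integer> entry, Object... args) -> {
                int next = entry.exists() ? entry.getValue() + 1 : 1;
                entry.setValue(next); // this write must never be lost
                return next;
            };

    public static int increment(Cache<String, Integer> cache, String key) {
        return cache.invoke(key, INCREMENT);
    }
}
----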
IMPORTANT: Because it is impossible to test all possible JCache providers, you need to test your provider yourself.
 
@@ -211,4 +211,4 @@ public class CompatibilityTest {
 }
 ----
 The check does 4000 increments of an integer in parallel and verifies that no update has been missed.
-If check passed then your JCache provider is compatible with Bucket4j, the throttling will work fine in distributed and concurrent environment. If check is not passed, then reach to the particular JCache provider team and consult why its implementation misses the writes.
\ No newline at end of file
+If the check passes, then your JCache provider is compatible with Bucket4j and the throttling will work fine in a distributed and concurrent environment. If the check does not pass, then reach out to the particular JCache provider team and ask why its implementation misses writes.

From d27977d21d902f8141fd71a254cb38ecc9c7474f Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:36:26 +0200
Subject: [PATCH 19/24] asynchronous.adoc

---
 .../asciidoc/distributed/asynchronous.adoc    | 30 +++++++++----------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc b/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
index 38b35bdc..02663fe1 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
@@ -1,5 +1,5 @@
 === Asynchronous API
-Since version ``3.0`` Bucket4j provides asynchronous analogs for majority of API methods.
+Since version ``3.0`` Bucket4j provides asynchronous analogs for the majority of API methods.
 The async view of a proxyManager is available through the ``asAsync()`` method:
 [source, java]
 ----
 AsyncProxyManager<String> asyncProxyManager = proxyManager.asAsync();
 BucketConfiguration configuration = ...;
 AsyncBucketProxy asyncBucket = asyncProxyManager.builder().build(key, configuration);
 ----
-Each method of class ```AsyncBucketProxy``` has full equivalence with same semantic in synchronous version in the ```Bucket``` class.
+Each method of the ```AsyncBucketProxy``` class is fully equivalent, with the same semantics, to its synchronous counterpart in the ```Bucket``` class.
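For instance, ``tryConsume`` on the asynchronous view returns a ``CompletableFuture<Boolean>`` instead of blocking the calling thread. A minimal sketch, reusing ``asyncBucket`` from the snippet above (the callback bodies are placeholders):

[source, java]
----
asyncBucket.tryConsume(1).thenAccept(consumed -> {
    if (consumed) {
        // token granted - continue processing the request
    } else {
        // limit exceeded - reject or postpone the request
    }
});
----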
-==== Example - limiting the rate of access to asynchronous servlet
-Imagine that you develop SMS service, which allows send SMS via HTTP interface.
-You want from your architecture to be protected from overloading, clustered and fully asynchronous.
+==== Example - limiting the rate of access to an asynchronous servlet
+Imagine that you develop an SMS service, which allows sending SMS via an HTTP interface.
+You want your architecture to be protected from overloading, clustered, and fully asynchronous.
 
 **Overloading protection requirement:**
-> To prevent fraud and service overloading you want to introduce following limit for any outbound phone number: The bucket size is 20 SMS (which cannot be exceeded at any given time), with a "refill rate" of 10 SMS per minute that continually increases tokens in the bucket.
-In other words, if client sends 10 SMS per minute, it will never be throttled,
-and moreover client have overdraft equals to 20 SMS which can be used if average is little bit higher that 10 SMS/minute on short time period.
-**Solution:** lets use bucket4j for this.
+> To prevent fraud and service overloading you want to introduce the following limit for any outbound phone number: The bucket size is 20 SMS (which cannot be exceeded at any given time), with a "refill rate" of 10 SMS per minute that continually adds tokens to the bucket.
+In other words, if a client sends 10 SMS per minute, it will never be throttled;
+moreover, the client has an overdraft equal to 20 SMS, which can be used if the average is a little bit higher than 10 SMS/minute over a short time period.
+**Solution:** let's use bucket4j for this.
 
 **Clustering requirement:**
 > You want to avoid a single point of failure: if one server crashes, the information about consumed tokens should not be lost;
-thus it would be better to use any distributed computation platform for storing the buckets.
+thus it would be better to use a distributed computation platform for storing the buckets.
 
-**Solution:** lets use JBoss Infinispan for this and ``bucket4j-infinispan`` extension.
-Hazelcast and Apache Ignite will be also well choice, Infinispan just selected as example.
+**Solution:** let's use JBoss Infinispan for this, via the ``bucket4j-infinispan`` extension.
+Hazelcast and Apache Ignite would also be good choices; Infinispan is just selected as an example.
 
 **Asynchronous processing requirement:**
-Also for maximum scalability you want from architecture to be fully non-blocking,
-non-blocking architecture means that both SMS sending and limit checking should be asynchronous.
-**Solution:** lets use asynchronous features provided by bucket4j and Servlet-API.
+Also, for maximum scalability, you want the architecture to be fully non-blocking;
+a non-blocking architecture means that both SMS sending and limit checking should be asynchronous.
+**Solution:** let's use the asynchronous features provided by bucket4j and the Servlet API.
 
 **Mockup of service based on top of Servlet API and bucket4j-infinispan**:
 [source, java]

From b8fff5efc1789075f292354e835e6b228144b622 Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:37:33 +0200
Subject: [PATCH 20/24] distributed-checklist.adoc

---
 .../distributed/distributed-checklist.adoc    | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/asciidoc/src/main/docs/asciidoc/distributed/distributed-checklist.adoc b/asciidoc/src/main/docs/asciidoc/distributed/distributed-checklist.adoc
index 82abfa1f..a861e82f 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/distributed-checklist.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/distributed-checklist.adoc
@@ -1,28 +1,28 @@
 [[distributed-checklist, Distributed usage checklist]]
 === Production checklist, especially in the context of distributed systems
-Before using Bucket4j in clustered scenario you need to understand, agree and configure following points:
+Before using Bucket4j in a clustered scenario you need to understand, agree on, and configure the following points:
 
 .Do not forget about exception handling
-When working within a distributed system, it is innevitable that requests may cross the border of the current JVM, leading to a communication on the network.
-Network being unreliable, it is impossible to avoid failures. Thus you should embrace this reality and be ready to get unchecked exceptions when interacting with a distributed bucket.
+When working within a distributed system, it is inevitable that requests may cross the border of the current JVM, leading to communication over the network.
+The network being unreliable, it is impossible to avoid failures. Thus you should embrace this reality and be ready to get unchecked exceptions when interacting with a distributed bucket.
 
 **It is your responsibility to handle (or ignore) such exceptions**, for example as sketched after this list:
 * You probably do not want to fail business transactions if the grid responsible for throttling goes down. If this is the case, you can simply log the exception and continue your business transaction without throttling
 * If you wish to fail your business transaction when the grid responsible for throttling goes down, simply rethrow or don't catch the exception
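A minimal sketch of the first, "fail open" option (``bucket`` and ``logger`` are assumed to be defined as in the filter examples earlier; the concrete exception type depends on your grid provider):

[source, java]
----
boolean allowRequest;
try {
    // any network or grid failure surfaces here as an unchecked exception
    allowRequest = bucket.tryConsume(1);
} catch (RuntimeException e) {
    // the grid responsible for throttling is unavailable:
    // log the problem and let the business transaction proceed unthrottled
    logger.warn("Rate limiter unavailable, proceeding without throttling", e);
    allowRequest = true;
}
----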
 .Do not forget to configure backups
-If the state of any bucket should survive the restart/crash of grid node that holds its state, you need to configure backups yourself, in way specific to the particular grid vendor. For example, see how to https://apacheignite.readme.io/v2.3/docs/primary-and-backup-copies[configure backups for Apache Ignite].
+If the state of any bucket should survive the restart/crash of the grid node that holds its state, you need to configure backups yourself, in a way specific to the particular grid vendor. For example, see how to https://apacheignite.readme.io/v2.3/docs/primary-and-backup-copies[configure backups for Apache Ignite].
 
 .Retention tuning is your responsibility
 When dealing with multi-tenant scenarios like a bucket per user or a bucket per IP address,
-the amount of buckets in the cache will continuously increase. This is because a new bucket will be created each time a new key is detected.
-To prevent exhausting the available memory of your cluster you need to configure following aspects:
+the number of buckets in the cache will continuously increase. This is because a new bucket will be created each time a new key is detected.
+To prevent exhausting the available memory of your cluster you need to configure the following aspects (see the sketch after this list):
 
 * **Expiration since last access** - in order to allow the grid to remove the keys which haven't been used in a long time. For example, see how to https://apacheignite.readme.io/docs/expiry-policies[configure expiration policy for Apache Ignite].
-* **Maximum cache size(in units or bytes)** - Obviously it is preferable to lose bucket data than lose the whole cluster due to out of memory exception.
+* **Maximum cache size (in entries or bytes)** - Obviously, it is preferable to lose bucket data than to lose the whole cluster due to an out-of-memory error.
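For providers that support the standard JCache expiry policies, the expiration aspect can be configured at cache-creation time. A minimal sketch (the cache name and the one-hour window are arbitrary illustrative choices; the maximum-size cap is vendor-specific and not shown):

[source, java]
----
import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import javax.cache.configuration.MutableConfiguration;
import javax.cache.expiry.AccessedExpiryPolicy;
import javax.cache.expiry.Duration;

CacheManager cacheManager = Caching.getCachingProvider().getCacheManager();

// evict buckets that have not been accessed for one hour
MutableConfiguration<String, byte[]> cacheConfiguration = new MutableConfiguration<String, byte[]>()
        .setExpiryPolicyFactory(AccessedExpiryPolicy.factoryOf(Duration.ONE_HOUR));

Cache<String, byte[]> cache = cacheManager.createCache("rate-limit-buckets", cacheConfiguration);
----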
 .High availability (HA) tuning and testing is your responsibility
-There are no special settings for HA supported by Bucket4j, because Bucket4j does nothing more that just invoking EntryProcessors on the cache.
-Instead Bucket4j relies on *you* to configure the cache with proper parameters that control redundancy and high availability.
+There are no special settings for HA supported by Bucket4j, because Bucket4j does nothing more than invoke EntryProcessors on the cache.
+Instead, Bucket4j relies on *you* to configure the cache with proper parameters that control redundancy and high availability.
 
-Years of experience working with distributed system has tought the author that High Availability does not come for free. You need to test and verify that your system remains available. This cannot be provided by this or any other library. Your system will most certainly go down if you do not plan for that.
\ No newline at end of file
+Years of experience working with distributed systems have taught the author that High Availability does not come for free. You need to test and verify that your system remains available. This cannot be provided by this or any other library. Your system will most certainly go down if you do not plan for that.

From f0be328b9a0c481f3994089000dcb1b7b768e607 Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:39:07 +0200
Subject: [PATCH 21/24] about.adoc

---
 asciidoc/src/main/docs/asciidoc/about.adoc | 28 +++++++++++-----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/asciidoc/src/main/docs/asciidoc/about.adoc b/asciidoc/src/main/docs/asciidoc/about.adoc
index dfebae45..6101fb57 100644
--- a/asciidoc/src/main/docs/asciidoc/about.adoc
+++ b/asciidoc/src/main/docs/asciidoc/about.adoc
@@ -1,29 +1,29 @@
 == About Bucket4j
 === What is Bucket4j
-Bucket4j is Java rate-limiting library is mainly based on token-bucket algorithm, which are by de-facto standard for rate limiting in the IT industry.
+Bucket4j is a Java rate-limiting library that is mainly based on the token-bucket algorithm, which is the de facto standard for rate limiting in the IT industry.
 
-.Bucket4j is more than direct implementation of token-bucket
+.Bucket4j is more than a direct implementation of token-bucket
 IMPORTANT: Its math model provides several useful extensions that are not mentioned in the classic token-bucket interpretations, such as multiple limits per bucket or overdraft. These math extensions will be described in detail later.
 
-You can read more about token bucket by following links:
+You can read more about the token bucket at the following links:
 
-* https://en.wikipedia.org/wiki/Token_bucket[Token bucket] - wikipedia page describes the token-bucket algorithm in classical form.
-* https://vbukhtoyarov-java.blogspot.com/2021/11/non-formal-overview-of-token-bucket.html[Non-formal overview of token-bucket algorithm] - the brief overview of token-bucket algorithm.
+* https://en.wikipedia.org/wiki/Token_bucket[Token bucket] - the Wikipedia page that describes the token-bucket algorithm in its classical form.
+* https://vbukhtoyarov-java.blogspot.com/2021/11/non-formal-overview-of-token-bucket.html[Non-formal overview of token-bucket algorithm] - a brief overview of the token-bucket algorithm.
 
 === Bucket4j basic features
-* *Absolutely non-compromise precision* - Bucket4j does not operate with floats or doubles, all calculation are performed in the integer arithmetic, this feature protects end users from calculation errors involved by rounding.
+* *Absolutely uncompromising precision* - Bucket4j does not operate with floats or doubles; all calculations are performed in integer arithmetic, which protects end users from calculation errors caused by rounding.
 * *Effective implementation in terms of concurrency*:
-  - Bucket4j is good scalable for multi-threading case it by defaults uses lock-free implementation.
-  - In same time, library provides different concurrency strategies that can be chosen when default lock-free strategy is not desired.
+- Bucket4j scales well in multi-threaded cases; by default it uses a lock-free implementation.
+- At the same time, the library provides different concurrency strategies that can be chosen when the default lock-free strategy is not desired.
 * *Effective API in terms of garbage collector footprint*: the Bucket4j API tries to use primitive types as much as possible in order to avoid boxing and other kinds of floating garbage.
-* *Pluggable listener API* that allows to implement monitoring and logging.
-* *Rich diagnostic API* that allows to investigate internal state.
-* *Rich configuration management* - configuration of the bucket can be changed on fly
+* *Pluggable listener API* that allows implementing monitoring and logging.
+* *Rich diagnostic API* that allows investigating the internal state.
+* *Rich configuration management* - the configuration of the bucket can be changed on the fly, as sketched below
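A sketch of such an on-the-fly change (``bucket`` is an already-built bucket; the 7.x API offers ``replaceConfiguration`` together with ``TokensInheritanceStrategy`` for this, here keeping the already-accumulated tokens as-is; the concrete limit is an arbitrary example):

[source, java]
----
BucketConfiguration newConfiguration = BucketConfiguration.builder()
        .addLimit(Bandwidth.simple(100, Duration.ofMinutes(1)))
        .build();

// switch the live bucket to the new limits without recreating it
bucket.replaceConfiguration(newConfiguration, TokensInheritanceStrategy.AS_IS);
----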
 
 === Bucket4j distributed features
-In additional to basic features described above, `Bucket4j` provides ability to implement rate-limiting in cluster of JVMs:
+In addition to the basic features described above, `Bucket4j` provides the ability to implement rate-limiting in a cluster of JVMs:
 
 * Bucket4j out of the box supports any GRID solution which is compatible with the JCache API (JSR 107) specification.
-* Bucket4j provides the framework that allows to quickly build integration with your own persistent technology like RDMS or a key-value storage.
-* For clustered usage scenarios Bucket4j supports asynchronous API that extremely matters when going to distribute world, because asynchronous API allows avoiding blocking your application threads each time when you need to execute Network request.
+* Bucket4j provides a framework that allows you to quickly build an integration with your own persistence technology, such as an RDBMS or a key-value storage.
+* For clustered usage scenarios Bucket4j supports an asynchronous API, which matters greatly in the distributed world because it avoids blocking your application threads each time a network request must be executed.

From 5c6d941fa6cb8c2a5ecadc0f73df79457494ca13 Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:40:12 +0200
Subject: [PATCH 22/24] index.adoc

---
 asciidoc/src/main/docs/asciidoc/index.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/asciidoc/src/main/docs/asciidoc/index.adoc b/asciidoc/src/main/docs/asciidoc/index.adoc
index 85ae4119..92207df0 100644
--- a/asciidoc/src/main/docs/asciidoc/index.adoc
+++ b/asciidoc/src/main/docs/asciidoc/index.adoc
@@ -20,7 +20,7 @@ Maxim Bartkov:: Lead Java developer at RooX Solutions +
 Kharkov, Ukraine +
 maxgalayoutop@gmail.com +
-Role: evangelist, author of scientific publications +
+Role: contributor, evangelist, author of scientific publications +
 image:images/Maxim_Bartkov.jpg[80,80] +
 
 == Third-party articles and integrations

From c40d65c45ea06fb4e8176114fdfc8e2c9abd1dbb Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:40:58 +0200
Subject: [PATCH 23/24] concept.adoc

---
 .../distributed/implement-custom-database/concept.adoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/concept.adoc b/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/concept.adoc
index 5fb268ca..9e2a8bef 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/concept.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/concept.adoc
@@ -1,5 +1,5 @@
 === Framework to implement custom work with your database
-The Bucket4j library gives an opportunity to implement work with any database.
+The Bucket4j library allows you to implement support for any database.
 If your database is not among the existing distributed implementations (currently Bucket4j supports the following databases: Redis, Hazelcast, Apache Ignite, Infinispan, Oracle Coherence, DynamoDB, PostgreSQL, MySQL), you can implement support for it as a distributed storage.
 All you need to do is extend
 io.github.bucket4j.distributed.proxy.generic.select_for_update.AbstractLockBasedProxyManager or
@@ -16,7 +16,7 @@ To define which class you should extend, you need to understand the main idea of
 `AbstractSelectForUpdateBasedProxyManager` - used to realize the Select For Update concept
 
-After need to override works of allocation transaction, in order to do that, we should override method allocateTransaction.
+Next, you need to override the allocation of the transaction; to do that, override the method allocateTransaction.
 The main idea of allocateTransaction is to just return a class which implements `LockBasedTransaction` (for `AbstractLockBasedProxyManager`) or
 `SelectForUpdateBasedTransaction` (for `AbstractSelectForUpdateBasedProxyManager`) - we will implement it later.
 
 And override removeProxy() to remove the bucket from the table which stores the buckets.
 
@@ -25,7 +25,7 @@
 Need to implement `LockBasedTransaction` or `SelectForUpdateBasedTransaction` to realize the custom work of the database for the transaction.
 
-In order to do that, we need to create a custom class to implement from one of these classes
+To do that, we need to create a custom class that implements one of these interfaces:
 
 *LockBasedTransaction*
 ----

From f0be6a96e55edd812c9294806684884cf2cbafcd Mon Sep 17 00:00:00 2001
From: mbartkov
Date: Thu, 10 Feb 2022 21:41:26 +0200
Subject: [PATCH 24/24] jdbc-integraions.adoc

---
 .../implement-custom-database/jdbc-integraions.adoc | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/jdbc-integraions.adoc b/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/jdbc-integraions.adoc
index c510de42..aa666649 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/jdbc-integraions.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/implement-custom-database/jdbc-integraions.adoc
@@ -20,7 +20,7 @@ CREATE TABLE IF NOT EXISTS buckets(id BIGINT PRIMARY KEY, state BLOB);
 
 * Each proxy manager takes a `SQLProxyConfiguration` to customize its work with the database
 
-* In order to do that, you should use `SQLProxyConfigurationBuilder`, which includes the next parameters:
+* To do that, you should use `SQLProxyConfigurationBuilder`, which includes the following parameters:
 ----
 /**
 */
 ----
 
@@ -46,7 +46,7 @@
 
 [[listener]]
 ===== Overriding table configuration
 
-You can override the names of the columns to set your custom name of columns, in order to do that, you should use `BucketTableSettings` to set into `SQLProxyConfigurationBuilder` of your JDBC implementation.
+You can override the column names; to do that, use `BucketTableSettings` and set it into the `SQLProxyConfigurationBuilder` of your JDBC implementation.
 
 * `SQLProxyConfigurationBuilder` takes a `BucketTableSettings` - the class that defines the configuration of the table to use as the buckets store. By default, it uses `BucketTableSettings.getDefault()` under the hood.
 
@@ -134,7 +134,4 @@ To use the Bucket4j extension for MySQL you need to add the following dependency:
     .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)))
     .build();
 BucketProxy bucket = proxyManager.builder().build(key, bucketConfiguration);
-----
-
-
-
+----
\ No newline at end of file
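Putting the JDBC pieces above together, a minimal end-to-end sketch might look as follows. This is an illustrative assumption, not the definitive wiring: it presumes the default ``buckets`` table from the DDL above and an already configured ``dataSource``; the class and builder names follow the fragments in this section, but the exact signatures should be verified against your Bucket4j version.

[source, java]
----
import java.time.Duration;

import io.github.bucket4j.Bandwidth;
import io.github.bucket4j.BucketConfiguration;
import io.github.bucket4j.distributed.BucketProxy;

// table/column configuration; getDefault() matches the DDL shown above
SQLProxyConfiguration configuration = SQLProxyConfigurationBuilder.builder()
        .withTableSettings(BucketTableSettings.getDefault())
        .build(dataSource); // dataSource: a configured javax.sql.DataSource

MySQLSelectForUpdateBasedProxyManager proxyManager =
        new MySQLSelectForUpdateBasedProxyManager(configuration);

BucketConfiguration bucketConfiguration = BucketConfiguration.builder()
        .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)))
        .build();

Long key = 1L; // primary key of the bucket row in the buckets table
BucketProxy bucket = proxyManager.builder().build(key, bucketConfiguration);
boolean allowed = bucket.tryConsume(1);
----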