Merge branch 'customresource' of github.com:shawkins/kubernetes-client into customresource
shawkins committed May 19, 2022
2 parents b860309 + efeb932 commit d20b263
Showing 4 changed files with 99 additions and 109 deletions.
2 changes: 1 addition & 1 deletion doc/FAQ.md
@@ -26,7 +26,7 @@ At the core the thread utilization will depend upon the http client implementati

With the JDK http client, only a selector thread and a small worker pool (sized by your available processors) are maintained per client. It does not matter how many Informers or Watches you run; the same worker pool is shared.

-It is recommended with either http client that logic you supply via Watchers, ExecListeners, ResourceEventHandlers, Predicates, etc. do not execute long running tasks.
+> **Note:** It is recommended with either HTTP client implementation that logic you supply via Watchers, ExecListeners, ResourceEventHandlers, Predicates, etc. does not execute long running tasks.
For non-ResourceEventHandler callbacks, long-running operations can be a problem. When using the OkHttp client with default settings, holding an IO thread inhibits WebSocket processing, which can time out the ping and may prevent additional requests since the OkHttp client defaults to only 5 concurrent requests per host. When using the JDK http client, a long-running task will inhibit the use of that IO thread for ALL http processing. Note that calling other KubernetesClient operations, especially those with waits, can be long-running. We are working towards providing a non-blocking mode for many of these operations, but until that is available, consider using a separate task queue for such work.
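Purely as illustration (not part of this commit's diff), a minimal sketch of the separate-task-queue pattern the paragraph above recommends, assuming a fabric8 6.x client; the single-thread executor and the `slowReconcile` helper are hypothetical names:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.Watcher;
import io.fabric8.kubernetes.client.WatcherException;

public class WatchOffloadSketch {

  public static void main(String[] args) {
    // Hypothetical worker queue: keeps slow work off the client's IO threads.
    ExecutorService worker = Executors.newSingleThreadExecutor();
    KubernetesClient client = new KubernetesClientBuilder().build();

    client.pods().inNamespace("default").watch(new Watcher<Pod>() {
      @Override
      public void eventReceived(Action action, Pod pod) {
        // Return immediately; the potentially long-running work (e.g. other
        // KubernetesClient calls with waits) runs on the worker queue instead.
        worker.submit(() -> slowReconcile(action, pod));
      }

      @Override
      public void onClose(WatcherException cause) {
        worker.shutdown();
      }
    });
  }

  private static void slowReconcile(Watcher.Action action, Pod pod) {
    // Hypothetical long-running handler.
    System.out.println(action + " " + pod.getMetadata().getName());
  }
}
```

The callback itself returns immediately, so neither the OkHttp nor the JDK client has an IO thread held by application work.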

ConfigMapLockTest.java
@@ -19,7 +19,7 @@
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.fabric8.kubernetes.api.model.ConfigMapList;
import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
-import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.dsl.MixedOperation;
import io.fabric8.kubernetes.client.dsl.ReplaceDeletable;
import io.fabric8.kubernetes.client.dsl.Resource;
@@ -51,18 +51,18 @@

class ConfigMapLockTest {

-private NamespacedKubernetesClient kc;
+private KubernetesClient kc;
private MixedOperation<ConfigMap, ConfigMapList, Resource<ConfigMap>> configMaps;
private ConfigMapBuilder configMapBuilder;
private ConfigMapBuilder.MetadataNested<ConfigMapBuilder> metadata;

@BeforeEach
void setUp() {
-kc = mock(NamespacedKubernetesClient.class, RETURNS_DEEP_STUBS);
+kc = mock(KubernetesClient.class, RETURNS_DEEP_STUBS);
configMaps = mock(MixedOperation.class, RETURNS_DEEP_STUBS);
configMapBuilder = Mockito.mock(ConfigMapBuilder.class, RETURNS_DEEP_STUBS);
metadata = mock(ConfigMapBuilder.MetadataNested.class, RETURNS_DEEP_STUBS);
-when(kc.inNamespace(anyString()).configMaps()).thenReturn(configMaps);
+when(kc.configMaps().inNamespace(anyString())).thenReturn(configMaps);
when(configMapBuilder.editOrNewMetadata()).thenReturn(metadata);
}

@@ -104,11 +104,10 @@ void getWithExistingConfigMapShouldReturnLeaderElectionRecord()
final ConfigMap cm = new ConfigMap();
when(configMaps.withName(ArgumentMatchers.eq("name")).get()).thenReturn(cm);
cm.setMetadata(new ObjectMetaBuilder()
-.withAnnotations(
-    Collections.singletonMap("control-plane.alpha.kubernetes.io/leader",
-        "{\"holderIdentity\":\"1337\",\"leaseDuration\":15,\"acquireTime\":1445401740,\"renewTime\":1445412480}")
-)
-.withResourceVersion("313373").build());
+.withAnnotations(
+    Collections.singletonMap("control-plane.alpha.kubernetes.io/leader",
+        "{\"holderIdentity\":\"1337\",\"leaseDuration\":15,\"acquireTime\":1445401740,\"renewTime\":1445412480}"))
+.withResourceVersion("313373").build());
final ConfigMapLock lock = new ConfigMapLock("namespace", "name", "1337");
// When
final LeaderElectionRecord result = lock.get(kc);
@@ -124,7 +123,7 @@ void getWithExistingConfigMapShouldReturnLeaderElectionRecord()
void createWithValidLeaderElectionRecordShouldSendPostRequest() throws Exception {
// Given
final LeaderElectionRecord record = new LeaderElectionRecord(
"1", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
"1", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
final ConfigMapLock lock = new ConfigMapLock("namespace", "name", "1337");
// When
lock.create(kc, record);
Expand All @@ -142,7 +141,7 @@ void updateWithValidLeaderElectionRecordShouldSendPutRequest() throws Exception
configMapInTheCluster.setMetadata(new ObjectMetaBuilder().withAnnotations(new HashMap<>()).build());
when(configMapResource.get()).thenReturn(configMapInTheCluster);
final LeaderElectionRecord record = new LeaderElectionRecord(
"1337", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
"1337", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
record.setVersion("313373");
final ConfigMapLock lock = new ConfigMapLock("namespace", "name", "1337");
// When
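For context on the reworked stubbing above (`kc.configMaps().inNamespace(...)` replacing `kc.inNamespace(...).configMaps()`), a minimal sketch of the call order the test now mirrors against a real client; the namespace and name values are placeholders:

```java
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class ConfigMapDslSketch {
  public static void main(String[] args) {
    try (KubernetesClient client = new KubernetesClientBuilder().build()) {
      // Resource operation first, then namespace selection -- the chain the
      // test stubs with RETURNS_DEEP_STUBS.
      ConfigMap cm = client.configMaps()
          .inNamespace("namespace")
          .withName("name")
          .get();
      System.out.println(cm == null ? "not found" : cm.getMetadata().getResourceVersion());
    }
  }
}
```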
LeaseLockTest.java
@@ -20,7 +20,7 @@
import io.fabric8.kubernetes.api.model.coordination.v1.LeaseBuilder;
import io.fabric8.kubernetes.api.model.coordination.v1.LeaseList;
import io.fabric8.kubernetes.api.model.coordination.v1.LeaseSpec;
-import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
+import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.dsl.MixedOperation;
import io.fabric8.kubernetes.client.dsl.ReplaceDeletable;
import io.fabric8.kubernetes.client.dsl.Resource;
@@ -47,23 +47,24 @@

class LeaseLockTest {

-private NamespacedKubernetesClient kc;
+private KubernetesClient kc;
private MixedOperation<Lease, LeaseList, Resource<Lease>> leases;
private LeaseBuilder leaserBuilder;
private LeaseBuilder.MetadataNested<LeaseBuilder> metadata;
private LeaseBuilder.SpecNested<LeaseBuilder> spec;

@BeforeEach
void setUp() {
-kc = mock(NamespacedKubernetesClient.class, RETURNS_DEEP_STUBS);
+kc = mock(KubernetesClient.class, RETURNS_DEEP_STUBS);
leases = mock(MixedOperation.class, RETURNS_DEEP_STUBS);
leaserBuilder = mock(LeaseBuilder.class, RETURNS_DEEP_STUBS);
metadata = mock(LeaseBuilder.MetadataNested.class, RETURNS_DEEP_STUBS);
spec = mock(LeaseBuilder.SpecNested.class, RETURNS_DEEP_STUBS);
-when(kc.inNamespace(anyString()).leases()).thenReturn(leases);
+when(kc.leases().inNamespace(anyString())).thenReturn(leases);
when(leaserBuilder.withNewMetadata()).thenReturn(metadata);
when(leaserBuilder.withNewSpec()).thenReturn(spec);
}

@Test
void missingNamespaceShouldThrowException() {
// Given
@@ -92,12 +93,12 @@ void missingIdentityShouldThrowException()
void getWithExistingLeaseShouldReturnLeaderElectionRecord() {
// Given
final Lease lease = new LeaseBuilder().withNewSpec()
-  .withHolderIdentity("1337")
-  .withLeaseDurationSeconds(15)
-  .withAcquireTime(ZonedDateTime.of(2015, 10, 21, 4, 29, 0, 0, ZoneId.of("UTC")))
-  .withRenewTime(ZonedDateTime.of(2015, 10, 21, 7, 28, 0, 0, ZoneId.of("UTC")))
-  .withLeaseTransitions(0)
-  .endSpec().build();
+    .withHolderIdentity("1337")
+    .withLeaseDurationSeconds(15)
+    .withAcquireTime(ZonedDateTime.of(2015, 10, 21, 4, 29, 0, 0, ZoneId.of("UTC")))
+    .withRenewTime(ZonedDateTime.of(2015, 10, 21, 7, 28, 0, 0, ZoneId.of("UTC")))
+    .withLeaseTransitions(0)
+    .endSpec().build();
when(leases.withName(eq("name")).get()).thenReturn(lease);
lease.setMetadata(new ObjectMetaBuilder().withResourceVersion("313373").build());
final LeaseLock lock = new LeaseLock("namespace", "name", "1337");
@@ -115,7 +116,7 @@ void getWithExistingLeaseShouldReturnLeaderElectionRecord()
void createWithValidLeaderElectionRecordShouldSendPostRequest() throws Exception {
// Given
final LeaderElectionRecord record = new LeaderElectionRecord(
"1", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
"1", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
final LeaseLock lock = new LeaseLock("namespace", "name", "1337");
// When
lock.create(kc, record);
@@ -133,7 +134,7 @@ void updateWithValidLeaderElectionRecordShouldSendPutRequest() throws Exception
leaseInTheCluster.setSpec(new LeaseSpec());
when(leaseResource.get()).thenReturn(leaseInTheCluster);
final LeaderElectionRecord record = new LeaderElectionRecord(
"1337", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
"1337", Duration.ofSeconds(1), ZonedDateTime.now(), ZonedDateTime.now(), 0);
record.setVersion("313373");
final LeaseLock lock = new LeaseLock("namespace", "name", "1337");
// When
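Since both test classes exercise the leader-election locks, a hedged sketch of how a `LeaseLock` is typically wired into an elector with this client; the durations, names, and callbacks below are illustrative assumptions, not taken from this commit:

```java
import java.time.Duration;

import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderCallbacks;
import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElectionConfigBuilder;
import io.fabric8.kubernetes.client.extended.leaderelection.resourcelock.LeaseLock;

public class LeaderElectionSketch {
  public static void main(String[] args) {
    try (KubernetesClient client = new KubernetesClientBuilder().build()) {
      client.leaderElector()
          .withConfig(new LeaderElectionConfigBuilder()
              .withName("sample-election")
              .withLeaseDuration(Duration.ofSeconds(15))
              .withRenewDeadline(Duration.ofSeconds(10))
              .withRetryPeriod(Duration.ofSeconds(2))
              // Same lock type the tests cover; namespace/name/identity are placeholders.
              .withLock(new LeaseLock("namespace", "name", "1337"))
              .withLeaderCallbacks(new LeaderCallbacks(
                  () -> System.out.println("started leading"),
                  () -> System.out.println("stopped leading"),
                  newLeader -> System.out.println("new leader: " + newLeader)))
              .build())
          .build()
          .run(); // blocks while this instance participates in the election
    }
  }
}
```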
