Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix size based eviction when cluster scaled down #18871

Merged
merged 1 commit into from
Jun 17, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Expand Up @@ -16,9 +16,12 @@

package com.hazelcast.map.impl.operation;

import com.hazelcast.config.EvictionConfig;
import com.hazelcast.config.IndexConfig;
import com.hazelcast.config.MapConfig;
import com.hazelcast.internal.nio.IOUtil;
import com.hazelcast.internal.partition.IPartitionService;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.internal.services.ObjectNamespace;
import com.hazelcast.internal.services.ServiceNamespace;
Expand All @@ -27,15 +30,17 @@
import com.hazelcast.internal.util.ThreadUtil;
import com.hazelcast.map.impl.MapContainer;
import com.hazelcast.map.impl.MapDataSerializerHook;
import com.hazelcast.map.impl.MapService;
import com.hazelcast.map.impl.MapServiceContext;
import com.hazelcast.map.impl.PartitionContainer;
import com.hazelcast.map.impl.StoreAdapter;
import com.hazelcast.map.impl.eviction.Evictor;
import com.hazelcast.map.impl.record.Record;
import com.hazelcast.map.impl.record.Records;
import com.hazelcast.map.impl.recordstore.RecordStore;
import com.hazelcast.map.impl.recordstore.RecordStoreAdapter;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.query.impl.Index;
import com.hazelcast.query.impl.Indexes;
Expand All @@ -50,6 +55,7 @@
import java.util.Map;
import java.util.Set;

import static com.hazelcast.config.MaxSizePolicy.PER_NODE;
import static com.hazelcast.internal.util.MapUtil.createHashMap;
import static com.hazelcast.internal.util.MapUtil.isNullOrEmpty;

Expand Down Expand Up @@ -135,7 +141,8 @@ void prepare(PartitionContainer container, Collection<ServiceNamespace> namespac
}
}

@SuppressWarnings("checkstyle:npathcomplexity")
@SuppressWarnings({"checkstyle:npathcomplexity",
"checkstyle:cyclomaticcomplexity", "checkstyle:nestedifdepth"})
void applyState() {
ThreadUtil.assertRunningOnPartitionThread();

Expand Down Expand Up @@ -174,18 +181,34 @@ void applyState() {

long nowInMillis = Clock.currentTimeMillis();

long ownedEntryCountOnThisNode = entryCountOnThisNode(mapContainer);
EvictionConfig evictionConfig = mapContainer.getMapConfig().getEvictionConfig();
boolean perNodeEvictionConfigured = mapContainer.getEvictor() != Evictor.NULL_EVICTOR
&& evictionConfig.getMaxSizePolicy() == PER_NODE;

for (int i = 0; i < keyRecord.size(); i += 2) {
Data dataKey = (Data) keyRecord.get(i);
Record record = (Record) keyRecord.get(i + 1);

recordStore.putReplicatedRecord(dataKey, record, nowInMillis, populateIndexes);

if (recordStore.shouldEvict()) {
// No need to continue replicating records anymore.
// We are already over eviction threshold, each put record will cause another eviction.
recordStore.evictEntries(dataKey);
break;
if (perNodeEvictionConfigured) {
if (ownedEntryCountOnThisNode >= evictionConfig.getSize()) {
if (operation.getReplicaIndex() == 0) {
recordStore.doPostEvictionOperations(dataKey, record);
}
} else {
recordStore.putReplicatedRecord(dataKey, record, nowInMillis, populateIndexes);
ownedEntryCountOnThisNode++;
}
} else {
recordStore.putReplicatedRecord(dataKey, record, nowInMillis, populateIndexes);
if (recordStore.shouldEvict()) {
mmedenjak marked this conversation as resolved.
Show resolved Hide resolved
// No need to continue replicating records anymore.
// We are already over eviction threshold, each put record will cause another eviction.
recordStore.evictEntries(dataKey);
break;
}
}

recordStore.disposeDeferredBlocks();
}

Expand All @@ -196,6 +219,32 @@ void applyState() {
}
}

/**
 * Counts the map entries held by this node for the replica index of the
 * current operation: primary-owned entries when the replica index is 0,
 * backup entries (non-owned partitions) otherwise.
 * <p>
 * Returns 0 unless the map has a real evictor configured with the
 * {@code PER_NODE} max-size policy, since the count is only needed for
 * per-node size-based eviction decisions.
 *
 * @param mapContainer container of the map whose entries are counted
 * @return entry count on this node for the operation's replica index
 */
// owned or backup
private long entryCountOnThisNode(MapContainer mapContainer) {
    // Only relevant for PER_NODE size-based eviction; anything else counts as 0.
    if (mapContainer.getEvictor() == Evictor.NULL_EVICTOR
            || PER_NODE != mapContainer.getMapConfig().getEvictionConfig().getMaxSizePolicy()) {
        return 0;
    }

    MapService service = operation.getService();
    MapServiceContext context = service.getMapServiceContext();
    IPartitionService partitions = context.getNodeEngine().getPartitionService();

    // replica index 0 -> count partitions this node owns; otherwise count the rest (backups)
    boolean countOwnedPartitions = operation.getReplicaIndex() == 0;

    long total = 0;
    for (int partitionId = 0; partitionId < partitions.getPartitionCount(); partitionId++) {
        if (countOwnedPartitions != partitions.isPartitionOwner(partitionId)) {
            continue;
        }
        RecordStore recordStore = context.getExistingRecordStore(partitionId, mapContainer.getName());
        if (recordStore != null) {
            total += recordStore.size();
        }
    }
    return total;
}

private void applyIndexesState() {
if (mapIndexInfos != null) {
for (MapIndexInfo mapIndexInfo : mapIndexInfos) {
Expand Down
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How come? :)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

always asking myself that.

* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down
Expand Up @@ -22,11 +22,12 @@
import com.hazelcast.config.EvictionPolicy;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.MaxSizePolicy;
import com.hazelcast.core.EntryEventType;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.spi.eviction.EvictionPolicyComparator;
import com.hazelcast.internal.eviction.impl.comparator.LRUEvictionPolicyComparator;
import com.hazelcast.internal.partition.IPartitionService;
import com.hazelcast.internal.util.MemoryInfoAccessor;
import com.hazelcast.internal.util.MutableLong;
import com.hazelcast.map.impl.EntryCostEstimator;
import com.hazelcast.map.impl.MapContainer;
import com.hazelcast.map.impl.MapService;
Expand All @@ -38,6 +39,8 @@
import com.hazelcast.map.impl.proxy.MapProxyImpl;
import com.hazelcast.map.impl.recordstore.DefaultRecordStore;
import com.hazelcast.map.impl.recordstore.RecordStore;
import com.hazelcast.map.listener.EntryEvictedListener;
import com.hazelcast.spi.eviction.EvictionPolicyComparator;
import com.hazelcast.spi.impl.NodeEngine;
import com.hazelcast.spi.properties.ClusterProperty;
import com.hazelcast.test.AssertTask;
Expand All @@ -52,7 +55,10 @@

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import static com.hazelcast.config.MaxSizePolicy.FREE_HEAP_PERCENTAGE;
import static com.hazelcast.config.MaxSizePolicy.FREE_HEAP_SIZE;
Expand Down Expand Up @@ -85,38 +91,50 @@ public void testPerNodePolicy_withManyNodes() {

@Test
public void testPerNodePolicy_afterGracefulShutdown() {
int nodeCount = 2;
int nodeCount = 3;
int perNodeMaxSize = 1000;
int numberOfPuts = 3000;

// eviction takes place if a partition's size exceeds this number
// see EvictionChecker#toPerPartitionMaxSize
double maxPartitionSize = 1D * nodeCount * perNodeMaxSize / PARTITION_COUNT;

String mapName = "testPerNodePolicy_afterGracefulShutdown";
Config config = createConfig(PER_NODE, perNodeMaxSize, mapName);

ConcurrentHashMap<EntryEventType, MutableLong> eventsPerType = new ConcurrentHashMap<>();
Set<Object> evictedKeySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
// populate map from one of the nodes
Collection<HazelcastInstance> nodes = createNodes(nodeCount, config);
for (HazelcastInstance node : nodes) {
IMap map = node.getMap(mapName);
for (int i = 0; i < 5000; i++) {
map.put(i, i);
}
List<HazelcastInstance> nodes = createNodes(nodeCount, config);

node.shutdown();
break;
IMap map = nodes.get(0).getMap(mapName);
for (int i = 0; i < numberOfPuts; i++) {
map.put(i, i);
}

for (HazelcastInstance node : nodes) {
if (node.getLifecycleService().isRunning()) {
int mapSize = node.getMap(mapName).size();
String message = format("map size is %d and it should be smaller "
+ "than maxPartitionSize * PARTITION_COUNT which is %.0f",
mapSize, maxPartitionSize * PARTITION_COUNT);
int initialMapSize = map.size();

nodes.get(1).getMap(mapName).addEntryListener((EntryEvictedListener) event -> {
evictedKeySet.add(event.getKey());
}, true);

nodes.get(0).shutdown();

assertTrueEventually(() -> {
for (HazelcastInstance node : nodes) {
if (node.getLifecycleService().isRunning()) {
int currentMapSize = node.getMap(mapName).size();
int evictedKeyCount = evictedKeySet.size();
String message = format("initialMapSize=%d, evictedKeyCount=%d, map size is %d and it should be smaller "
+ "than maxPartitionSize * PARTITION_COUNT which is %.0f",
initialMapSize, evictedKeyCount, currentMapSize, maxPartitionSize * PARTITION_COUNT);

assertTrue(message, mapSize <= maxPartitionSize * PARTITION_COUNT);
assertEquals(message, initialMapSize, evictedKeyCount + currentMapSize);
// current map size should be approximately (nodeCount - 1) * perNodeMaxSize.
assertTrue(message, ((nodeCount - 1) * perNodeMaxSize)
+ (PARTITION_COUNT / nodeCount) >= currentMapSize);
}
}
}
});
}

/**
Expand Down Expand Up @@ -403,6 +421,7 @@ Collection<IMap> createMaps(String mapName, Config config, int nodeCount) {

Config createConfig(MaxSizePolicy maxSizePolicy, int maxSize, String mapName) {
Config config = getConfig();
config.getMetricsConfig().setEnabled(false);
config.setProperty(ClusterProperty.PARTITION_COUNT.getName(), String.valueOf(PARTITION_COUNT));

MapConfig mapConfig = config.getMapConfig(mapName);
Expand All @@ -414,10 +433,10 @@ Config createConfig(MaxSizePolicy maxSizePolicy, int maxSize, String mapName) {
return config;
}

Collection<HazelcastInstance> createNodes(int nodeCount, Config config) {
List<HazelcastInstance> createNodes(int nodeCount, Config config) {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(nodeCount);
factory.newInstances(config);
return factory.getAllHazelcastInstances();
return new ArrayList<>(factory.getAllHazelcastInstances());
}

int getSize(Collection<IMap> maps) {
Expand Down
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down