
Commit

HBASE-28568 Incremental backup set does not correctly shrink (apache#5876)

The incremental backup set is the set of tables included when
an incremental backup is created. It is managed per backup
root dir and contains all tables that are present in at least
one backup (in that root dir).

The incremental backup set can only shrink when backups are
deleted. However, the implementation was incorrect, so this
set could never shrink.

Reviewed-by: Ray Mattingly <rmdmattingly@gmail.com>
Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
DieterDP-ng authored and ndimiduk committed May 17, 2024
1 parent 0ac43d4 commit bcc4f78
Showing 3 changed files with 58 additions and 38 deletions.
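
In essence, the fix restores the invariant that, per backup root dir, the incremental backup set equals the union of the table sets of the backups that still exist under that root dir. A minimal sketch of that invariant (illustrative names only, not the HBase API):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class IncrementalBackupSetSketch {
      // The incremental backup set for one backup root dir is the union of the
      // table sets of every backup still stored under that root dir; it therefore
      // shrinks once the last backup containing a given table is deleted.
      static Set<String> recompute(List<Set<String>> remainingBackupTableSets) {
        Set<String> tables = new HashSet<>();
        for (Set<String> backupTables : remainingBackupTableSets) {
          tables.addAll(backupTables);
        }
        return tables;
      }
    }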
BackupAdminImpl.java

@@ -94,7 +94,6 @@ public BackupInfo getBackupInfo(String backupId) throws IOException {
   public int deleteBackups(String[] backupIds) throws IOException {

     int totalDeleted = 0;
-    Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();

     boolean deleteSessionStarted;
     boolean snapshotDone;
@@ -130,20 +129,16 @@ public int deleteBackups(String[] backupIds) throws IOException {
       }
       snapshotDone = true;
       try {
+        List<String> affectedBackupRootDirs = new ArrayList<>();
         for (int i = 0; i < backupIds.length; i++) {
           BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
-          if (info != null) {
-            String rootDir = info.getBackupRootDir();
-            HashSet<TableName> allTables = allTablesMap.get(rootDir);
-            if (allTables == null) {
-              allTables = new HashSet<>();
-              allTablesMap.put(rootDir, allTables);
-            }
-            allTables.addAll(info.getTableNames());
-            totalDeleted += deleteBackup(backupIds[i], sysTable);
+          if (info == null) {
+            continue;
           }
+          affectedBackupRootDirs.add(info.getBackupRootDir());
+          totalDeleted += deleteBackup(backupIds[i], sysTable);
         }
-        finalizeDelete(allTablesMap, sysTable);
+        finalizeDelete(affectedBackupRootDirs, sysTable);
         // Finish
         sysTable.finishDeleteOperation();
         // delete snapshot
@@ -176,26 +171,23 @@ public int deleteBackups(String[] backupIds) throws IOException {

   /**
    * Updates incremental backup set for every backupRoot
-   * @param tablesMap map [backupRoot: {@code Set<TableName>}]
-   * @param table     backup system table
+   * @param backupRoots backupRoots for which to revise the incremental backup set
+   * @param table       backup system table
+   * @throws IOException if a table operation fails
    */
-  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
+  private void finalizeDelete(List<String> backupRoots, BackupSystemTable table)
     throws IOException {
-    for (String backupRoot : tablesMap.keySet()) {
+    for (String backupRoot : backupRoots) {
       Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
-      Map<TableName, ArrayList<BackupInfo>> tableMap =
+      Map<TableName, List<BackupInfo>> tableMap =
         table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
-      for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
-        if (entry.getValue() == null) {
-          // No more backups for a table
-          incrTableSet.remove(entry.getKey());
-        }
-      }
+
+      // Keep only the tables that are present in other backups
+      incrTableSet.retainAll(tableMap.keySet());
+
+      table.deleteIncrementalBackupTableSet(backupRoot);
       if (!incrTableSet.isEmpty()) {
         table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
-      } else { // empty
-        table.deleteIncrementalBackupTableSet(backupRoot);
       }
     }
   }
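The root cause of the bug removed above: getBackupHistoryForTableSet never maps a table to null; a table with no remaining backups is simply absent from the returned map (see the javadoc added in the next file), so the old entry.getValue() == null check could never fire. A small self-contained sketch of the failure mode, and of why retainAll fixes it, using plain JDK collections:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ShrinkBugSketch {
      public static void main(String[] args) {
        // Incremental backup set before the delete: t1, t2, t3.
        Set<String> incrTableSet = new HashSet<>(Set.of("t1", "t2", "t3"));

        // History after deleting the backup that contained t1 and t2: only t3
        // still appears in a backup. Absent tables are missing keys, not null values.
        Map<String, Integer> backupCountPerTable = new HashMap<>(Map.of("t3", 1));

        // Old logic: scans for null values, finds none, removes nothing.
        for (Map.Entry<String, Integer> entry : backupCountPerTable.entrySet()) {
          if (entry.getValue() == null) {
            incrTableSet.remove(entry.getKey()); // never reached
          }
        }
        System.out.println(incrTableSet); // still t1, t2, t3: the set never shrank

        // Fixed logic: intersect with the tables that still have backups.
        incrTableSet.retainAll(backupCountPerTable.keySet());
        System.out.println(incrTableSet); // [t3]
      }
    }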
BackupSystemTable.java

@@ -86,8 +86,9 @@
  * <ul>
  * <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
  * <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
- * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]</li>
- * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-%3E last WAL
+ * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; table="meta:"+tablename of
+ * include table; value=empty</li>
+ * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
  * timestamp]</li>
  * <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp</li>
  * <li>6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file
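The revised item 3 describes a layout with one column per included table rather than a single list value. A hedged sketch of a write under that layout, inferred from the javadoc wording only (family "meta", qualifier = table name, empty value), not copied from this commit:

    // Assumed layout: row "incrbackupset:"+backupRoot, one qualifier per table.
    Put put = new Put(Bytes.toBytes("incrbackupset:" + backupRoot));
    put.addColumn(Bytes.toBytes("meta"), tableName.getName(), new byte[0]);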
@@ -839,23 +840,25 @@ public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOExcept
     return tableHistory;
   }

-  public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
+  /**
+   * Goes through all backup history corresponding to the provided root folder, and collects all
+   * backup info mentioning each of the provided tables.
+   * @param set        the tables for which to collect the {@code BackupInfo}
+   * @param backupRoot backup destination path to retrieve backup history for
+   * @return a map containing (a subset of) the provided {@code TableName}s, mapped to a list of at
+   *         least one {@code BackupInfo}
+   * @throws IOException if getting the backup history fails
+   */
+  public Map<TableName, List<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
     String backupRoot) throws IOException {
     List<BackupInfo> history = getBackupHistory(backupRoot);
-    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
-    for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
-      BackupInfo info = iterator.next();
-      if (!backupRoot.equals(info.getBackupRootDir())) {
-        continue;
-      }
+    Map<TableName, List<BackupInfo>> tableHistoryMap = new HashMap<>();
+    for (BackupInfo info : history) {
       List<TableName> tables = info.getTableNames();
       for (TableName tableName : tables) {
         if (set.contains(tableName)) {
-          ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
-          if (list == null) {
-            list = new ArrayList<>();
-            tableHistoryMap.put(tableName, list);
-          }
+          List<BackupInfo> list =
+            tableHistoryMap.computeIfAbsent(tableName, k -> new ArrayList<>());
           list.add(info);
         }
       }
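A usage sketch of the contract documented above; conn, t1, t2, and rootDir are hypothetical stand-ins introduced here for illustration:

    // Only tables that still appear in at least one backup under rootDir come
    // back as keys; each key maps to a non-empty list of BackupInfo.
    try (BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      Map<TableName, List<BackupInfo>> perTableHistory =
        sysTable.getBackupHistoryForTableSet(Sets.newHashSet(t1, t2), rootDir);
      boolean t1StillBackedUp = perTableHistory.containsKey(t1);
    }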
TestBackupDelete.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.backup;

+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

 import java.io.ByteArrayOutputStream;
@@ -30,6 +31,7 @@
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -158,4 +160,27 @@ public long currentTime() {
     LOG.info(baos.toString());
     assertTrue(output.indexOf("Deleted 1 backups") >= 0);
   }
+
+  /**
+   * Verify that backup deletion updates the incremental-backup-set.
+   */
+  @Test
+  public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception {
+    LOG.info("Test backup delete updates the incremental backup set");
+    BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
+
+    String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2));
+    assertTrue(checkSucceeded(backupId1));
+    assertEquals(Sets.newHashSet(table1, table2),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+
+    String backupId2 = fullTableBackup(Lists.newArrayList(table3));
+    assertTrue(checkSucceeded(backupId2));
+    assertEquals(Sets.newHashSet(table1, table2, table3),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+
+    getBackupAdmin().deleteBackups(new String[] { backupId1 });
+    assertEquals(Sets.newHashSet(table3),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+  }
 }
