# Support Table History Policy in Snapshot Expiration Job (#274)

## Summary

Following up on #262 and
#259,
this PR adds support for the snapshot expiration table maintenance job to
use the history policies defined on a table.

Most notably, snapshot expiration now follows the `maxAge`, `granularity`,
and `versions` settings:
1. If `maxAge` is provided, snapshots older than `maxAge`, measured in the
`granularity` time unit, are expired.
2. If `versions` is provided, the last `versions` snapshots are retained
regardless of their age.
3. If both are provided, `maxAge` takes priority: only up to `versions`
snapshots younger than `maxAge` are retained. This is done by first pruning
the snapshots older than `maxAge`, then running a second expiration that
keeps the newest `versions` snapshots (see the sketch below). For example,
with `maxAge=2` days and `versions=3`, snapshots older than two days are
pruned first, and the second pass trims the survivors only if more than
three remain.

Note: if `versions` is defined and there are fewer than N versions in the
history, there simply were not enough commits (within that timespan, if one
is defined). Snapshot expiration will always keep at least one version.

The default behavior of the snapshot expiration job remains the same: keep
snapshots from the last 3 days.
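
A minimal sketch of the combined semantics against the Iceberg
`ExpireSnapshots` API (illustrative only, assuming a hypothetical policy of
`maxAge=7` days and `versions=2`; this mirrors, but is not, the job's exact
code path):

```java
import java.util.concurrent.TimeUnit;
import org.apache.iceberg.Table;

class HistoryPolicySketch {
  // Hypothetical policy: maxAge = 7 (DAYS), versions = 2.
  static void expirePerHistoryPolicy(Table table) {
    // Phase 1 (maxAge): expire snapshots older than the cutoff.
    long cutoffMs = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
    table.expireSnapshots().cleanExpiredFiles(false).expireOlderThan(cutoffMs).commit();

    // Phase 2 (versions): retainLast() only bounds snapshots that are already
    // eligible for expiration, so "now" is used as the cutoff to make every
    // remaining snapshot eligible; the newest 2 then survive.
    table
        .expireSnapshots()
        .cleanExpiredFiles(false)
        .expireOlderThan(System.currentTimeMillis())
        .retainLast(2)
        .commit();
  }
}
```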

## Changes

- [ ] Client-facing API Changes
- [ ] Internal API Changes
- [ ] Bug Fixes
- [ ] New Features
- [ ] Performance Improvements
- [ ] Code Style
- [ ] Refactoring
- [ ] Documentation
- [ ] Tests

For all the boxes checked, please include additional details of the
changes made in this pull request.

## Testing Done

- [ ] Manually Tested on local docker setup. Please include commands run,
and their output.
- [x] Added new tests for the changes made.
- [x] Updated existing tests to reflect the changes made.
- [ ] No tests added or updated. Please explain why. If unsure, please
feel free to ask for help.
- [ ] Some other form of testing like staging or soak time in
production. Please explain.

For all the boxes checked, include a detailed description of the testing
done for the changes made in this pull request.

## Additional Information

- [ ] Breaking Changes
- [ ] Deprecations
- [ ] Large PR broken into smaller PRs, and PR plan linked in the
description.

For all the boxes checked, include additional details of the changes
made in this pull request.
Will-Lo authored Jan 6, 2025
1 parent f12fb15 commit 3e8d387
Showing 6 changed files with 280 additions and 32 deletions.
@@ -3,7 +3,9 @@
import com.linkedin.openhouse.jobs.client.JobsClient;
import com.linkedin.openhouse.jobs.client.TablesClient;
import com.linkedin.openhouse.jobs.client.model.JobConf;
import com.linkedin.openhouse.jobs.util.HistoryConfig;
import com.linkedin.openhouse.jobs.util.TableMetadata;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@@ -28,10 +30,18 @@ public JobConf.JobTypeEnum getType() {

@Override
protected List<String> getArgs() {
return Arrays.asList(
"--tableName", metadata.fqtn(),
"--granularity", "days",
"--count", "3");
HistoryConfig config = metadata.getHistoryConfig();
List<String> jobArgs = new ArrayList<>();
if (config.getMaxAge() > 0) {
jobArgs.addAll(
Arrays.asList(
"--maxAge", Integer.toString(config.getMaxAge()),
"--granularity", config.getGranularity().getValue()));
}
if (config.getVersions() > 0) {
jobArgs.addAll(Arrays.asList("--versions", Integer.toString(config.getVersions())));
}
return jobArgs;
}

@Override
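For illustration, a hedged sketch of what `getArgs()` now produces for a
hypothetical history policy (the `GranularityEnum` constant name and the
exact string returned by `getValue()` are assumptions based on the generated
tables-client model):

```java
import com.linkedin.openhouse.jobs.util.HistoryConfig;
import com.linkedin.openhouse.tables.client.model.History;

// Hypothetical policy: maxAge = 7 (DAYS), versions = 2.
HistoryConfig config =
    HistoryConfig.builder()
        .maxAge(7)
        .granularity(History.GranularityEnum.DAYS) // constant name assumed
        .versions(2)
        .build();
// getArgs() would then emit roughly:
//   ["--maxAge", "7", "--granularity", "days", "--versions", "2"]
```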
@@ -1,5 +1,6 @@
package com.linkedin.openhouse.jobs.spark;

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.linkedin.openhouse.common.stats.model.IcebergTableStats;
import com.linkedin.openhouse.jobs.util.SparkJobUtil;
@@ -20,6 +21,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.ExpireSnapshots;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
@@ -201,17 +203,36 @@ public void deleteStagedOrphanDirectory(
}

/** Expire snapshots on a given fully-qualified table name. */
public void expireSnapshots(String fqtn, long expireBeforeTimestampMs) {
expireSnapshots(getTable(fqtn), expireBeforeTimestampMs);
public void expireSnapshots(String fqtn, int maxAge, String granularity, int versions) {
expireSnapshots(getTable(fqtn), maxAge, granularity, versions);
}

/** Expire snapshots on a given {@link Table}. */
public void expireSnapshots(Table table, long expireBeforeTimestampMs) {
table
.expireSnapshots()
.cleanExpiredFiles(false)
.expireOlderThan(expireBeforeTimestampMs)
.commit();
/**
* Expire snapshots on a given {@link Table}. If maxAge is provided, it will expire snapshots
* older than maxAge in granularity timeunit. If versions is provided, it will retain the last
* versions snapshots. If both are provided, it will prioritize maxAge; only retain up to versions
* number of snapshots younger than the maxAge
*/
public void expireSnapshots(Table table, int maxAge, String granularity, int versions) {
ExpireSnapshots expireSnapshotsCommand = table.expireSnapshots().cleanExpiredFiles(false);

// maxAge is always defined with granularity
if (!granularity.isEmpty()) {
TimeUnit timeUnitGranularity = TimeUnit.valueOf(granularity.toUpperCase());
long expireBeforeTimestampMs =
System.currentTimeMillis() - timeUnitGranularity.toMillis(maxAge);
log.info("Expiring snapshots for table: {} older than {}ms", table, expireBeforeTimestampMs);
expireSnapshotsCommand.expireOlderThan(expireBeforeTimestampMs).commit();
}
if (versions > 0 && Iterators.size(table.snapshots().iterator()) > versions) {
log.info("Expiring snapshots for table: {} retaining last {} versions", table, versions);
// Note: retainLast keeps the last N snapshots that WOULD be expired, hence expireOlderThan
// currentTime
expireSnapshotsCommand
.expireOlderThan(System.currentTimeMillis())
.retainLast(versions)
.commit();
}
}

/**
@@ -3,7 +3,6 @@
import com.linkedin.openhouse.jobs.spark.state.StateManager;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
@@ -18,41 +17,68 @@
@Slf4j
public class SnapshotsExpirationSparkApp extends BaseTableSparkApp {
private final String granularity;
private final int count;
private final int maxAge;
private final int versions;

public static class DEFAULT_CONFIGURATION {
public static final int MAX_AGE = 3;
public static final String GRANULARITY = "DAYS";
public static final int VERSIONS = 0;
}

private static final String DEFAULT_GRANULARITY = "";

// By default do not define versions, and only retain snapshots based on max age
private static final String DEFAULT_VERSIONS = "0";

public SnapshotsExpirationSparkApp(
String jobId, StateManager stateManager, String fqtn, String granularity, int count) {
String jobId,
StateManager stateManager,
String fqtn,
int maxAge,
String granularity,
int versions) {
super(jobId, stateManager, fqtn);
this.granularity = granularity;
this.count = count;
if (maxAge == 0 && versions == 0) {
this.maxAge = DEFAULT_CONFIGURATION.MAX_AGE;
this.granularity = DEFAULT_CONFIGURATION.GRANULARITY;
this.versions = DEFAULT_CONFIGURATION.VERSIONS;
} else {
this.granularity = granularity;
this.maxAge = maxAge;
this.versions = versions;
}
}

@Override
protected void runInner(Operations ops) {
log.info(
"Snapshot expiration app start for table {}, expiring older than {} {}s",
"Snapshot expiration app start for table {}, expiring older than {} {}s or with more than {} versions",
fqtn,
count,
granularity);
long expireBeforeTimestampMs = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(count);
log.info("Expire snapshots before timestamp ms {}", expireBeforeTimestampMs);
ops.expireSnapshots(fqtn, expireBeforeTimestampMs);
maxAge,
granularity,
versions);
ops.expireSnapshots(fqtn, maxAge, granularity, versions);
}

public static void main(String[] args) {
List<Option> extraOptions = new ArrayList<>();
extraOptions.add(new Option("t", "tableName", true, "Fully-qualified table name"));
extraOptions.add(
new Option("a", "maxAge", true, "Delete snapshots older than <maxAge> <granularity>s"));
extraOptions.add(new Option("g", "granularity", true, "Granularity: day"));
extraOptions.add(
new Option("c", "count", true, "Delete snapshots older than <count> <granularity>s"));
new Option("v", "versions", true, "Number of versions to keep after snapshot expiration"));
CommandLine cmdLine = createCommandLine(args, extraOptions);

SnapshotsExpirationSparkApp app =
new SnapshotsExpirationSparkApp(
getJobId(cmdLine),
createStateManager(cmdLine),
cmdLine.getOptionValue("tableName"),
cmdLine.getOptionValue("granularity"),
Integer.parseInt(cmdLine.getOptionValue("count")));
Integer.parseInt(cmdLine.getOptionValue("maxAge", "0")),
cmdLine.getOptionValue("granularity", ""),
Integer.parseInt(cmdLine.getOptionValue("minVersions", "0")));
app.run();
}
}
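A quick sketch of the constructor's defaulting path (`jobId` and
`stateManager` are placeholders for values normally derived from the
command line):

```java
// With neither maxAge nor versions set (both 0), the app falls back to
// DEFAULT_CONFIGURATION (MAX_AGE = 3, GRANULARITY = "DAYS", VERSIONS = 0),
// i.e. the legacy "keep snapshots from the last 3 days" behavior.
SnapshotsExpirationSparkApp app =
    new SnapshotsExpirationSparkApp(jobId, stateManager, "db.tbl", 0, "", 0);
app.run();
```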
@@ -0,0 +1,18 @@
package com.linkedin.openhouse.jobs.util;

import com.linkedin.openhouse.tables.client.model.History;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

/** History Policy config class. This is app side representation of /tables policies->history */
@Builder
@Getter
@EqualsAndHashCode
@ToString
public class HistoryConfig {
private final int maxAge;
private final int versions;
private final History.GranularityEnum granularity;
}
@@ -24,6 +24,7 @@ public class TableMetadata extends Metadata {
protected boolean isClustered;
@Builder.Default protected @NonNull Map<String, String> jobExecutionProperties = new HashMap<>();
protected @Nullable RetentionConfig retentionConfig;
protected @Nullable HistoryConfig historyConfig;

public String fqtn() {
return String.format("%s.%s", dbName, tableName);
