diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
index fd7eef4d4cfb..1f4a5e4a687c 100644
--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
@@ -10,14 +10,25 @@
 *
* At present, we just permit an overriding Java class to wrap a C++
* implementation
+ *
+ * @param <T> the concrete type of the {@link AbstractSlice} used by this filter
+ */

diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java
--- a/java/src/main/java/org/rocksdb/AbstractComparator.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparator.java

 * Implementations of Comparators in Java should extend this class.
 */
public abstract class AbstractComparator
@@ -20,6 +20,11 @@ public abstract class AbstractComparator
super();
}
+ /**
+ * Construct an AbstractComparator.
+ *
+ * @param comparatorOptions options for the comparator.
+ */
protected AbstractComparator(final ComparatorOptions comparatorOptions) {
super(comparatorOptions.nativeHandle_);
}
@@ -59,7 +64,7 @@ ComparatorType getComparatorType() {
* Three-way key comparison. Implementations should provide a
* total order
* on keys that might be passed to it.
- *
+ *
* The implementation may modify the {@code ByteBuffer}s passed in, though
* it would be unconventional to modify the "limit" or any of the
* underlying bytes. As a callback, RocksJava will ensure that {@code a}
@@ -114,6 +119,11 @@ public void findShortSuccessor(final ByteBuffer key) {
// no-op
}
+ /**
+ * Returns true if we are using direct byte buffers.
+ *
+ * @return true if we are using direct byte buffers, false otherwise.
+ */
public final boolean usingDirectBuffers() {
return usingDirectBuffers(nativeHandle_);
}
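As an aside for reviewers (not part of this diff): a minimal sketch of a comparator built against the API documented above. The class name and the unsigned-bytewise ordering are illustrative assumptions; only name() and compare() are required overrides, and the sketch deliberately avoids mutating the buffers' positions.

import java.nio.ByteBuffer;
import org.rocksdb.AbstractComparator;
import org.rocksdb.ComparatorOptions;

// Illustrative sketch, not from the diff: an unsigned-bytewise comparator.
public class ExampleBytewiseComparator extends AbstractComparator {
  public ExampleBytewiseComparator(final ComparatorOptions comparatorOptions) {
    super(comparatorOptions);
  }

  @Override
  public String name() {
    return "example.BytewiseComparator";
  }

  @Override
  public int compare(final ByteBuffer a, final ByteBuffer b) {
    // Compare the remaining bytes of each key as unsigned values,
    // reading by absolute index so the buffer positions are untouched.
    final int common = Math.min(a.remaining(), b.remaining());
    for (int i = 0; i < common; i++) {
      final int cmp = Byte.toUnsignedInt(a.get(a.position() + i))
          - Byte.toUnsignedInt(b.get(b.position() + i));
      if (cmp != 0) {
        return cmp;
      }
    }
    // Shorter key sorts first when one key is a prefix of the other.
    return a.remaining() - b.remaining();
  }
}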
diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
index d0ceef93d419..9bd1ff7694bc 100644
--- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
@@ -12,7 +12,7 @@
* it holds methods which are called
* from C++ to interact with a Comparator
* written in Java.
- *
+ *
* Placing these bridge methods in this
* class keeps the API of the
* {@link org.rocksdb.AbstractComparator} clean.
diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java
index c9371c45eb0c..5c7f58ab6afb 100644
--- a/java/src/main/java/org/rocksdb/AbstractEventListener.java
+++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java
@@ -12,28 +12,120 @@
*/
@SuppressWarnings("PMD.AvoidDuplicateLiterals")
public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener {
+
+ /**
+ * Callback events that can be enabled.
+ */
public enum EnabledEventCallback {
+
+ /**
+ * Flush completed.
+ */
ON_FLUSH_COMPLETED((byte) 0x0),
+
+ /**
+ * Flush beginning.
+ */
ON_FLUSH_BEGIN((byte) 0x1),
+
+ /**
+ * Table file was deleted.
+ */
ON_TABLE_FILE_DELETED((byte) 0x2),
+
+ /**
+ * Compaction beginning.
+ */
ON_COMPACTION_BEGIN((byte) 0x3),
+
+ /**
+ * Compaction completed.
+ */
ON_COMPACTION_COMPLETED((byte) 0x4),
+
+ /**
+ * Table file created.
+ */
ON_TABLE_FILE_CREATED((byte) 0x5),
+
+ /**
+ * Started creation of Table file.
+ */
ON_TABLE_FILE_CREATION_STARTED((byte) 0x6),
+
+ /**
+ * Memtable has been sealed.
+ */
ON_MEMTABLE_SEALED((byte) 0x7),
+
+ /**
+ * Started deletion of Column Family handle.
+ */
ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8),
+
+ /**
+ * External file ingested.
+ */
ON_EXTERNAL_FILE_INGESTED((byte) 0x9),
+
+ /**
+ * Background error.
+ */
ON_BACKGROUND_ERROR((byte) 0xA),
+
+ /**
+ * Stall conditions have been changed.
+ */
ON_STALL_CONDITIONS_CHANGED((byte) 0xB),
+
+ /**
+ * File read has finished.
+ */
ON_FILE_READ_FINISH((byte) 0xC),
+
+ /**
+ * File write has finished.
+ */
ON_FILE_WRITE_FINISH((byte) 0xD),
+
+ /**
+ * File flush has finished.
+ */
ON_FILE_FLUSH_FINISH((byte) 0xE),
+
+ /**
+ * File sync has finished.
+ */
ON_FILE_SYNC_FINISH((byte) 0xF),
+
+ /**
+ * File range sync has finished.
+ */
ON_FILE_RANGE_SYNC_FINISH((byte) 0x10),
+
+ /**
+ * File truncation has finished.
+ */
ON_FILE_TRUNCATE_FINISH((byte) 0x11),
+
+ /**
+ * Closing a file has finished.
+ */
ON_FILE_CLOSE_FINISH((byte) 0x12),
+
+ /**
+ * Flag has been set to be notified on file IO.
+ */
SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13),
+
+ /**
+ * Error recovery beginning.
+ */
ON_ERROR_RECOVERY_BEGIN((byte) 0x14),
+
+ /**
+ * Error recovery completed.
+ */
ON_ERROR_RECOVERY_COMPLETED((byte) 0x15);
private final byte value;
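For context (illustrative, not part of this diff): a listener that enables only one of the callbacks above, assuming the varargs constructor that takes EnabledEventCallback values; the subclass name is hypothetical. Such a listener would be registered via something like Options#setListeners.

import org.rocksdb.AbstractEventListener;
import org.rocksdb.FlushJobInfo;
import org.rocksdb.RocksDB;

// Illustrative sketch: only ON_FLUSH_COMPLETED is marshalled over JNI.
public class FlushCompletedListener extends AbstractEventListener {
  public FlushCompletedListener() {
    super(EnabledEventCallback.ON_FLUSH_COMPLETED);
  }

  @Override
  public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
    System.out.println("flush completed for CF: " + flushJobInfo.getColumnFamilyName());
  }
}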
diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
index 173d63e9011e..8c500d8a5df2 100644
--- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
@@ -22,6 +22,11 @@ public abstract class AbstractImmutableNativeReference
*/
protected final AtomicBoolean owningHandle_;
+ /**
+ * Construct an AbstractImmutableNativeReference.
+ *
+ * @param owningHandle true if this Java object owns the underlying C++ object, false otherwise.
+ */
protected AbstractImmutableNativeReference(final boolean owningHandle) {
this.owningHandle_ = new AtomicBoolean(owningHandle);
}
diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
index ff9b8569fd89..86294fd9ac1a 100644
--- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
+++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -9,12 +9,26 @@
* The constructor is protected, so it will always be used as a base class.
*/
public class AbstractMutableOptions {
+ /**
+ * Separator between Key/Value pairs.
+ */
protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
+
+ /**
+ * Separator between Key and Value.
+ */
protected static final char KEY_VALUE_SEPARATOR = '=';
+
+ /**
+ * Separator between integers in an integer array.
+ */
static final String INT_ARRAY_INT_SEPARATOR = ":";
private static final String HAS_NOT_BEEN_SET = " has not been set";
+ /**
+ * the keys.
+ */
protected final String[] keys;
private final String[] values;
@@ -62,12 +76,24 @@ public String toString() {
return buffer.toString();
}
+ /**
+ * Builder base class for constructing Mutable Options.
+ *
+ * @param <T> the concrete type of the mutable options
+ */

diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
--- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
+++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java

public abstract class AbstractRocksIterator<P extends RocksObject>
    extends RocksObject implements RocksIteratorInterface {
  final P parent_;
+ /**
+ * Constructs an AbstractRocksIterator.
+ *
+ * @param parent the parent object from which the Rocks Iterator was created.
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator.
+ */
protected AbstractRocksIterator(final P parent,
final long nativeHandle) {
super(nativeHandle);
diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java
index f321b9910aeb..9dad909fdf7f 100644
--- a/java/src/main/java/org/rocksdb/AbstractSlice.java
+++ b/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -23,13 +23,23 @@
* the Java @see org.rocksdb.AbstractComparator subclass, it disposes the
* C++ BaseComparatorJniCallback subclass, which in turn destroys the
* Java @see org.rocksdb.AbstractSlice subclass Objects.
+ *
+ * @param <T> the concrete type of the slice value
+ */

diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
--- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
+++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java

+ * The updates are applied in the order in which they are added
+ * to the WriteBatch. For example, the value of "key" will be "v3"
+ * after the following batch is written:
+ *

diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java

 * Taken from include/rocksdb/advanced_options.h
+ *
+ * @param <T> the concrete type of the instance
 * Default: false

diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
--- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java

 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
+ *
+ * @param <T> the concrete type of the instance

diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
--- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
+++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
* See {@link IndexShorteningMode}.
*
* @param indexShortening the index shortening mode.
@@ -888,7 +892,7 @@ public BlockBasedTableConfig setCacheNumShardBits(
*
* @deprecated This option is now deprecated. No matter what value it
* is set to, it will behave as
- * if {@link #hashIndexAllowCollision()} == true.
+ * if {@code setHashIndexAllowCollision(true)}.
*/
@Deprecated
public boolean hashIndexAllowCollision() {
diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java
index 2c89bf218d1d..f4806fe57d72 100644
--- a/java/src/main/java/org/rocksdb/BuiltinComparator.java
+++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java
@@ -6,15 +6,16 @@
package org.rocksdb;
/**
- * Builtin RocksDB comparators
- *
- *

diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
--- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java

+ * Compaction filter for removing expired Cassandra data with TTL.
+ * It is also in charge of removing tombstones that have been
+ * promoted to kValue type after a series of merges during compaction.
 */
public class CassandraCompactionFilter
    extends AbstractCompactionFilter<Slice>

diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
--- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
+++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
* CassandraValueMergeOperator is a merge operator that merges two cassandra wide column
* values.
*/
public class CassandraValueMergeOperator extends MergeOperator {
+
+ /**
+ * Constructs a new CassandraValueMergeOperator.
+ *
+ * @param gcGracePeriodInSeconds the grace period in seconds for gc.
+ */
public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0));
}
+ /**
+ * Constructs a new CassandraValueMergeOperator.
+ *
+ * @param gcGracePeriodInSeconds the grace period in seconds for gc.
+ * @param operandsLimit the maximum size of the operands list before merge is applied.
+ */
public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit));
}
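Illustrative usage (not part of this diff), with a hypothetical ten-day grace period:

import org.rocksdb.CassandraValueMergeOperator;
import org.rocksdb.Options;

// Sketch: wire the merge operator into the options used to open the DB.
try (final CassandraValueMergeOperator mergeOperator =
         new CassandraValueMergeOperator(864_000); // 10 days, in seconds
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setMergeOperator(mergeOperator)) {
  // open the database with these options ...
}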
diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java
index 347221df6ed6..61ccc65e65d3 100644
--- a/java/src/main/java/org/rocksdb/Checkpoint.java
+++ b/java/src/main/java/org/rocksdb/Checkpoint.java
@@ -50,6 +50,22 @@ public void createCheckpoint(final String checkpointPath)
createCheckpoint(nativeHandle_, checkpointPath);
}
+ /**
+ * Exports all live SST files of a specified Column Family into {@code exportPath}.
+ *
+ * Always triggers a flush.
+ *
+ * @param columnFamilyHandle the column family to export.
+ *
+ * @param exportPath should not already exist and will be created by this API.
+ * SST files will be created as hard links when the directory specified
+ * is in the same partition as the db directory, copied otherwise.
+ *
+ * @return metadata about the exported SST files.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
public ExportImportFilesMetaData exportColumnFamily(final ColumnFamilyHandle columnFamilyHandle,
final String exportPath) throws RocksDBException {
return new ExportImportFilesMetaData(
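Illustrative usage of the new method (not part of this diff): db is assumed to be an open RocksDB instance, both paths are hypothetical, and the snippet is assumed to run inside a method that throws RocksDBException.

import org.rocksdb.Checkpoint;
import org.rocksdb.ExportImportFilesMetaData;

// Sketch: snapshot the DB, then export the default column family's SST files.
try (final Checkpoint checkpoint = Checkpoint.create(db)) {
  checkpoint.createCheckpoint("/backups/checkpoint-1");
  final ExportImportFilesMetaData metadata = checkpoint.exportColumnFamily(
      db.getDefaultColumnFamily(), "/backups/default-cf-export");
  // metadata describes the exported SST files
}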
diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java
index 5b3d2249250f..556220f8baa5 100644
--- a/java/src/main/java/org/rocksdb/ChecksumType.java
+++ b/java/src/main/java/org/rocksdb/ChecksumType.java
@@ -14,18 +14,20 @@ public enum ChecksumType {
*/
kNoChecksum((byte) 0),
/**
- * CRC32 Checksum
+ * CRC32 Checksum.
*/
kCRC32c((byte) 1),
/**
- * XX Hash
+ * XX Hash.
*/
kxxHash((byte) 2),
/**
- * XX Hash 64
+ * XX Hash 64.
*/
kxxHash64((byte) 3),
-
+ /**
+ * XX Hash v3.
+ */
kXXH3((byte) 4);
/**
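Illustrative usage (not part of this diff) of the newly documented kXXH3 value:

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.ChecksumType;
import org.rocksdb.Options;

// Sketch: select XXH3 block checksums for newly written table files.
final BlockBasedTableConfig tableConfig =
    new BlockBasedTableConfig().setChecksumType(ChecksumType.kXXH3);
try (final Options options = new Options()
         .setCreateIfMissing(true)
         .setTableFormatConfig(tableConfig)) {
  // open the database with these options ...
}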
diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java
index f9f6da74c081..784f80db2f50 100644
--- a/java/src/main/java/org/rocksdb/ClockCache.java
+++ b/java/src/main/java/org/rocksdb/ClockCache.java
@@ -14,6 +14,7 @@
* configuration parameter that is not provided by this API. This function
* simply returns a new LRUCache for functional compatibility.
*/
+@Deprecated
public class ClockCache extends Cache {
/**
* Create a new cache with a fixed size capacity.
@@ -22,6 +23,7 @@ public class ClockCache extends Cache {
*
* @param capacity The fixed size capacity of the cache
*/
+ @Deprecated
public ClockCache(final long capacity) {
super(newClockCache(capacity, -1, false));
}
@@ -39,6 +41,7 @@ public ClockCache(final long capacity) {
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
*/
+ @Deprecated
public ClockCache(final long capacity, final int numShardBits) {
super(newClockCache(capacity, numShardBits, false));
}
@@ -58,6 +61,7 @@ public ClockCache(final long capacity, final int numShardBits) {
* by hash of the key
* @param strictCapacityLimit insert to the cache will fail when cache is full
*/
+ @Deprecated
public ClockCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit) {
super(newClockCache(capacity, numShardBits, strictCapacityLimit));
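Since the deprecation points users at LRUCache, a migration sketch (not part of this diff; capacity value is arbitrary):

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;

// Sketch: replace `new ClockCache(capacity)` with an LRUCache of the same
// capacity, which is what ClockCache now returns internally anyway.
try (final LRUCache blockCache = new LRUCache(64L * 1024 * 1024)) {
  final BlockBasedTableConfig tableConfig =
      new BlockBasedTableConfig().setBlockCache(blockCache);
  try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
    // open the database with these options ...
  }
}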
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
index 9fd63e768052..1f12f5e90915 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -123,6 +123,11 @@ public int hashCode() {
}
}
+ /**
+ * Returns true if this is the handle for the default column family.
+ *
+ * @return true if this is the handle for the default column family, false otherwise.
+ */
protected boolean isDefaultColumnFamily() {
return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_;
}
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
index 4776773bd8bd..40c7c5806409 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -8,6 +8,11 @@
import java.util.Collection;
import java.util.List;
+/**
+ * Interface for Column Family Options.
+ *
+ * @param <T> the concrete type of the instance
+ */

diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java
--- a/java/src/main/java/org/rocksdb/CompactionJobStats.java
+++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java

 * This could be a new value or a deletion entry for that key so this field
* sums up all updated and deleted keys.
*
@@ -149,7 +155,7 @@ public long totalInputRawValueBytes() {
/**
* Get the number of deletion entries before compaction.
- *
+ *
* Deletion entries can disappear after compaction because they expired.
*
* @return the number of deletion entries before compaction.
@@ -182,7 +188,7 @@ public long numCorruptKeys() {
/**
* Get the Time spent on file's Append() call.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on file's Append() call.
@@ -193,7 +199,7 @@ public long fileWriteNanos() {
/**
* Get the Time spent on sync file range.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on sync file range.
@@ -204,7 +210,7 @@ public long fileRangeSyncNanos() {
/**
* Get the Time spent on file fsync.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on file fsync.
@@ -215,7 +221,7 @@ public long fileFsyncNanos() {
/**
* Get the Time spent on preparing file write (fallocate, etc)
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on preparing file write (fallocate, etc).
diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java
index 2c7e391fbf78..dd6dea6c5613 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptions.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptions.java
@@ -14,13 +14,16 @@
*/
public class CompactionOptions extends RocksObject {
+ /**
+ * Constructs a new CompactionOptions.
+ */
public CompactionOptions() {
super(newCompactionOptions());
}
/**
* Get the compaction output compression type.
- *
+ *
* See {@link #setCompression(CompressionType)}.
*
* @return the compression type.
@@ -32,9 +35,9 @@ public CompressionType compression() {
/**
* Set the compaction output compression type.
- *
+ *
* Default: snappy
- *
+ *
* If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
* RocksDB will choose compression type according to the
* {@link ColumnFamilyOptions#compressionType()}, taking into account
@@ -52,7 +55,7 @@ public CompactionOptions setCompression(final CompressionType compression) {
/**
* Get the compaction output file size limit.
- *
+ *
* See {@link #setOutputFileSizeLimit(long)}.
*
* @return the file size limit.
@@ -63,7 +66,7 @@ public long outputFileSizeLimit() {
/**
* Compaction will create files of size {@link #outputFileSizeLimit()}.
- *
+ *
* Default: 2^64-1, which means that compaction will create a single file
*
* @param outputFileSizeLimit the size limit
@@ -90,9 +93,9 @@ public int maxSubcompactions() {
* This value represents the maximum number of threads that will
* concurrently perform a compaction job by breaking it into multiple,
* smaller ones that are run simultaneously.
- *
+ *
* Default: 0 (i.e. no subcompactions)
- *
+ *
* If > 0, it will replace the option in
* {@link DBOptions#maxSubcompactions()} for this compaction.
*
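Illustrative configuration (not part of this diff) combining the three options documented above; the values are arbitrary:

import org.rocksdb.CompactionOptions;
import org.rocksdb.CompressionType;

// Sketch: options for a manual compaction, e.g. via RocksDB#compactFiles.
try (final CompactionOptions compactionOptions = new CompactionOptions()
         .setCompression(CompressionType.ZSTD_COMPRESSION)
         .setOutputFileSizeLimit(64L * 1024 * 1024)
         .setMaxSubcompactions(4)) {
  // pass compactionOptions to a manual compaction ...
}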
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
index 92b21fc50c30..d0c8ccfe9c08 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
@@ -10,6 +10,9 @@
*/
public class CompactionOptionsFIFO extends RocksObject {
+ /**
+ * Constructs a new CompactionOptionsFIFO.
+ */
public CompactionOptionsFIFO() {
super(newCompactionOptionsFIFO());
}
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
index 4d2ebdb1f562..c18a04cd65db 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
@@ -10,6 +10,9 @@
*/
public class CompactionOptionsUniversal extends RocksObject {
+ /**
+ * Constructs a new CompactionOptionsUniversal.
+ */
public CompactionOptionsUniversal() {
super(newCompactionOptionsUniversal());
}
diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java
index 46ec33f3f141..a6050c716a18 100644
--- a/java/src/main/java/org/rocksdb/CompactionReason.java
+++ b/java/src/main/java/org/rocksdb/CompactionReason.java
@@ -5,7 +5,14 @@
package org.rocksdb;
+/**
+ * Reasons for compaction.
+ */
public enum CompactionReason {
+
+ /**
+ * Unknown.
+ */
kUnknown((byte)0x0),
/**
diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java
index 7b955a7a248c..6a1de336abfb 100644
--- a/java/src/main/java/org/rocksdb/CompactionStyle.java
+++ b/java/src/main/java/org/rocksdb/CompactionStyle.java
@@ -35,9 +35,24 @@
* FIFO Compaction
*/
public enum CompactionStyle {
+ /**
+ * Level Compaction.
+ */
LEVEL((byte) 0x0),
+
+ /**
+ * Universal Compaction.
+ */
UNIVERSAL((byte) 0x1),
+
+ /**
+ * First-in First-out Compaction.
+ */
FIFO((byte) 0x2),
+
+ /**
+ * No compaction.
+ */
NONE((byte) 0x3);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java
index ee5beb8f6edc..abc8a5082f7e 100644
--- a/java/src/main/java/org/rocksdb/ComparatorOptions.java
+++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java
@@ -13,6 +13,9 @@
* instance becomes out-of-scope to release the allocated memory in C++.
*/
public class ComparatorOptions extends RocksObject {
+ /**
+ * Constructs a new ComparatorOptions.
+ */
public ComparatorOptions() {
super(newComparatorOptions());
}
diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java
index 2e1ee57310b1..eabfef5a13c0 100644
--- a/java/src/main/java/org/rocksdb/CompressionOptions.java
+++ b/java/src/main/java/org/rocksdb/CompressionOptions.java
@@ -10,33 +10,93 @@
*/
public class CompressionOptions extends RocksObject {
+ /**
+ * RocksDB's generic default compression level. Internally it'll be translated
+ * to the default compression level specific to the library being used.
+ */
+ public static final int DEFAULT_COMPRESSION_LEVEL = 32767;
+
+ /**
+ * Constructs a new CompressionOptions.
+ */
public CompressionOptions() {
super(newCompressionOptions());
}
+ /**
+ * Set the Window size.
+ * Zlib only.
+ *
+ * @param windowBits the size of the window.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setWindowBits(final int windowBits) {
setWindowBits(nativeHandle_, windowBits);
return this;
}
+ /**
+ * Get the Window size.
+ * Zlib only.
+ *
+ * @return the size of the window.
+ */
public int windowBits() {
return windowBits(nativeHandle_);
}
+ /**
+ * Compression "level" applicable to zstd, zlib, LZ4, and LZ4HC. Except for
+ * {@link #DEFAULT_COMPRESSION_LEVEL}, the meaning of each value depends
+ * on the compression algorithm. Decreasing across non-
+ * {@link #DEFAULT_COMPRESSION_LEVEL} values will either favor speed over
+ * compression ratio or have no effect.
+ *
+ * In LZ4 specifically, the absolute value of a negative `level` internally
+ * configures the `acceleration` parameter. For example, set `level=-10` for
+ * `acceleration=10`. This negation is necessary to ensure decreasing `level`
+ * values favor speed over compression ratio.
+ *
+ * @param level the compression level.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setLevel(final int level) {
setLevel(nativeHandle_, level);
return this;
}
+ /**
+ * Get the Compression "level".
+ *
+ * See {@link #setLevel(int)}
+ *
+ * @return the compression level.
+ */
public int level() {
return level(nativeHandle_);
}
+ /**
+ * Set the compression strategy.
+ * Zlib only.
+ *
+ * @param strategy the strategy.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setStrategy(final int strategy) {
setStrategy(nativeHandle_, strategy);
return this;
}
+ /**
+ * Get the compression strategy.
+ * Zlib only.
+ *
+ * @return the strategy.
+ */
public int strategy() {
return strategy(nativeHandle_);
}
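A configuration sketch (not part of this diff); the level is arbitrary:

import org.rocksdb.CompressionOptions;
import org.rocksdb.CompressionType;
import org.rocksdb.Options;

// Sketch: ZSTD at level 3; windowBits and strategy would only take effect
// for Zlib, as the javadoc above notes.
try (final CompressionOptions compressionOptions =
         new CompressionOptions().setLevel(3);
     final Options options = new Options()
         .setCompressionType(CompressionType.ZSTD_COMPRESSION)
         .setCompressionOptions(compressionOptions)) {
  // open the database with these options ...
}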
diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java
index d1ecf0ac84c5..4f683d036735 100644
--- a/java/src/main/java/org/rocksdb/CompressionType.java
+++ b/java/src/main/java/org/rocksdb/CompressionType.java
@@ -14,14 +14,49 @@
* compression method (if any) is used to compress a block.

diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java
--- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java

* Features:
* - Throttle the deletion rate of the SST files.
* - Keep track of the total size of all SST files.
@@ -167,7 +171,7 @@ public interface DBOptionsInterface
* Limitations:
* - Only track and throttle deletes of SST files in
* first db_path (db_name if db_paths is empty).
@@ -208,7 +212,7 @@ public interface DBOptionsInterface
* Default: 16
*
* @param maxFileOpeningThreads the maximum number of threads to use to
@@ -222,7 +226,7 @@ public interface DBOptionsInterface
* Default: 16
*
* @return the maximum number of threads to use to open files
@@ -278,27 +282,27 @@ public interface DBOptionsInterface
* For example, you have a flash device with 10GB allocated for the DB,
* as well as a hard drive of 2TB, you should config it to be:
* [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
- *
+ *
* The system will try to guarantee data under each path is close to but
* not larger than the target size. But current and future file sizes used
* when determining where to place a file are based on best-effort estimation,
* which means there is a chance that the actual size under the directory
* is slightly more than target size under some workloads. User should give
* some buffer room for those cases.
- *
+ *
* If none of the paths has sufficient room to place a file, the file will
* be placed to the last path anyway, despite the target size.
- *
+ *
* Placing newer data to earlier paths is also best-efforts. User should
* expect user files to be placed in higher levels in some extreme cases.
- *
+ *
* If left empty, only one path will be used, which is db_name passed when
* opening the DB.
- *
+ *
* Default: empty
*
* @param dbPaths the paths and target sizes
@@ -311,27 +315,27 @@ public interface DBOptionsInterface
* For example, you have a flash device with 10GB allocated for the DB,
* as well as a hard drive of 2TB, you should config it to be:
* [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
- *
+ *
* The system will try to guarantee data under each path is close to but
* not larger than the target size. But current and future file sizes used
* when determining where to place a file are based on best-effort estimation,
* which means there is a chance that the actual size under the directory
* is slightly more than target size under some workloads. User should give
* some buffer room for those cases.
- *
+ *
* If none of the paths has sufficient room to place a file, the file will
* be placed to the last path anyway, despite the target size.
- *
+ *
* Placing newer data to earlier paths is also best-efforts. User should
* expect user files to be placed in higher levels in some extreme cases.
- *
+ *
* If left empty, only one path will be used, which is db_name passed when
* opening the DB.
- *
+ *
* Default: {@link java.util.Collections#emptyList()}
*
* @return dbPaths the paths and target sizes
@@ -352,7 +356,7 @@ public interface DBOptionsInterface
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
@@ -377,7 +381,7 @@ public interface DBOptionsInterface
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be in kept the specified dir.
@@ -439,7 +443,7 @@ public interface DBOptionsInterface
* Specifies the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
@@ -463,7 +467,7 @@ public interface DBOptionsInterface
* Returns the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
@@ -542,16 +546,16 @@ public interface DBOptionsInterface
* If non-zero, we will reuse previously written log files for new
* logs, overwriting the old data. The value indicates how many
* such files we will keep around at any point in time for later
* use.
- *
+ *
* This is more efficient because the blocks are already
* allocated and fdatasync does not need to update the inode after
* each write.
- *
+ *
* Default: 0
*
* @param recycleLogFileNum the number of log files to keep for recycling
@@ -562,16 +566,16 @@ public interface DBOptionsInterface
* If non-zero, we will reuse previously written log files for new
* logs, overwriting the old data. The value indicates how many
* such files we will keep around at any point in time for later
* use.
- *
+ *
* This is more efficient because the blocks are already
* allocated and fdatasync does not need to update the inode after
* each write.
- *
+ *
* Default: 0
*
* @return the number of log files kept for recycling
@@ -617,17 +621,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -643,17 +647,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -668,17 +672,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -694,17 +698,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -720,7 +724,7 @@ public interface DBOptionsInterface
* Default: 1 MB
*
* @param maxWriteBatchGroupSizeBytes the maximum limit of number of bytes, see description.
@@ -732,7 +736,7 @@ public interface DBOptionsInterface
* Default: 1 MB
*
* @return the maximum limit of number of bytes, see description.
@@ -885,13 +889,13 @@ public interface DBOptionsInterface
* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
* which enforces a limit for a single memtable.
- *
+ *
* This feature is disabled by default. Specify a non-zero value
* to enable it.
- *
+ *
* Default: 0 (disabled)
*
* @param dbWriteBufferSize the size of the write buffer
@@ -903,7 +907,7 @@ public interface DBOptionsInterface
* Check
* https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager
* for more details on when to use it
@@ -925,13 +929,13 @@ public interface DBOptionsInterface
* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
* which enforces a limit for a single memtable.
- *
+ *
* This feature is disabled by default. Specify a non-zero value
* to enable it.
- *
+ *
* Default: 0 (disabled)
*
* @return the size of the write buffer
@@ -941,7 +945,7 @@ public interface DBOptionsInterface
* Default: {@link AccessHint#NORMAL}
*
* @param accessHint The access hint
@@ -953,7 +957,7 @@ public interface DBOptionsInterface
* Default: {@link AccessHint#NORMAL}
*
* @return The access hint
@@ -971,11 +975,11 @@ public interface DBOptionsInterface
* This option is currently honored only on Windows
- *
+ *
* Default: 1 Mb
- *
+ *
* Special value: 0 - means do not maintain per instance buffer. Allocate
* per request buffer and avoid locking.
*
@@ -996,11 +1000,11 @@ public interface DBOptionsInterface
* This option is currently honored only on Windows
- *
+ *
* Default: 1 Mb
- *
+ *
* Special value: 0 - means do not maintain per instance buffer. Allocate
* per request buffer and avoid locking.
*
@@ -1034,7 +1038,7 @@ public interface DBOptionsInterface
* Note: the RocksJava API currently only supports EventListeners implemented in Java.
* It could be extended in future to also support adding/removing EventListeners implemented in
* C++.
@@ -1048,7 +1052,7 @@ public interface DBOptionsInterface
* Note: the RocksJava API currently only supports EventListeners implemented in Java.
* It could be extended in future to also support adding/removing EventListeners implemented in
* C++.
@@ -1060,7 +1064,7 @@ public interface DBOptionsInterface
* Default: false
*
* @param enableThreadTracking true to enable tracking
@@ -1072,7 +1076,7 @@ public interface DBOptionsInterface
* Default: false
*
* @return true if tracking is enabled
@@ -1083,7 +1087,7 @@ public interface DBOptionsInterface
* If {@link #enablePipelinedWrite()} is true, separate write thread queue is
* maintained for WAL write and memtable write. A write thread first enters the WAL
* writer queue and then memtable writer queue. Pending thread on the WAL
@@ -1091,7 +1095,7 @@ public interface DBOptionsInterface
* Default: false
*
* @param enablePipelinedWrite true to enabled pipelined writes
@@ -1118,7 +1122,7 @@ public interface DBOptionsInterface
* By default, i.e., when it is false, rocksdb does not advance the sequence
* number for new snapshots unless all the writes with lower sequence numbers
* are already finished. This provides the immutability that we expect from
@@ -1263,7 +1267,7 @@ T setEnableWriteThreadAdaptiveYield(
* compaction decision by loading table properties from many files.
* Turning off this feature will improve DBOpen time especially in
* disk environment.
- *
+ *
* Default: false
*
* @param skipStatsUpdateOnDbOpen true if updating stats will be skipped
@@ -1277,7 +1281,7 @@ T setEnableWriteThreadAdaptiveYield(
* compaction decision by loading table properties from many files.
* Turning off this feature will improve DBOpen time especially in
* disk environment.
- *
+ *
* Default: false
*
* @return true if updating stats will be skipped
@@ -1291,7 +1295,7 @@ T setEnableWriteThreadAdaptiveYield(
* We'll still check that all required sst files exist.
* If {@code paranoid_checks} is false, this option is ignored, and sst files are
* not checked at all.
- *
+ *
* Default: false
*
* @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will not be checked
@@ -1307,7 +1311,7 @@ T setEnableWriteThreadAdaptiveYield(
* We'll still check that all required sst files exist.
* If {@code paranoid_checks} is false, this option is ignored, and sst files are
* not checked at all.
- *
+ *
* Default: false
*
* @return true, if file sizes will not be checked when calling {@link RocksDB#open(String)}.
@@ -1316,7 +1320,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Recovery mode to control the consistency while replaying WAL
- *
+ *
* Default: {@link WALRecoveryMode#PointInTimeRecovery}
*
* @param walRecoveryMode The WAL recover mode
@@ -1327,7 +1331,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Recovery mode to control the consistency while replaying WAL
- *
+ *
* Default: {@link WALRecoveryMode#PointInTimeRecovery}
*
* @return The WAL recover mode
@@ -1337,7 +1341,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* if set to false then recovery will fail when a prepared
* transaction is encountered in the WAL
- *
+ *
* Default: false
*
* @param allow2pc true if two-phase-commit is enabled
@@ -1349,7 +1353,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* if set to false then recovery will fail when a prepared
* transaction is encountered in the WAL
- *
+ *
* Default: false
*
* @return true if two-phase-commit is enabled
@@ -1358,7 +1362,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* A global cache for table-level rows.
- *
+ *
* Default: null (disabled)
*
* @param rowCache The global row cache
@@ -1369,7 +1373,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* A global cache for table-level rows.
- *
+ *
* Default: null (disabled)
*
* @return The global row cache
@@ -1401,7 +1405,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
* persisted.
- *
+ *
* DEFAULT: false
*
* @param failIfOptionsFileError true if we should fail if there is an error
@@ -1415,7 +1419,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
* persisted.
- *
+ *
* DEFAULT: false
*
* @return true if we should fail if there is an error in the options file
@@ -1425,7 +1429,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* If true, then print malloc stats together with rocksdb.stats
* when printing to LOG.
- *
+ *
* DEFAULT: false
*
* @param dumpMallocStats true if malloc stats should be printed to LOG
@@ -1437,7 +1441,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* If true, then print malloc stats together with rocksdb.stats
* when printing to LOG.
- *
+ *
* DEFAULT: false
*
* @return true if malloc stats should be printed to LOG
@@ -1450,7 +1454,7 @@ T setEnableWriteThreadAdaptiveYield(
* to avoid (but not guarantee not to) flush during recovery. Also, existing
* WAL logs will be kept, so that if crash happened before flush, we still
* have logs to recover from.
- *
+ *
* DEFAULT: false
*
* @param avoidFlushDuringRecovery true to try to avoid (but not guarantee
@@ -1466,7 +1470,7 @@ T setEnableWriteThreadAdaptiveYield(
* to avoid (but not guarantee not to) flush during recovery. Also, existing
* WAL logs will be kept, so that if crash happened before flush, we still
* have logs to recover from.
- *
+ *
* DEFAULT: false
*
* @return true to try to avoid (but not guarantee not to) flush during
@@ -1482,7 +1486,7 @@ T setEnableWriteThreadAdaptiveYield(
* 1) Disable some internal optimizations around SST file compression
* 2) Reserve bottom-most level for ingested files only.
* 3) Note that num_levels should be >= 3 if this option is turned on.
- *
+ *
* DEFAULT: false
*
* @param allowIngestBehind true to allow ingest behind, false to disallow.
@@ -1505,7 +1509,7 @@ T setEnableWriteThreadAdaptiveYield(
* allows the memtable writes not to lag behind other writes. It can be used
* to optimize MySQL 2PC in which only the commits, which are serial, write to
* memtable.
- *
+ *
* DEFAULT: false
*
* @param twoWriteQueues true to enable two write queues, false otherwise.
@@ -1525,7 +1529,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true WAL is not flushed automatically after each write. Instead it
* relies on manual invocation of FlushWAL to write the WAL buffer to its
* file.
- *
+ *
* DEFAULT: false
*
* @param manualWalFlush true to set disable automatic WAL flushing,
@@ -1553,7 +1557,7 @@ T setEnableWriteThreadAdaptiveYield(
* For manual flush, application has to specify which column families to
* flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
* For auto-triggered flush, RocksDB atomically flushes ALL column families.
- *
+ *
* Currently, any WAL-enabled writes after atomic flush may be replayed
* independently if the process crashes later and tries to recover.
*
@@ -1565,7 +1569,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Determine if atomic flush of multiple column families is enabled.
- *
+ *
* See {@link #setAtomicFlush(boolean)}.
*
* @return true if atomic flush is enabled.
@@ -1666,7 +1670,7 @@ T setEnableWriteThreadAdaptiveYield(
* The number of bytes to prefetch when reading the log. This is mostly useful
* for reading a remotely located log, as it can save the number of
* round-trips. If 0, then the prefetching is disabled.
- *
+ *
* Default: 0
*
* @param logReadaheadSize the number of bytes to prefetch when reading the log.
@@ -1678,7 +1682,7 @@ T setEnableWriteThreadAdaptiveYield(
* The number of bytes to prefetch when reading the log. This is mostly useful
* for reading a remotely located log, as it can save the number of
* round-trips. If 0, then the prefetching is disabled.
- *
+ *
* Default: 0
*
* @return the number of bytes to prefetch when reading the log.
@@ -1721,7 +1725,7 @@ T setEnableWriteThreadAdaptiveYield(
* can be auto-recovered (e.g., retryable IO Error during Flush or WAL write),
* then db resume is called in background to recover from the error. If this
* value is 0 or negative, db resume will not be called.
- *
+ *
* Default: INT_MAX
*
* @param maxBgerrorResumeCount maximum number of times db resume should be called when IO Error
@@ -1737,7 +1741,7 @@ T setEnableWriteThreadAdaptiveYield(
* can be auto-recovered (e.g., retryable IO Error during Flush or WAL write),
* then db resume is called in background to recover from the error. If this
* value is 0 or negative, db resume will not be called.
- *
+ *
* Default: INT_MAX
*
* @return maximum number of times db resume should be called when IO Error happens.
@@ -1748,7 +1752,7 @@ T setEnableWriteThreadAdaptiveYield(
* If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
* This option decides how long to wait to retry the next resume if the
* previous resume fails and the redo resume conditions are satisfied.
- *
+ *
* Default: 1000000 (microseconds).
*
* @param bgerrorResumeRetryInterval how many microseconds to wait between DB resume attempts.
@@ -1760,7 +1764,7 @@ T setEnableWriteThreadAdaptiveYield(
* If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
* This option decides how long to wait to retry the next resume if the
* previous resume fails and the redo resume conditions are satisfied.
- *
+ *
* Default: 1000000 (microseconds).
*
* @return the instance of the current object.
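A sketch (not part of this diff) exercising a few of the DBOptionsInterface settings whose javadoc is touched above; the values are arbitrary:

import org.rocksdb.DBOptions;

// Sketch: archived WALs pruned once older than one hour or once the archive
// exceeds 1 GB; up to four log files kept around for recycling.
try (final DBOptions dbOptions = new DBOptions()
         .setCreateIfMissing(true)
         .setWalTtlSeconds(3600)
         .setWalSizeLimitMB(1024)
         .setRecycleLogFileNum(4)) {
  // open the database with these options ...
}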
diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java
index 3f0b67557c5e..3895b258556e 100644
--- a/java/src/main/java/org/rocksdb/DbPath.java
+++ b/java/src/main/java/org/rocksdb/DbPath.java
@@ -14,6 +14,12 @@ public class DbPath {
final Path path;
final long targetSize;
+ /**
+ * Constructs a DbPath.
+ *
+ * @param path the path.
+ * @param targetSize the target size.
+ */
public DbPath(final Path path, final long targetSize) {
this.path = path;
this.targetSize = targetSize;
diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java
index 5aa0866ffe29..b5741fe636b7 100644
--- a/java/src/main/java/org/rocksdb/DirectSlice.java
+++ b/java/src/main/java/org/rocksdb/DirectSlice.java
@@ -16,6 +16,10 @@
* values consider using @see org.rocksdb.Slice
*/
public class DirectSlice extends AbstractSlice<ByteBuffer> {

diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java
--- a/java/src/main/java/org/rocksdb/Filter.java
+++ b/java/src/main/java/org/rocksdb/Filter.java

 * Note that this function should be called only after all
* RocksDB instances referencing the filter are closed.
* Otherwise an undefined behavior will occur.
diff --git a/java/src/main/java/org/rocksdb/FilterPolicyType.java b/java/src/main/java/org/rocksdb/FilterPolicyType.java
index 6a693ee4039d..c7051ac07be6 100644
--- a/java/src/main/java/org/rocksdb/FilterPolicyType.java
+++ b/java/src/main/java/org/rocksdb/FilterPolicyType.java
@@ -9,6 +9,9 @@
* IndexType used in conjunction with BlockBasedTable.
*/
public enum FilterPolicyType {
+ /**
+ * Unknown filter policy.
+ */
kUnknownFilterPolicy((byte) 0),
/**
@@ -25,7 +28,7 @@ public enum FilterPolicyType {
*/
kRibbonFilterPolicy((byte) 2);
- public Filter createFilter(final long handle, final double param) {
+ Filter createFilter(final long handle, final double param) {
if (this == kBloomFilterPolicy) {
return new BloomFilter(handle, param);
}
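For context (illustrative, not part of this diff): the usual way a filter policy reaches RocksDB from Java, assuming BlockBasedTableConfig#setFilterPolicy:

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Options;

// Sketch: a Bloom filter policy at ~10 bits per key.
try (final BloomFilter bloomFilter = new BloomFilter(10)) {
  final BlockBasedTableConfig tableConfig =
      new BlockBasedTableConfig().setFilterPolicy(bloomFilter);
  try (final Options options = new Options().setTableFormatConfig(tableConfig)) {
    // open the database with these options ...
  }
}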
diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java
index 414d3a2f332e..52af3afe1795 100644
--- a/java/src/main/java/org/rocksdb/FlushJobInfo.java
+++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information about a flush job.
+ */
public class FlushJobInfo {
private final long columnFamilyId;
private final String columnFamilyName;
diff --git a/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java
index 9d486cda16bd..3a5bcf2d485d 100644
--- a/java/src/main/java/org/rocksdb/FlushReason.java
+++ b/java/src/main/java/org/rocksdb/FlushReason.java
@@ -5,18 +5,68 @@
package org.rocksdb;
+/**
+ * Reasons for a flush.
+ */
public enum FlushReason {
+ /**
+ * Other.
+ */
OTHERS((byte) 0x00),
+
+ /**
+ * Get live files.
+ */
GET_LIVE_FILES((byte) 0x01),
+
+ /**
+ * Shutdown.
+ */
SHUTDOWN((byte) 0x02),
+
+ /**
+ * External file ingestion.
+ */
EXTERNAL_FILE_INGESTION((byte) 0x03),
+
+ /**
+ * Manual compaction.
+ */
MANUAL_COMPACTION((byte) 0x04),
+
+ /**
+ * Write buffer manager.
+ */
WRITE_BUFFER_MANAGER((byte) 0x05),
+
+ /**
+ * Write buffer full.
+ */
WRITE_BUFFER_FULL((byte) 0x06),
+
+ /**
+ * Test.
+ */
TEST((byte) 0x07),
+
+ /**
+ * Delete file(s).
+ */
DELETE_FILES((byte) 0x08),
+
+ /**
+ * Automatic compaction.
+ */
AUTO_COMPACTION((byte) 0x09),
+
+ /**
+ * Manual flush.
+ */
MANUAL_FLUSH((byte) 0x0a),
+
+ /**
+ * Error recovery.
+ */
ERROR_RECOVERY((byte) 0xb);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java
index a2afafe39ebd..a7ab4902f3f3 100644
--- a/java/src/main/java/org/rocksdb/GetStatus.java
+++ b/java/src/main/java/org/rocksdb/GetStatus.java
@@ -12,7 +12,15 @@
* If the target of the fetch is not big enough, this may be bigger than the contents of the target.
*/
public class GetStatus {
+
+ /**
+ * The status of the request to fetch into the buffer.
+ */
public final Status status;
+
+ /**
+ * The size of the data, which may be bigger than the buffer.
+ */
public final int requiredSize;
/**
diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
index a9868df57d7b..1ee66c4117ca 100644
--- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
@@ -15,22 +15,42 @@
* and post a warning in the LOG.
*/
public class HashLinkedListMemTableConfig extends MemTableConfig {
+
+ /**
+ * The default number of buckets.
+ */
public static final long DEFAULT_BUCKET_COUNT = 50_000;
+
+ /**
+ * The default size of huge TLB pages.
+ */
public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0;
+
+ /**
+ * The default log threshold for bucket entries.
+ */
public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096;
+
+ /**
+ * The default of whether to log when a bucket is flushed.
+ */
public static final boolean
DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true;
- public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256;
/**
- * HashLinkedListMemTableConfig constructor
+ * The default threshold for determining when to use a Skip List.
+ */
+ public static final int DEFAULT_THRESHOLD_USE_SKIPLIST = 256;
+
+ /**
+ * Constructs a HashLinkedListMemTableConfig.
*/
public HashLinkedListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE;
bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES;
ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH;
- thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST;
+ thresholdUseSkiplist_ = DEFAULT_THRESHOLD_USE_SKIPLIST;
}
/**
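Illustrative usage (not part of this diff); the bucket count and prefix length are arbitrary:

import org.rocksdb.HashLinkedListMemTableConfig;
import org.rocksdb.Options;

// Sketch: a hash-linked-list memtable. A prefix extractor must be configured,
// otherwise RocksDB falls back to a skip-list and warns in the LOG.
try (final Options options = new Options()
         .setMemTableConfig(new HashLinkedListMemTableConfig().setBucketCount(100_000))
         .useFixedLengthPrefixExtractor(8)) {
  // open the database with these options ...
}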
diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
index 80d6b7115182..6a250d59920e 100644
--- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
@@ -15,12 +15,24 @@
* and post a warning in the LOG.
*/
public class HashSkipListMemTableConfig extends MemTableConfig {
+
+ /**
+ * The default number of buckets.
+ */
public static final int DEFAULT_BUCKET_COUNT = 1_000_000;
+
+ /**
+ * The default branching factor.
+ */
public static final int DEFAULT_BRANCHING_FACTOR = 4;
+
+ /**
+ * The default skip list height.
+ */
public static final int DEFAULT_HEIGHT = 4;
/**
- * HashSkipListMemTableConfig constructor
+ * Constructs a HashSkipListMemTableConfig.
*/
public HashSkipListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java
index 81d890883487..1fdd0c26e9a7 100644
--- a/java/src/main/java/org/rocksdb/HistogramData.java
+++ b/java/src/main/java/org/rocksdb/HistogramData.java
@@ -5,6 +5,9 @@
package org.rocksdb;
+/**
+ * Histogram Data.
+ */
public class HistogramData {
private final double median_;
private final double percentile95_;
@@ -16,12 +19,34 @@ public class HistogramData {
private final long sum_;
private final double min_;
+ /**
+ * Constructs a HistogramData.
+ *
+ * @param median the median value.
+ * @param percentile95 the 95th percentile value.
+ * @param percentile99 the 99th percentile value.
+ * @param average the average value.
+ * @param standardDeviation the value of the standard deviation.
+ */
public HistogramData(final double median, final double percentile95,
final double percentile99, final double average,
final double standardDeviation) {
this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0);
}
+ /**
+ * Constructs a HistogramData.
+ *
+ * @param median the median value.
+ * @param percentile95 the 95th percentile value.
+ * @param percentile99 the 99th percentile value.
+ * @param average the average value.
+ * @param standardDeviation the value of the standard deviation.
+ * @param max the maximum value.
+ * @param count the number of values.
+ * @param sum the sum of the values.
+ * @param min the minimum value.
+ */
public HistogramData(final double median, final double percentile95,
final double percentile99, final double average,
final double standardDeviation, final double max, final long count,
@@ -37,38 +62,83 @@ public HistogramData(final double median, final double percentile95,
sum_ = sum;
}
+ /**
+ * Get the median value.
+ *
+ * @return the median value.
+ */
public double getMedian() {
return median_;
}
+ /**
+ * Get the 95th percentile value.
+ *
+ * @return the 95th percentile value.
+ */
public double getPercentile95() {
return percentile95_;
}
+ /**
+ * Get the 99th percentile value.
+ *
+ * @return the 99th percentile value.
+ */
public double getPercentile99() {
return percentile99_;
}
+ /**
+ * Get the average value.
+ *
+ * @return the average value.
+ */
public double getAverage() {
return average_;
}
+ /**
+ * Get the value of the standard deviation.
+ *
+ * @return the value of the standard deviation.
+ */
public double getStandardDeviation() {
return standardDeviation_;
}
+ /**
+ * Get the maximum value.
+ *
+ * @return the maximum value.
+ */
public double getMax() {
return max_;
}
+ /**
+ * Get the number of values.
+ *
+ * @return the number of values.
+ */
public long getCount() {
return count_;
}
+ /**
+ * Get the sum of the values.
+ *
+ * @return the sum of the values.
+ */
public long getSum() {
return sum_;
}
+ /**
+ * Get the minimum value.
+ *
+ * @return the minimum value.
+ */
public double getMin() {
return min_;
}
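A sketch (not part of this diff) showing where HistogramData typically comes from, assuming Statistics#getHistogramData:

import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.Options;
import org.rocksdb.Statistics;

// Sketch: enable statistics, then inspect the DB_GET latency histogram.
try (final Statistics statistics = new Statistics();
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setStatistics(statistics)) {
  // ... open a database with these options and perform some reads ...
  final HistogramData dbGet = statistics.getHistogramData(HistogramType.DB_GET);
  System.out.println("median=" + dbGet.getMedian() + " p99=" + dbGet.getPercentile99());
}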
diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java
index 41fe241ad3ab..f9feb9439acf 100644
--- a/java/src/main/java/org/rocksdb/HistogramType.java
+++ b/java/src/main/java/org/rocksdb/HistogramType.java
@@ -5,77 +5,157 @@
package org.rocksdb;
+/**
+ * The types of histogram.
+ */
public enum HistogramType {
-
+ /**
+ * DB Get.
+ */
DB_GET((byte) 0x0),
+ /**
+ * DB Write.
+ */
DB_WRITE((byte) 0x1),
+ /**
+ * Time spent in compaction.
+ */
COMPACTION_TIME((byte) 0x2),
+ /**
+ * Time spent in setting up sub-compaction.
+ */
SUBCOMPACTION_SETUP_TIME((byte) 0x3),
+ /**
+ * Time spent in IO during table sync.
+ * Measured in microseconds.
+ */
TABLE_SYNC_MICROS((byte) 0x4),
+ /**
+ * Time spent in IO during compaction of outfile.
+ * Measured in microseconds.
+ */
COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5),
+ /**
+ * Time spent in IO during WAL file sync.
+ * Measured in microseconds.
+ */
WAL_FILE_SYNC_MICROS((byte) 0x6),
+ /**
+ * Time spent in IO during manifest file sync.
+ * Measured in microseconds.
+ */
MANIFEST_FILE_SYNC_MICROS((byte) 0x7),
/**
- * TIME SPENT IN IO DURING TABLE OPEN.
+ * Time spent in IO during table open.
+ * Measured in microseconds.
*/
TABLE_OPEN_IO_MICROS((byte) 0x8),
+ /**
+ * DB Multi-Get.
+ */
DB_MULTIGET((byte) 0x9),
+ /**
+ * Time spent in block reads during compaction.
+ * Measured in microseconds.
+ */
READ_BLOCK_COMPACTION_MICROS((byte) 0xA),
+ /**
+ * Time spent in block reads.
+ * Measured in microseconds.
+ */
READ_BLOCK_GET_MICROS((byte) 0xB),
+ /**
+ * Time spent in raw block writes.
+ * Measured in microseconds.
+ */
WRITE_RAW_BLOCK_MICROS((byte) 0xC),
+ /**
+ * Number of files in a single compaction.
+ */
NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12),
+ /**
+ * DB Seek.
+ */
DB_SEEK((byte) 0x13),
+ /**
+ * Write stall.
+ */
WRITE_STALL((byte) 0x14),
+ /**
+ * Time spent in SST reads.
+ * Measured in microseconds.
+ */
SST_READ_MICROS((byte) 0x15),
/**
- * The number of subcompactions actually scheduled during a compaction.
+ * The number of sub-compactions actually scheduled during a compaction.
*/
NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16),
/**
+ * Bytes per read.
* Value size distribution in each operation.
*/
BYTES_PER_READ((byte) 0x17),
+
+ /**
+ * Bytes per write.
+ * Value size distribution in each operation.
+ */
BYTES_PER_WRITE((byte) 0x18),
+
+ /**
+ * Bytes per Multi-Get.
+ * Value size distribution in each operation.
+ */
BYTES_PER_MULTIGET((byte) 0x19),
/**
- * number of bytes compressed.
+ * Number of bytes compressed.
*/
BYTES_COMPRESSED((byte) 0x1A),
/**
- * number of bytes decompressed.
- *
- * number of bytes is when uncompressed; i.e. before/after respectively
+ * Number of bytes decompressed.
+ * The number of bytes is the uncompressed size; i.e. before compression
+ * and after decompression respectively.
*/
BYTES_DECOMPRESSED((byte) 0x1B),
+ /**
+ * Time spent in compression.
+ * Measured in nanoseconds.
+ */
COMPRESSION_TIMES_NANOS((byte) 0x1C),
+ /**
+ * Time spent in decompression.
+ * Measured in nanoseconds.
+ */
DECOMPRESSION_TIMES_NANOS((byte) 0x1D),
+ /**
+ * Number of merge operands for read.
+ */
READ_NUM_MERGE_OPERANDS((byte) 0x1E),
/**
- * Time spent flushing memtable to disk.
+ * Time spent flushing Memtable to disk.
*/
FLUSH_TIME((byte) 0x20),
@@ -91,62 +171,73 @@ public enum HistogramType {
/**
* BlobDB Put/PutWithTTL/PutUntil/Write latency.
+ * Measured in microseconds.
*/
BLOB_DB_WRITE_MICROS((byte) 0x23),
/**
* BlobDB Get latency.
+ * Measured in microseconds.
*/
BLOB_DB_GET_MICROS((byte) 0x24),
/**
* BlobDB MultiGet latency.
+ * Measured in microseconds.
*/
BLOB_DB_MULTIGET_MICROS((byte) 0x25),
/**
* BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency.
+ * Measured in microseconds.
*/
BLOB_DB_SEEK_MICROS((byte) 0x26),
/**
* BlobDB Next latency.
+ * Measured in microseconds.
*/
BLOB_DB_NEXT_MICROS((byte) 0x27),
/**
* BlobDB Prev latency.
+ * Measured in microseconds.
*/
BLOB_DB_PREV_MICROS((byte) 0x28),
/**
* Blob file write latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x29),
/**
* Blob file read latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2A),
/**
* Blob file sync latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2B),
/**
* BlobDB compression time.
+ * Measured in microseconds.
*/
BLOB_DB_COMPRESSION_MICROS((byte) 0x2D),
/**
* BlobDB decompression time.
+ * Measured in microseconds.
*/
BLOB_DB_DECOMPRESSION_MICROS((byte) 0x2E),
/**
* Num of Index and Filter blocks read from file system per level in MultiGet
- * request
+ * request.
*/
NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x2F),
@@ -160,6 +251,9 @@ public enum HistogramType {
*/
ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x32),
+ /**
+ * Bytes read asynchronously.
+ */
ASYNC_READ_BYTES((byte) 0x33),
/**
@@ -169,23 +263,58 @@ public enum HistogramType {
*/
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x39),
+ /**
+ * File read during flush.
+ * Measured in microseconds.
+ */
FILE_READ_FLUSH_MICROS((byte) 0x3A),
+ /**
+ * File read during compaction.
+ * Measured in microseconds.
+ */
FILE_READ_COMPACTION_MICROS((byte) 0x3B),
+ /**
+ * File read during DB Open.
+ * Measured in microseconds.
+ */
FILE_READ_DB_OPEN_MICROS((byte) 0x3C),
+ /**
+ * File read during DB Get.
+ * Measured in microseconds.
+ */
FILE_READ_GET_MICROS((byte) 0x3D),
+ /**
+ * File read during DB Multi-Get.
+ * Measured in microseconds.
+ */
FILE_READ_MULTIGET_MICROS((byte) 0x3E),
+ /**
+ * File read during DB Iterator.
+ * Measured in microseconds.
+ */
FILE_READ_DB_ITERATOR_MICROS((byte) 0x3F),
+ /**
+ * File read during DB checksum validation.
+ * Measured in microseconds.
+ */
FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x40),
+ /**
+ * File read during file checksum validation.
+ * Measured in microseconds.
+ */
FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x41),
- // 0x1F for backwards compatibility on current minor version.
+ /**
+ * The number of histogram types available.
+ * {@code 0x1F} for backwards compatibility on current minor version.
+ */
HISTOGRAM_ENUM_MAX((byte) 0x1F);
private final byte value;
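
These histogram types are read back through the Statistics object. A minimal sketch (imports elided), assuming a Statistics instance was registered via Options#setStatistics before the database was opened:

    // "stats" is assumed to be the Statistics set on the DB's Options.
    final HistogramData flushTimes = stats.getHistogramData(HistogramType.FLUSH_TIME);
    System.out.println("flush p95: " + flushTimes.getPercentile95());
    System.out.println("flush count: " + flushTimes.getCount());
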
diff --git a/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java
index 716a0bda0736..dd088dcd767e 100644
--- a/java/src/main/java/org/rocksdb/Holder.java
+++ b/java/src/main/java/org/rocksdb/Holder.java
@@ -7,6 +7,8 @@
/**
* Simple instance reference wrapper.
+ *
+ * @param
* The index contains a key separating each pair of consecutive blocks.
* Let A be the highest key in one block, B the lowest key in the next block,
* and I the index entry separating these two blocks:
@@ -22,7 +22,7 @@
* However, if I=A, this can't happen, and we'll read only the second block.
* In kNoShortening mode, we use I=A. In other modes, we use the shortest
* key in [A, B), which usually significantly reduces index size.
- *
+ *
* There's a similar story for the last index entry, which is an upper bound
* of the highest key in the file. If it's shortened and therefore
* overestimated, iterator is likely to unnecessarily read the last data block
diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java
index 197bd89dab68..c5fda9acd7c3 100644
--- a/java/src/main/java/org/rocksdb/InfoLogLevel.java
+++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java
@@ -5,12 +5,39 @@
* RocksDB log levels.
*/
public enum InfoLogLevel {
+ /**
+ * Log 'debug' level events.
+ */
DEBUG_LEVEL((byte)0),
+
+ /**
+ * Log 'info' level events.
+ */
INFO_LEVEL((byte)1),
+
+ /**
+ * Log 'warn' level events.
+ */
WARN_LEVEL((byte)2),
+
+ /**
+ * Log 'error' level events.
+ */
ERROR_LEVEL((byte)3),
+
+ /**
+ * Log 'fatal' level events.
+ */
FATAL_LEVEL((byte)4),
+
+ /**
+ * Log 'header' level events.
+ */
HEADER_LEVEL((byte)5),
+
+ /**
+ * The number of log levels available.
+ */
NUM_INFO_LOG_LEVELS((byte)6);
private final byte value_;
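
A one-line sketch of how these levels are applied, assuming an existing Options instance:

    // Suppress debug/info chatter; only warnings and above reach the log.
    options.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
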
diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
index 1a6a5fccd945..85eccea5a55d 100644
--- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
+++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -12,11 +12,16 @@
*/
public class IngestExternalFileOptions extends RocksObject {
+ /**
+ * Constructs an IngestExternalFileOptions.
+ */
public IngestExternalFileOptions() {
super(newIngestExternalFileOptions());
}
/**
+ * Constructs an IngestExternalFileOptions.
+ *
* @param moveFiles {@link #setMoveFiles(boolean)}
* @param snapshotConsistency {@link #setSnapshotConsistency(boolean)}
* @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)}
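
A minimal ingestion sketch; the "db" handle and the SST file path are assumptions:

    try (final IngestExternalFileOptions ingestOpts = new IngestExternalFileOptions()) {
      ingestOpts.setMoveFiles(true); // move, rather than copy, the SST into the DB
      db.ingestExternalFile(Collections.singletonList("/tmp/bulk.sst"), ingestOpts);
    }
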
diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java
index cd2267528d85..98d176f6d4fd 100644
--- a/java/src/main/java/org/rocksdb/KeyMayExist.java
+++ b/java/src/main/java/org/rocksdb/KeyMayExist.java
@@ -5,8 +5,12 @@
package org.rocksdb;
+import java.nio.ByteBuffer;
import java.util.Objects;
+/**
+ * Indicates whether a key exists or not, and its corresponding value's length.
+ */
public class KeyMayExist {
@Override
public boolean equals(final Object o) {
@@ -23,13 +27,44 @@ public int hashCode() {
return Objects.hash(exists, valueLength);
}
- public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue }
+ /**
+ * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, ByteBuffer)}.
+ */
+ public enum KeyMayExistEnum {
+ /**
+ * Key does not exist.
+ */
+ kNotExist,
+ /**
+ * Key may exist without a value.
+ */
+ kExistsWithoutValue,
+
+ /**
+ * Key may exist with a value.
+ */
+ kExistsWithValue
+ }
+
+ /**
+ * Constructs a KeyMayExist.
+ *
+ * @param exists indicates if the key exists.
+ * @param valueLength the length of the value pointed to by the key (if it exists).
+ */
KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
this.exists = exists;
this.valueLength = valueLength;
}
+ /**
+ * Indicates if the key exists.
+ */
public final KeyMayExistEnum exists;
+
+ /**
+ * The length of the value pointed to by the key (if it exists).
+ */
public final int valueLength;
}
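
A usage sketch for the ByteBuffer variant referenced above; "db" and "cfHandle" are assumed, and both buffers must be allocated direct:

    final ByteBuffer key = ByteBuffer.allocateDirect(16);
    key.put("user1".getBytes(StandardCharsets.UTF_8));
    key.flip();
    final ByteBuffer value = ByteBuffer.allocateDirect(256);
    final KeyMayExist result = db.keyMayExist(cfHandle, key, value);
    if (result.exists == KeyMayExist.KeyMayExistEnum.kExistsWithValue) {
      // "value" now holds up to result.valueLength bytes of the value
    }
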
diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java
index cb0f1a30225b..8f1762fb7159 100644
--- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java
+++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java
@@ -55,7 +55,7 @@ public int level() {
return level;
}
- public long newLiveFileMetaDataHandle() {
+ private long newLiveFileMetaDataHandle() {
return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(),
fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(),
smallestKey().length, largestKey(), largestKey().length, numReadsSampled(),
diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java
index 5ee2c9fcc64a..2be597ce5f9b 100644
--- a/java/src/main/java/org/rocksdb/LogFile.java
+++ b/java/src/main/java/org/rocksdb/LogFile.java
@@ -5,6 +5,9 @@
package org.rocksdb;
+/**
+ * A (journal) log file.
+ */
@SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass")
public class LogFile {
private final String pathName;
diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java
index 614a7fa502f1..e5983dde8caa 100644
--- a/java/src/main/java/org/rocksdb/Logger.java
+++ b/java/src/main/java/org/rocksdb/Logger.java
@@ -96,15 +96,51 @@ public InfoLogLevel infoLogLevel() {
infoLogLevel(nativeHandle_));
}
+ /**
+ * Log a message.
+ *
+ * @param infoLogLevel the log level.
+ * @param logMsg the log message.
+ */
protected abstract void log(InfoLogLevel infoLogLevel,
String logMsg);
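
A sketch of a concrete subclass that forwards native log messages to stderr; the surrounding option names are illustrative:

    try (final Options options = new Options().setCreateIfMissing(true);
         final Logger logger = new Logger(options) {
           @Override
           protected void log(final InfoLogLevel level, final String msg) {
             System.err.println("[" + level + "] " + msg);
           }
         }) {
      options.setLogger(logger);
      // RocksDB.open(options, path) would now log through this callback
    }
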
+ /**
+ * Create a new Logger with Options.
+ *
+ * @param options the native handle to the underlying C++ native options object
+ *
+ * @return the native handle to the underlying C++ native Logger object.
+ */
protected native long createNewLoggerOptions(
long options);
+
+ /**
+ * Create a new Logger with DBOptions.
+ *
+ * @param dbOptions the native handle to the underlying C++ native db options object
+ *
+ * @return the native handle to the underlying C++ native Logger object.
+ */
protected native long createNewLoggerDbOptions(
long dbOptions);
+
+ /**
+ * Set the log level.
+ *
+ * @param handle the native handle to the underlying C++ native Logger object.
+ * @param infoLogLevel the log level.
+ */
protected native void setInfoLogLevel(long handle,
byte infoLogLevel);
+
+ /**
+ * Get the log level.
+ *
+ * @param handle the native handle to the underlying C++ native Logger object.
+ *
+ * @return the log level.
+ */
protected native byte infoLogLevel(long handle);
/**
diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java
index 3d429035a343..56396ac8d997 100644
--- a/java/src/main/java/org/rocksdb/MemTableInfo.java
+++ b/java/src/main/java/org/rocksdb/MemTableInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information about a MemTable.
+ */
public class MemTableInfo {
private final String columnFamilyName;
private final long firstSeqno;
diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java
index c299f62210fa..630c400cfa9a 100644
--- a/java/src/main/java/org/rocksdb/MergeOperator.java
+++ b/java/src/main/java/org/rocksdb/MergeOperator.java
@@ -12,6 +12,12 @@
* value.
*/
public abstract class MergeOperator extends RocksObject {
+
+ /**
+ * Constructs a MergeOperator.
+ *
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ MergeOperator.
+ */
protected MergeOperator(final long nativeHandle) {
super(nativeHandle);
}
diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
index e54db7171e54..b58098119e9e 100644
--- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
@@ -7,6 +7,9 @@
import java.util.*;
+/**
+ * Mutable Column Family Options.
+ */
public class MutableColumnFamilyOptions extends AbstractMutableOptions {
/**
* User must use builder pattern, or parser.
@@ -54,24 +57,87 @@ public static MutableColumnFamilyOptionsBuilder parse(
return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown);
}
+ /**
+ * Parses a String representation of MutableColumnFamilyOptions.
+ *
+ * The format is: key1=value1;key2=value2;key3=value3 etc.
+ *
+ * For int[] values, each int should be separated by a colon, e.g.
+ *
+ * key1=value1;intArrayKey1=1:2:3
+ *
+ * @param str The string representation of the mutable column family options
+ *
+ * @return A builder for the mutable column family options
+ */
public static MutableColumnFamilyOptionsBuilder parse(final String str) {
return parse(str, false);
}
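
A sketch of the format described above, applied to a live column family; "db" and "cfHandle" are assumptions:

    final MutableColumnFamilyOptions mutableOpts =
        MutableColumnFamilyOptions.parse(
            "write_buffer_size=67108864;max_write_buffer_number=4;"
                + "max_bytes_for_level_multiplier_additional=1:2:3")
            .build();
    db.setOptions(cfHandle, mutableOpts);
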
private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
+ /**
+ * MemTable options.
+ */
public enum MemtableOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Write buffer size.
+ */
write_buffer_size(ValueType.LONG),
+
+ /**
+ * Arena block size.
+ */
arena_block_size(ValueType.LONG),
+
+ /**
+ * Prefix size ratio for Memtable's Bloom Filter.
+ */
memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
+
+ /**
+ * Whether to filter whole keys in the Memtable(s).
+ */
memtable_whole_key_filtering(ValueType.BOOLEAN),
+
+ /**
+ * Number of bits for the prefix in Memtable's Bloom Filter.
+ */
@Deprecated memtable_prefix_bloom_bits(ValueType.INT),
+
+ /**
+ * Number of probes for the prefix in Memtable's Bloom Filter.
+ */
@Deprecated memtable_prefix_bloom_probes(ValueType.INT),
+
+ /**
+ * Huge Page Size for Memtable(s).
+ */
memtable_huge_page_size(ValueType.LONG),
+
+ /**
+ * Maximum number of successive merges.
+ */
max_successive_merges(ValueType.LONG),
+
+ /**
+ * Whether to filter deletes.
+ */
@Deprecated filter_deletes(ValueType.BOOLEAN),
+
+ /**
+ * Maximum number of write buffers.
+ */
max_write_buffer_number(ValueType.INT),
+
+ /**
+ * Number of in-place update locks.
+ */
inplace_update_num_locks(ValueType.LONG),
+
+ /**
+ * Memory purge threshold.
+ */
experimental_mempurge_threshold(ValueType.DOUBLE);
private final ValueType valueType;
@@ -85,20 +151,78 @@ public ValueType getValueType() {
}
}
+ /**
+ * Compaction options.
+ */
public enum CompactionOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Disable auto compaction.
+ */
disable_auto_compactions(ValueType.BOOLEAN),
+
+ /**
+ * Soft limit on the number of bytes pending before compaction.
+ */
soft_pending_compaction_bytes_limit(ValueType.LONG),
+
+ /**
+ * Hard limit on the number of bytes pending before compaction.
+ */
hard_pending_compaction_bytes_limit(ValueType.LONG),
+
+ /**
+ * Number of files in Level 0 before compaction is triggered.
+ */
level0_file_num_compaction_trigger(ValueType.INT),
+
+ /**
+ * Number of files in Level 0 before a write slowdown is triggered.
+ */
level0_slowdown_writes_trigger(ValueType.INT),
+
+ /**
+ * Number of files in Level 0 before writes are stopped.
+ */
level0_stop_writes_trigger(ValueType.INT),
+
+ /**
+ * Max compaction bytes.
+ */
max_compaction_bytes(ValueType.LONG),
+
+ /**
+ * Target for the base size of files.
+ */
target_file_size_base(ValueType.LONG),
+
+ /**
+ * Multiplier for the size of files.
+ */
target_file_size_multiplier(ValueType.INT),
+
+ /**
+ * Maximum size in bytes for level base.
+ */
max_bytes_for_level_base(ValueType.LONG),
+
+ /**
+ * Maximum bytes for level multiplier.
+ */
max_bytes_for_level_multiplier(ValueType.INT),
+
+ /**
+ * Maximum bytes for level multiplier(s) additional.
+ */
max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
+
+ /**
+ * Time-to-live.
+ */
ttl(ValueType.LONG),
+
+ /**
+ * Compaction period in seconds.
+ */
periodic_compaction_seconds(ValueType.LONG);
private final ValueType valueType;
@@ -112,16 +236,58 @@ public ValueType getValueType() {
}
}
+ /**
+ * Blob options.
+ */
public enum BlobOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Enable BLOB files.
+ */
enable_blob_files(ValueType.BOOLEAN),
+
+ /**
+ * Minimum BLOB size.
+ */
min_blob_size(ValueType.LONG),
+
+ /**
+ * BLOB file size.
+ */
blob_file_size(ValueType.LONG),
+
+ /**
+ * BLOB compression type.
+ */
blob_compression_type(ValueType.ENUM),
+
+ /**
+ * Enable BLOB garbage collection.
+ */
enable_blob_garbage_collection(ValueType.BOOLEAN),
+
+ /**
+ * BLOB garbage collection age cut-off.
+ */
blob_garbage_collection_age_cutoff(ValueType.DOUBLE),
+
+ /**
+ * Threshold for forcing BLOB garbage collection.
+ */
blob_garbage_collection_force_threshold(ValueType.DOUBLE),
+
+ /**
+ * BLOB compaction read-ahead size.
+ */
blob_compaction_readahead_size(ValueType.LONG),
+
+ /**
+ * BLOB file starting level.
+ */
blob_file_starting_level(ValueType.INT),
+
+ /**
+ * Prepopulate BLOB Cache.
+ */
prepopulate_blob_cache(ValueType.ENUM);
private final ValueType valueType;
@@ -135,10 +301,28 @@ public ValueType getValueType() {
}
}
+ /**
+ * Miscellaneous options.
+ */
public enum MiscOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Maximum number of sequential keys to skip during iteration.
+ */
max_sequential_skip_in_iterations(ValueType.LONG),
+
+ /**
+ * Whether to enable paranoid file checks.
+ */
paranoid_file_checks(ValueType.BOOLEAN),
+
+ /**
+ * Whether to report background I/O stats.
+ */
report_bg_io_stats(ValueType.BOOLEAN),
+
+ /**
+ * Compression type.
+ */
compression(ValueType.ENUM);
private final ValueType valueType;
@@ -152,6 +336,9 @@ public ValueType getValueType() {
}
}
+ /**
+ * Builder for constructing MutableColumnFamilyOptions.
+ */
public static class MutableColumnFamilyOptionsBuilder
extends AbstractMutableOptionsBuilder
+ * The format is: key1=value1;key2=value2;key3=value3 etc.
+ *
+ * For int[] values, each int should be separated by a colon, e.g.
+ *
+ * key1=value1;intArrayKey1=1:2:3
+ *
+ * @param str The string representation of the mutable db options
+ *
+ * @return A builder for the mutable db options
+ */
public static MutableDBOptionsBuilder parse(final String str) {
return parse(str, false);
}
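
The DB-wide counterpart follows the same pattern; a sketch assuming an open "db" handle:

    final MutableDBOptions mutableDbOpts =
        MutableDBOptions.parse("max_background_jobs=4;bytes_per_sync=1048576").build();
    db.setDBOptions(mutableDbOpts);
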
private interface MutableDBOptionKey extends MutableOptionKey {}
+ /**
+ * Database options.
+ */
public enum DBOption implements MutableDBOptionKey {
+ /**
+ * Maximum number of background jobs.
+ */
max_background_jobs(ValueType.INT),
+
+ /**
+ * Maximum number of background compactions.
+ */
max_background_compactions(ValueType.INT),
+
+ /**
+ * Whether to avoid flush during shutdown.
+ */
avoid_flush_during_shutdown(ValueType.BOOLEAN),
+
+ /**
+ * Max buffer size for writing to files.
+ */
writable_file_max_buffer_size(ValueType.LONG),
+
+ /**
+ * Delayed write rate.
+ */
delayed_write_rate(ValueType.LONG),
+
+ /**
+ * Maximum total size of the WAL.
+ */
max_total_wal_size(ValueType.LONG),
+
+ /**
+ * The period at which obsolete files are deleted.
+ * Measured in microseconds.
+ */
delete_obsolete_files_period_micros(ValueType.LONG),
+
+ /**
+ * The period to dump statistics.
+ * Measured in seconds.
+ */
stats_dump_period_sec(ValueType.INT),
+
+ /**
+ * The period at which statistics are persisted.
+ * Measured in seconds.
+ */
stats_persist_period_sec(ValueType.INT),
+
+ /**
+ * Buffer size for statistics history.
+ */
stats_history_buffer_size(ValueType.LONG),
+
+ /**
+ * Maximum number of open files.
+ */
max_open_files(ValueType.INT),
+
+ /**
+ * Bytes per sync.
+ */
bytes_per_sync(ValueType.LONG),
+
+ /**
+ * WAL bytes per sync.
+ */
wal_bytes_per_sync(ValueType.LONG),
+
+ /**
+ * Whether the bytes-per-sync limit is strictly enforced.
+ */
strict_bytes_per_sync(ValueType.BOOLEAN),
+
+ /**
+ * Compaction readahead size.
+ */
compaction_readahead_size(ValueType.LONG);
private final ValueType valueType;
@@ -89,6 +170,9 @@ public ValueType getValueType() {
}
}
+ /**
+ * Builder for constructing MutableDBOptions.
+ */
public static class MutableDBOptionsBuilder
        extends AbstractMutableOptionsBuilder
 It also supports the prefix hash feature.
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
 * @param key byte array of a key to search for
 * @return true if the key exists in the database, otherwise false.
@@ -2566,11 +2633,11 @@ public boolean keyExists(final byte[] key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be
@@ -2586,11 +2653,11 @@ public boolean keyExists(final byte[] key, final int offset, final int len) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2605,11 +2672,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2629,11 +2696,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2648,11 +2715,11 @@ public boolean keyExists(final ReadOptions readOptions, final byte[] key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2672,11 +2739,11 @@ public boolean keyExists(
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2693,11 +2760,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle,
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2721,11 +2788,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle,
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param key ByteBuffer with key. Must be allocated as direct.
@@ -2739,11 +2806,11 @@ public boolean keyExists(final ByteBuffer key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2758,11 +2825,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final Byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2777,11 +2844,11 @@ public boolean keyExists(final ReadOptions readOptions, final ByteBuffer key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
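
A sketch of the contrast drawn in these javadocs, assuming an open "db" handle:

    final byte[] key = "user42".getBytes(StandardCharsets.UTF_8);
    if (db.keyExists(key)) {
      // Definitive: any bloom-filter false positive has been ruled out by a read.
    }
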
@@ -3641,10 +3708,26 @@ public long[] getApproximateSizes(final List
+ * Should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode
+ */
public enum Code {
+ /**
+ * Success.
+ */
Ok( (byte)0x0),
+
+ /**
+ * Not found.
+ */
NotFound( (byte)0x1),
+
+ /**
+ * Corruption detected.
+ */
Corruption( (byte)0x2),
+
+ /**
+ * Not supported.
+ */
NotSupported( (byte)0x3),
+
+ /**
+ * Invalid argument provided.
+ */
InvalidArgument( (byte)0x4),
+
+ /**
+ * I/O error.
+ */
IOError( (byte)0x5),
+
+ /**
+ * There is a merge in progress.
+ */
MergeInProgress( (byte)0x6),
+
+ /**
+ * Incomplete.
+ */
Incomplete( (byte)0x7),
+
+ /**
+ * There is a shutdown in progress.
+ */
ShutdownInProgress( (byte)0x8),
+
+ /**
+ * An operation timed out.
+ */
TimedOut( (byte)0x9),
+
+ /**
+ * An operation was aborted.
+ */
Aborted( (byte)0xA),
+
+ /**
+ * The system is busy.
+ */
Busy( (byte)0xB),
+
+ /**
+ * The request expired.
+ */
Expired( (byte)0xC),
+
+ /**
+ * The operation should be reattempted.
+ */
TryAgain( (byte)0xD),
+
+ /**
+ * Undefined.
+ */
Undefined( (byte)0x7F);
private final byte value;
@@ -82,6 +184,15 @@ public enum Code {
this.value = value;
}
+ /**
+ * Get a code from its byte representation.
+ *
+ * @param value the byte representation of the code.
+ *
+ * @return the code
+ *
+ * @throws IllegalArgumentException if the {@code value} parameter does not represent a code.
+ */
public static Code getCode(final byte value) {
for (final Code code : Code.values()) {
if (code.value == value){
@@ -102,16 +213,56 @@ public byte getValue() {
}
}
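
A sketch of reacting to a specific code; "db", "key" and "value" are assumptions:

    try {
      db.put(key, value);
    } catch (final RocksDBException e) {
      if (e.getStatus() != null && e.getStatus().getCode() == Status.Code.Busy) {
        // write conflict: back off and retry
      } else {
        throw e;
      }
    }
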
- // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode
+ /**
+ * Status Sub-code.
+ *
+ * Should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode
+ */
public enum SubCode {
+
+ /**
+ * None.
+ */
None( (byte)0x0),
+
+ /**
+ * Timeout whilst waiting on Mutex.
+ */
MutexTimeout( (byte)0x1),
+
+ /**
+ * Timeout whilst waiting on Lock.
+ */
LockTimeout( (byte)0x2),
+
+ /**
+ * Maximum limit on number of locks reached.
+ */
LockLimit( (byte)0x3),
+
+ /**
+ * No space remaining.
+ */
NoSpace( (byte)0x4),
+
+ /**
+ * Deadlock detected.
+ */
Deadlock( (byte)0x5),
+
+ /**
+ * Stale file detected.
+ */
StaleFile( (byte)0x6),
+
+ /**
+ * Reached the maximum memory limit.
+ */
MemoryLimit( (byte)0x7),
+
+ /**
+ * Undefined.
+ */
Undefined( (byte)0x7F);
private final byte value;
@@ -120,6 +271,15 @@ public enum SubCode {
this.value = value;
}
+ /**
+ * Get a sub-code from its byte representation.
+ *
+ * @param value the byte representation of the sub-code.
+ *
+ * @return the sub-code
+ *
+ * @throws IllegalArgumentException if the {@code value} parameter does not represent a sub-code.
+ */
public static SubCode getSubCode(final byte value) {
for (final SubCode subCode : SubCode.values()) {
if (subCode.value == value){
diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java
index 547371e7c08b..f383de4dc12f 100644
--- a/java/src/main/java/org/rocksdb/StringAppendOperator.java
+++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -11,14 +11,27 @@
* two strings.
*/
public class StringAppendOperator extends MergeOperator {
+ /**
+ * Constructs a StringAppendOperator.
+ */
public StringAppendOperator() {
this(',');
}
+ /**
+ * Constructs a StringAppendOperator.
+ *
+ * @param delim the character delimiter to use when appending.
+ */
public StringAppendOperator(final char delim) {
super(newSharedStringAppendOperator(delim));
}
+ /**
+ * Constructs a StringAppendOperator.
+ *
+ * @param delim the string delimiter to use when appending.
+ */
public StringAppendOperator(final String delim) {
super(newSharedStringAppendOperator(delim));
}
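
A sketch of delimiter-joined values via merge; the path is illustrative and the enclosing method is assumed to declare throws RocksDBException:

    try (final StringAppendOperator stringAppend = new StringAppendOperator(',');
         final Options options =
             new Options().setCreateIfMissing(true).setMergeOperator(stringAppend);
         final RocksDB db = RocksDB.open(options, "/tmp/merge-example")) {
      db.merge("tags".getBytes(), "red".getBytes());
      db.merge("tags".getBytes(), "blue".getBytes());
      // db.get("tags".getBytes()) now yields "red,blue"
    }
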
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
index 8dc56796a25d..aaf34b2cbd57 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Brief information on Table File creation.
+ */
public class TableFileCreationBriefInfo {
private final String dbName;
private final String columnFamilyName;
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
index 5654603c3833..1b65712b3b3b 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information on Table File creation.
+ */
public class TableFileCreationInfo extends TableFileCreationBriefInfo {
private final long fileSize;
private final TableProperties tableProperties;
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java
index d3984663dd28..f45da28e5776 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java
@@ -5,10 +5,29 @@
package org.rocksdb;
+/**
+ * Reasons for Table File creation.
+ */
public enum TableFileCreationReason {
+
+ /**
+ * Flush.
+ */
FLUSH((byte) 0x00),
+
+ /**
+ * Compaction.
+ */
COMPACTION((byte) 0x01),
+
+ /**
+ * Recovery.
+ */
RECOVERY((byte) 0x02),
+
+ /**
+ * Miscellaneous.
+ */
MISC((byte) 0x03);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
index 9a777e3336c2..87bd2b8c87af 100644
--- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information on Table File deletion.
+ */
public class TableFileDeletionInfo {
private final String dbName;
private final String filePath;
diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java
index 4211453d1a0b..c75d85d276f6 100644
--- a/java/src/main/java/org/rocksdb/ThreadStatus.java
+++ b/java/src/main/java/org/rocksdb/ThreadStatus.java
@@ -7,6 +7,9 @@
import java.util.Map;
+/**
+ * The status of a Thread.
+ */
public class ThreadStatus {
private final long threadId;
private final ThreadType threadType;
@@ -155,6 +158,13 @@ public static String getOperationName(final OperationType operationType) {
return getOperationName(operationType.getValue());
}
+ /**
+ * Converts microseconds to a string representation.
+ *
+ * @param operationElapsedTime the elapsed time in microseconds.
+ *
+ * @return the string representation.
+ */
public static String microsToString(final long operationElapsedTime) {
return microsToStringNative(operationElapsedTime);
}
diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java
index f2ca42776e79..381390678de4 100644
--- a/java/src/main/java/org/rocksdb/TickerType.java
+++ b/java/src/main/java/org/rocksdb/TickerType.java
@@ -19,7 +19,7 @@ public enum TickerType {
/**
* total block cache misses
- *
+ *
* REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
* BLOCK_CACHE_FILTER_MISS +
* BLOCK_CACHE_DATA_MISS;
@@ -28,27 +28,30 @@ public enum TickerType {
/**
* total block cache hit
- *
+ *
* REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
* BLOCK_CACHE_FILTER_HIT +
* BLOCK_CACHE_DATA_HIT;
*/
BLOCK_CACHE_HIT((byte) 0x1),
+ /**
+ * Number of blocks added to block cache.
+ */
BLOCK_CACHE_ADD((byte) 0x2),
/**
- * # of failures when adding blocks to block cache.
+ * Number of failures when adding blocks to block cache.
*/
BLOCK_CACHE_ADD_FAILURES((byte) 0x3),
/**
- * # of times cache miss when accessing index block from block cache.
+ * Number of times cache miss when accessing index block from block cache.
*/
BLOCK_CACHE_INDEX_MISS((byte) 0x4),
/**
- * # of times cache hit when accessing index block from block cache.
+ * Number of times cache hit when accessing index block from block cache.
*/
BLOCK_CACHE_INDEX_HIT((byte) 0x5),
@@ -63,12 +66,12 @@ public enum TickerType {
BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7),
/**
- * # of times cache miss when accessing filter block from block cache.
+ * Number of times cache miss when accessing filter block from block cache.
*/
BLOCK_CACHE_FILTER_MISS((byte) 0x9),
/**
- * # of times cache hit when accessing filter block from block cache.
+ * Number of times cache hit when accessing filter block from block cache.
*/
BLOCK_CACHE_FILTER_HIT((byte) 0xA),
@@ -83,12 +86,12 @@ public enum TickerType {
BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xC),
/**
- * # of times cache miss when accessing data block from block cache.
+ * Number of times cache miss when accessing data block from block cache.
*/
BLOCK_CACHE_DATA_MISS((byte) 0xE),
/**
- * # of times cache hit when accessing data block from block cache.
+ * Number of times cache hit when accessing data block from block cache.
*/
BLOCK_CACHE_DATA_HIT((byte) 0xF),
@@ -113,7 +116,7 @@ public enum TickerType {
BLOCK_CACHE_BYTES_WRITE((byte) 0x13),
/**
- * # of times bloom filter has avoided file reads.
+ * Number of times bloom filter has avoided file reads.
*/
BLOOM_FILTER_USEFUL((byte) 0x14),
@@ -163,23 +166,18 @@ public enum TickerType {
GET_HIT_L2_AND_UP((byte) 0x1D),
/**
- * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
- * There are 4 reasons currently.
- */
-
- /**
- * key was written with a newer value.
+ * Compaction dropped the key because there is a newer entry.
*/
COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x1E),
/**
+ * Compaction dropped the key because it is obsolete.
* Also includes keys dropped for range del.
- * The key is obsolete.
*/
COMPACTION_KEY_DROP_OBSOLETE((byte) 0x1F),
/**
- * key was covered by a range tombstone.
+ * Compaction dropped the key because it was covered by a range tombstone.
*/
COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x20),
@@ -189,7 +187,7 @@ public enum TickerType {
COMPACTION_KEY_DROP_USER((byte) 0x21),
/**
- * all keys in range were deleted.
+ * Compaction dropped the key as all keys in range were deleted.
*/
COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x22),
@@ -217,7 +215,7 @@ public enum TickerType {
/**
* The number of uncompressed bytes read from DB::Get(). It could be
* either from memtables, cache, or table files.
- *
+ *
* For the number of logical bytes read from DB::MultiGet(),
* please use {@link #NUMBER_MULTIGET_BYTES_READ}.
*/
@@ -259,8 +257,14 @@ public enum TickerType {
*/
ITER_BYTES_READ((byte) 0x2E),
+ /**
+ * The number of calls to open a file.
+ */
NO_FILE_OPENS((byte) 0x30),
+ /**
+ * The number of file errors.
+ */
NO_FILE_ERRORS((byte) 0x31),
/**
@@ -270,7 +274,7 @@ public enum TickerType {
/**
* The wait time for db mutex.
- *
+ *
* Disabled by default. To enable it set stats level to {@link StatsLevel#ALL}
*/
DB_MUTEX_WAIT_MICROS((byte) 0x36),
@@ -290,14 +294,21 @@ public enum TickerType {
*/
NUMBER_MULTIGET_BYTES_READ((byte) 0x3B),
+ /**
+ * Number of merge failures.
+ */
NUMBER_MERGE_FAILURES((byte) 0x3D),
/**
- * Number of times bloom was checked before creating iterator on a
+ * Number of times the bloom filter was checked before creating an iterator on a
* file, and the number of times the check was useful in avoiding
* iterator creation (and thus likely IOPs).
*/
BLOOM_FILTER_PREFIX_CHECKED((byte) 0x3E),
+
+ /**
+ * Number of times the bloom filter returned false, and so prevented accessing data+index blocks.
+ */
BLOOM_FILTER_PREFIX_USEFUL((byte) 0x3F),
/**
@@ -358,42 +369,75 @@ public enum TickerType {
* table reader object.
*/
NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x4F),
+
+ /**
+ * Number of times the superversion was acquired.
+ */
NUMBER_SUPERVERSION_ACQUIRES((byte) 0x50),
+
+ /**
+ * Number of times the superversion was released.
+ */
NUMBER_SUPERVERSION_RELEASES((byte) 0x51),
+
+ /**
+ * Number of times the superversion was cleaned up.
+ */
NUMBER_SUPERVERSION_CLEANUPS((byte) 0x52),
/**
- * # of compressions/decompressions executed
+ * Number of block compressions executed.
*/
NUMBER_BLOCK_COMPRESSED((byte) 0x53),
+
+ /**
+ * Number of block decompressions executed.
+ */
NUMBER_BLOCK_DECOMPRESSED((byte) 0x54),
+ /**
+ * Number of blocks not compressed.
+ */
+ @Deprecated
NUMBER_BLOCK_NOT_COMPRESSED((byte) 0x55),
+
+ /**
+ * Total time spent on merge operations.
+ */
MERGE_OPERATION_TOTAL_TIME((byte) 0x56),
+
+ /**
+ * Total time spent on filter operations.
+ */
FILTER_OPERATION_TOTAL_TIME((byte) 0x57),
/**
- * Row cache.
+ * Number of row cache hits.
*/
ROW_CACHE_HIT((byte) 0x58),
- ROW_CACHE_MISS((byte) 0x59),
/**
- * Read amplification statistics.
- *
- * Read amplification can be calculated using this formula
- * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
- *
- * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
+ * Number of row cache misses.
*/
+ ROW_CACHE_MISS((byte) 0x59),
/**
- * Estimate of total bytes actually used.
+ * Read amplification estimate of total bytes actually used.
+ *
+ * Read amplification can be calculated using this formula
+ * ({@link #READ_AMP_TOTAL_READ_BYTES} / READ_AMP_ESTIMATE_USEFUL_BYTES)
+ *
+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
*/
READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x5A),
/**
- * Total size of loaded data blocks.
+ * Read amplification estimate of total size of loaded data blocks.
+ *
+ * Read amplification can be calculated using this formula
+ * (READ_AMP_TOTAL_READ_BYTES / {@link #READ_AMP_ESTIMATE_USEFUL_BYTES})
+ *
+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
*/
READ_AMP_TOTAL_READ_BYTES((byte) 0x5B),
@@ -434,12 +478,12 @@ public enum TickerType {
COMPACTION_CANCELLED((byte) 0x62),
/**
- * # of times bloom FullFilter has not avoided the reads.
+ * Number of times bloom FullFilter has not avoided the reads.
*/
BLOOM_FILTER_FULL_POSITIVE((byte) 0x63),
/**
- * # of times bloom FullFilter has not avoided the reads and data actually
+ * Number of times bloom FullFilter has not avoided the reads and data actually
* exist.
*/
BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x64),
@@ -531,7 +575,7 @@ public enum TickerType {
BLOB_DB_BLOB_FILE_BYTES_READ((byte) 0x75),
/**
- * # of times a blob files being synced.
+ * Number of times a blob file was synced.
*/
BLOB_DB_BLOB_FILE_SYNCED((byte) 0x76),
@@ -602,27 +646,27 @@ public enum TickerType {
/**
* These counters indicate a performance issue in WritePrepared transactions.
 * We should not see them ticking much.
- * # of times prepare_mutex_ is acquired in the fast path.
+ * Number of times prepare_mutex_ is acquired in the fast path.
*/
TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x09),
/**
- * # of times old_commit_map_mutex_ is acquired in the fast path.
+ * Number of times old_commit_map_mutex_ is acquired in the fast path.
*/
TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x0A),
/**
- * # of times we checked a batch for duplicate keys.
+ * Number of times we checked a batch for duplicate keys.
*/
TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x0B),
/**
- * # of times snapshot_mutex_ is acquired in the fast path.
+ * Number of times snapshot_mutex_ is acquired in the fast path.
*/
TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x0C),
/**
- * # of times ::Get returned TryAgain due to expired snapshot seq
+ * Number of times ::Get returned TryAgain due to an expired snapshot seq.
*/
TXN_GET_TRY_AGAIN((byte) -0x0D),
@@ -637,23 +681,63 @@ public enum TickerType {
FILES_DELETED_IMMEDIATELY((byte) -0x0f),
/**
- * Compaction read and write statistics broken down by CompactionReason
+ * Compaction bytes read and marked.
*/
COMPACT_READ_BYTES_MARKED((byte) -0x10),
+
+ /**
+ * Periodic compaction bytes read.
+ */
COMPACT_READ_BYTES_PERIODIC((byte) -0x11),
+
+ /**
+ * Compaction bytes read for TTL.
+ */
COMPACT_READ_BYTES_TTL((byte) -0x12),
+
+ /**
+ * Compaction bytes written and marked.
+ */
COMPACT_WRITE_BYTES_MARKED((byte) -0x13),
+
+ /**
+ * Periodic compaction bytes written.
+ */
COMPACT_WRITE_BYTES_PERIODIC((byte) -0x14),
+
+ /**
+ * Compaction bytes written for TTL.
+ */
COMPACT_WRITE_BYTES_TTL((byte) -0x15),
/**
- * DB error handler statistics
+ * DB error handler error count.
*/
ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x16),
+
+ /**
+ * DB error handler background I/O error count.
+ */
ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x17),
+
+ /**
+ * DB error handler background retryable I/O error count.
+ */
ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x18),
+
+ /**
+ * DB error handler auto-resume count.
+ */
ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x19),
+
+ /**
+ * DB error handler auto-resume retry count.
+ */
ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x1A),
+
+ /**
+ * DB error handler auto-resume success count.
+ */
ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x1B),
/**
@@ -679,33 +763,73 @@ public enum TickerType {
VERIFY_CHECKSUM_READ_BYTES((byte) -0x1F),
/**
- * Bytes read/written while creating backups
+ * Bytes read while creating backups.
*/
BACKUP_READ_BYTES((byte) -0x20),
+
+ /**
+ * Bytes written while creating backups.
+ */
BACKUP_WRITE_BYTES((byte) -0x21),
/**
- * Remote compaction read/write statistics
+ * Bytes read by remote compaction.
*/
REMOTE_COMPACT_READ_BYTES((byte) -0x22),
+
+ /**
+ * Bytes written by remote compaction.
+ */
REMOTE_COMPACT_WRITE_BYTES((byte) -0x23),
/**
- * Tiered storage related statistics
+ * Number of bytes read by tiered storage hot-file(s).
*/
HOT_FILE_READ_BYTES((byte) -0x24),
+
+ /**
+ * Number of bytes read by tiered storage warm-file(s).
+ */
WARM_FILE_READ_BYTES((byte) -0x25),
+
+ /**
+ * Number of bytes read by tiered storage cold-file(s).
+ */
COLD_FILE_READ_BYTES((byte) -0x26),
+
+ /**
+ * Number of reads on tiered storage hot-file(s).
+ */
HOT_FILE_READ_COUNT((byte) -0x27),
+
+ /**
+ * Number of reads on tiered storage warm-file(s).
+ */
WARM_FILE_READ_COUNT((byte) -0x28),
+
+ /**
+ * Number of reads on tiered storage cold-file(s).
+ */
COLD_FILE_READ_COUNT((byte) -0x29),
/**
- * (non-)last level read statistics
+ * Bytes read from the last level.
*/
LAST_LEVEL_READ_BYTES((byte) -0x2A),
+
+ /**
+ * Number of reads on the last level.
+ */
LAST_LEVEL_READ_COUNT((byte) -0x2B),
+
+ /**
+ * Bytes read from non-last level(s).
+ */
NON_LAST_LEVEL_READ_BYTES((byte) -0x2C),
+
+ /**
+ * Number of reads from non-last level(s).
+ */
NON_LAST_LEVEL_READ_COUNT((byte) -0x2D),
/**
@@ -714,12 +838,12 @@ public enum TickerType {
BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x2E),
/**
- * # of times cache miss when accessing blob from blob cache.
+ * Number of times cache miss when accessing blob from blob cache.
*/
BLOB_DB_CACHE_MISS((byte) -0x2F),
/**
- * # of times cache hit when accessing blob from blob cache.
+ * Number of times cache hit when accessing blob from blob cache.
*/
BLOB_DB_CACHE_HIT((byte) -0x30),
@@ -764,18 +888,40 @@ public enum TickerType {
*/
BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x3C),
+ /**
+ * Number of times readahead is trimmed during scans when
+ * {@link ReadOptions#setReadaheadSize(long)} is set.
+ */
READAHEAD_TRIMMED((byte) -0x3D),
+ /**
+ * FIFO compactions that drop files because the maximum size would be exceeded.
+ */
FIFO_MAX_SIZE_COMPACTIONS((byte) -0x3E),
+ /**
+ * FIFO compactions that drop files exceeding a TTL.
+ */
FIFO_TTL_COMPACTIONS((byte) -0x3F),
+ /**
+ * Number of bytes prefetched during a user-initiated scan.
+ */
PREFETCH_BYTES((byte) -0x40),
+ /**
+ * Number of prefetched bytes that were actually useful.
+ */
PREFETCH_BYTES_USEFUL((byte) -0x41),
+ /**
+ * Number of FS reads avoided due to scan prefetching.
+ */
PREFETCH_HITS((byte) -0x42),
+ /**
+ * The maximum number of ticker types.
+ */
TICKER_ENUM_MAX((byte) 0x5F);
private final byte value;
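
Tickers are read back the same way as histograms; a sketch assuming a Statistics object was set on the Options used to open the DB:

    final long misses = stats.getTickerCount(TickerType.BLOCK_CACHE_MISS);
    final long hits = stats.getTickerCount(TickerType.BLOCK_CACHE_HIT);
    // Per the REQUIRES invariants above, misses == index + filter + data misses.
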
diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java
index cf5f7bbe12f8..85c8abcc2500 100644
--- a/java/src/main/java/org/rocksdb/TraceOptions.java
+++ b/java/src/main/java/org/rocksdb/TraceOptions.java
@@ -12,10 +12,18 @@
public class TraceOptions {
private final long maxTraceFileSize;
+ /**
+ * Constructs a TraceOptions.
+ */
public TraceOptions() {
this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB
}
+ /**
+ * Constructs a TraceOptions.
+ *
+ * @param maxTraceFileSize the maximum size of the trace file.
+ */
public TraceOptions(final long maxTraceFileSize) {
this.maxTraceFileSize = maxTraceFileSize;
}
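
A sketch capping trace output at 1 GiB; "MyTraceWriter" is a hypothetical AbstractTraceWriter subclass:

    final TraceOptions traceOptions = new TraceOptions(1024L * 1024L * 1024L);
    // db.startTrace(traceOptions, new MyTraceWriter()); // writer is hypothetical
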
diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java
index cab7ed28737f..28c3fa3a5c9a 100644
--- a/java/src/main/java/org/rocksdb/Transaction.java
+++ b/java/src/main/java/org/rocksdb/Transaction.java
@@ -184,7 +184,9 @@ public void clearSnapshot() {
}
/**
- * Prepare the current transaction for 2PC
+ * Prepare the current transaction for 2PC.
+ *
+ * @throws RocksDBException if the transaction cannot be prepared
*/
public void prepare() throws RocksDBException {
//TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit)
@@ -257,7 +259,7 @@ public void rollbackToSavePoint() throws RocksDBException {
/**
* This function has an inconsistent parameter order compared to other {@code get()}
* methods and is deprecated in favour of one with a consistent order.
- *
+ *
* This function is similar to
* {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will
* also read pending changes in this transaction.
@@ -297,11 +299,11 @@ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -560,7 +562,7 @@ public byte[][] multiGet(final ReadOptions readOptions,
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys of keys for which values need to be retrieved.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -646,7 +648,7 @@ public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys)
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys of keys for which values need to be retrieved.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -1157,7 +1159,6 @@ public GetStatus getForUpdate(final ReadOptions readOptions,
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
@@ -1193,14 +1194,13 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions,
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
* instances
* @param keys the keys to retrieve the values for.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -1229,7 +1229,6 @@ public List
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -1253,7 +1252,6 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][]
/**
* A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -1300,7 +1298,7 @@ public RocksIterator getIterator() {
* Returns an iterator that will iterate on all keys in the default
* column family including both keys in the DB and uncommitted keys in this
* transaction.
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
* from the DB but will NOT change which keys are read from this transaction
* (the keys in this transaction do not yet belong to any snapshot and will be
@@ -1526,10 +1524,10 @@ public void put(final ColumnFamilyHandle columnFamilyHandle,
/**
* Similar to {@link RocksDB#put(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1565,10 +1563,10 @@ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBExce
/**
* Similar to {@link RocksDB#put(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1608,6 +1606,30 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke
key.position(key.limit());
value.position(value.limit());
}
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])}, but
+ * will also perform conflict checking on the keys be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
final ByteBuffer value) throws RocksDBException {
put(columnFamilyHandle, key, value, false);
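
A sketch of the documented method in context; "txnDb", "writeOptions", "cfHandle" and the direct buffers are assumptions:

    try (final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.put(cfHandle, keyBuffer, valueBuffer); // conflict-checked write
      txn.commit();
    }
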
@@ -1731,10 +1753,10 @@ public void merge(final byte[] key, final byte[] value)
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1768,10 +1790,10 @@ public void merge(final ByteBuffer key, final ByteBuffer value) throws RocksDBEx
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1811,10 +1833,10 @@ public void merge(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -2268,10 +2290,10 @@ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle,
* Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
* but operates on the transactions write batch. This write will only happen
* if this transaction gets committed successfully.
- *
+ *
* Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict
* checking will be performed for this key.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, this function
* will still acquire locks necessary to make sure this write doesn't cause
* conflicts in other transactions; This may cause a {@link RocksDBException}
@@ -2331,10 +2353,10 @@ public void mergeUntracked(final byte[] key, final byte[] value)
* Similar to {@link RocksDB#merge(byte[], byte[])},
* but operates on the transactions write batch. This write will only happen
* if this transaction gets committed successfully.
- *
+ *
* Unlike {@link #merge(byte[], byte[])} no conflict
* checking will be performed for this key.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, this function
* will still acquire locks necessary to make sure this write doesn't cause
* conflicts in other transactions; This may cause a {@link RocksDBException}
@@ -2777,20 +2799,57 @@ public long getId() {
return getId(nativeHandle_);
}
+ /**
+ * States of a Transaction.
+ */
public enum TransactionState {
+ /**
+ * Transaction started.
+ */
STARTED((byte)0),
+
+ /**
+ * Transaction is awaiting prepare.
+ */
AWAITING_PREPARE((byte)1),
+
+ /**
+ * Transaction is prepared.
+ */
PREPARED((byte)2),
+
+ /**
+ * Transaction awaiting commit.
+ */
AWAITING_COMMIT((byte)3),
+
+ /**
+ * Transaction is committed.
+ */
COMMITTED((byte)4),
+
+ /**
+ * Transaction is awaiting rollback.
+ */
AWAITING_ROLLBACK((byte)5),
+
+ /**
+ * Transaction rolled-back.
+ */
ROLLEDBACK((byte)6),
+
+ /**
+ * Transaction locks have been stolen.
+ */
LOCKS_STOLEN((byte)7);
- /*
- * Keep old misspelled variable as alias
- * Tip from https://stackoverflow.com/a/37092410/454544
+ /**
+ * Old misspelled constant, kept as an alias for {@link #COMMITTED}.
+ * Tip from https://stackoverflow.com/a/37092410/454544
+ *
+ * @deprecated use {@link #COMMITTED} instead.
*/
+ @Deprecated
public static final TransactionState COMMITED = COMMITTED;
private final byte value;
@@ -2835,6 +2894,9 @@ private WaitingTransactions newWaitingTransactions(
return new WaitingTransactions(columnFamilyId, key, transactionIds);
}
+ /**
+ * Waiting Transactions.
+ */
public static class WaitingTransactions {
private final long columnFamilyId;
private final String key;
diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java
index a4ee951dc994..940dd4d92ec5 100644
--- a/java/src/main/java/org/rocksdb/TransactionDB.java
+++ b/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -203,6 +203,14 @@ public Transaction beginTransaction(final WriteOptions writeOptions,
return oldTransaction;
}
+ /**
+ * Gets a transaction by name.
+ *
+ * @param transactionName the name of the transaction.
+ *
+ * @return the transaction, or null if the transaction can't be found.
+ *
+ */
public Transaction getTransactionByName(final String transactionName) {
final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName);
if(jtxnHandle == 0) {
@@ -217,6 +225,11 @@ public Transaction getTransactionByName(final String transactionName) {
return txn;
}
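
A 2PC recovery sketch, assuming an open "txnDb" and a transaction name chosen by the application:

    final Transaction recovered = txnDb.getTransactionByName("txn-42");
    if (recovered != null) {
      recovered.commit(); // or recovered.rollback(), per the recovery decision
    }
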
+ /**
+ * Gets a list of all prepared transactions.
+ *
+ * @return the list of prepared transactions.
+ */
  public List<Transaction> getAllPreparedTransactions() {
* If 0, no waiting is done if a lock cannot instantly be acquired.
* If negative, there is no timeout and will block indefinitely when acquiring
* a lock.
*
- * @return the timeout in milliseconds when writing a key OUTSIDE of a
- * transaction
+ * @return the timeout in milliseconds when writing a key outside of a transaction
*/
public long getDefaultLockTimeout() {
assert(isOwningHandle());
@@ -128,8 +133,8 @@ public long getDefaultLockTimeout() {
/**
* If positive, specifies the wait timeout in milliseconds when writing a key
- * OUTSIDE of a transaction (ie by calling {@link RocksDB#put},
- * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
+ * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])},
+ * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)}
* directly).
*
* If 0, no waiting is done if a lock cannot instantly be acquired.
@@ -145,7 +150,7 @@ public long getDefaultLockTimeout() {
* Default: 1000
*
* @param defaultLockTimeout the timeout in milliseconds when writing a key
- * OUTSIDE of a transaction
+ * outside of a transaction
* @return this TransactionDBOptions instance
*/
public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) {
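
A hedged sketch of how setDefaultLockTimeout is typically used when opening a TransactionDB; the path and the 500 ms value are illustrative only.

    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.TransactionDB;
    import org.rocksdb.TransactionDBOptions;

    public class DefaultLockTimeoutExample {
      public static void open() throws RocksDBException {
        try (final Options options = new Options().setCreateIfMissing(true);
             final TransactionDBOptions txnDbOptions =
                 new TransactionDBOptions().setDefaultLockTimeout(500); // milliseconds
             final TransactionDB txnDb =
                 TransactionDB.open(options, txnDbOptions, "/tmp/txn_example")) {
          // writes made outside a transaction now wait up to 500 ms for key locks
        }
      }
    }
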
diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java
index f93d3cb3cbb8..9755724f1285 100644
--- a/java/src/main/java/org/rocksdb/TransactionOptions.java
+++ b/java/src/main/java/org/rocksdb/TransactionOptions.java
@@ -5,9 +5,15 @@
package org.rocksdb;
+/**
+ * Options for a Transaction.
+ */
public class TransactionOptions extends RocksObject
    implements TransactionalOptions<TransactionOptions> {
* If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
+ * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
* will be used
*
* @return the lock timeout in milliseconds
@@ -71,7 +77,7 @@ public long getLockTimeout() {
* a transaction attempts to lock a key.
*
* If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
+ * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
* will be used
*
* Default: -1
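
A sketch of the per-transaction counterpart: overriding the lock timeout for a single transaction via TransactionOptions.setLockTimeout. An already open TransactionDB txnDb is assumed; -1 (the default) falls back to the DB-wide setting.

    import org.rocksdb.RocksDBException;
    import org.rocksdb.Transaction;
    import org.rocksdb.TransactionDB;
    import org.rocksdb.TransactionOptions;
    import org.rocksdb.WriteOptions;

    public class LockTimeoutExample {
      public static void update(final TransactionDB txnDb) throws RocksDBException {
        try (final WriteOptions writeOptions = new WriteOptions();
             final TransactionOptions txnOptions =
                 new TransactionOptions().setLockTimeout(100); // 100 ms for this transaction only
             final Transaction txn = txnDb.beginTransaction(writeOptions, txnOptions)) {
          txn.put("key".getBytes(), "value".getBytes());
          txn.commit();
        }
      }
    }
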
diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
index 0cffdce8c117..b028bcc45df6 100644
--- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java
+++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
@@ -10,6 +10,9 @@
* integer value.
*/
public class UInt64AddOperator extends MergeOperator {
+ /**
+ * Constructs a UInt64AddOperator.
+ */
public UInt64AddOperator() {
super(newSharedUInt64AddOperator());
}
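
A hedged sketch of UInt64AddOperator in use as a merge operator for counters; the path is illustrative and the operand is assumed to be a fixed-width 64-bit little-endian encoding.

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.UInt64AddOperator;

    public class CounterExample {
      public static void count() throws RocksDBException {
        try (final UInt64AddOperator addOp = new UInt64AddOperator();
             final Options options =
                 new Options().setCreateIfMissing(true).setMergeOperator(addOp);
             final RocksDB db = RocksDB.open(options, "/tmp/counter_example")) {
          final byte[] one =
              ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(1).array();
          db.merge("hits".getBytes(), one); // hits == 1
          db.merge("hits".getBytes(), one); // hits == 2
        }
      }
    }
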
diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
index fb1e7a948549..9bbf8eed56f5 100644
--- a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
@@ -5,6 +5,10 @@
* The config for vector memtable representation.
*/
public class VectorMemTableConfig extends MemTableConfig {
+
+ /**
+   * The default reserved size for the vector memtable.
+ */
public static final int DEFAULT_RESERVED_SIZE = 0;
/**
diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
index 25d6e6f9d666..1bc51627dba3 100644
--- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
+++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
@@ -7,10 +7,19 @@
import java.nio.ByteBuffer;
+/**
+ * Iterator over the contents of a Write Batch With Index.
+ */
public class WBWIRocksIterator
    extends AbstractRocksIterator<WriteBatchWithIndex> {
* Similar to memcmp.c.
*
* @param x the first value to compare with
diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
index fd55fdf8c57c..fb32db3be0da 100644
--- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
@@ -23,6 +23,11 @@
*/
public final class BytewiseComparator extends AbstractComparator {
+ /**
+ * Constructs a new BytewiseComparator.
+ *
+ * @param copt the configuration options for the comparator.
+ */
public BytewiseComparator(final ComparatorOptions copt) {
super(copt);
}
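
A sketch of constructing the comparator documented above and installing it on Options; note that a Java comparator carries JNI bridging overhead, so the builtin C++ bytewise comparator is normally preferable for performance.

    import org.rocksdb.ComparatorOptions;
    import org.rocksdb.Options;
    import org.rocksdb.util.BytewiseComparator;

    public class BytewiseComparatorExample {
      public static Options buildOptions() {
        final ComparatorOptions copt = new ComparatorOptions();
        final BytewiseComparator comparator = new BytewiseComparator(copt);
        // the comparator must outlive any DB opened with these options
        return new Options().setCreateIfMissing(true).setComparator(comparator);
      }
    }
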
diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java
index 78b73dc5d432..f6b2fa505657 100644
--- a/java/src/main/java/org/rocksdb/util/Environment.java
+++ b/java/src/main/java/org/rocksdb/util/Environment.java
@@ -5,6 +5,9 @@
import java.io.IOException;
import java.util.Locale;
+/**
+ * Provides information about the environment in which RocksJava is executing.
+ */
public class Environment {
@SuppressWarnings("FieldMayBeFinal")
private static String OS = System.getProperty("os.name").toLowerCase(Locale.getDefault());
@@ -24,38 +27,83 @@ public class Environment {
*/
private static Boolean MUSL_LIBC = null;
+ /**
+ * Returns true if the CPU architecture is aarch64.
+ *
+ * @return true if the CPU architecture is aarch64, false otherwise.
+ */
public static boolean isAarch64() {
return ARCH.contains("aarch64");
}
+ /**
+ * Returns true if the CPU architecture is ppc.
+ *
+ * @return true if the CPU architecture is ppc, false otherwise.
+ */
public static boolean isPowerPC() {
return ARCH.contains("ppc");
}
+ /**
+ * Returns true if the CPU architecture is s390x.
+ *
+ * @return true if the CPU architecture is s390x, false otherwise.
+ */
public static boolean isS390x() {
return ARCH.contains("s390x");
}
+ /**
+ * Returns true if the CPU architecture is riscv64.
+ *
+ * @return true if the CPU architecture is riscv64, false otherwise.
+ */
public static boolean isRiscv64() {
return ARCH.contains("riscv64");
}
+ /**
+ * Returns true if the OS is Windows.
+ *
+ * @return true if the OS is Windows, false otherwise.
+ */
public static boolean isWindows() {
return (OS.contains("win"));
}
+ /**
+ * Returns true if the OS is FreeBSD.
+ *
+ * @return true if the OS is FreeBSD, false otherwise.
+ */
public static boolean isFreeBSD() {
return (OS.contains("freebsd"));
}
+ /**
+ * Returns true if the OS is Mac.
+ *
+ * @return true if the OS is Mac, false otherwise.
+ */
public static boolean isMac() {
return (OS.contains("mac"));
}
+ /**
+ * Returns true if the OS is AIX.
+ *
+ * @return true if the OS is AIX, false otherwise.
+ */
public static boolean isAix() {
return OS.contains("aix");
}
-
+
+ /**
+ * Returns true if the OS is Unix.
+ *
+ * @return true if the OS is Unix, false otherwise.
+ */
public static boolean isUnix() {
return OS.contains("nix") ||
OS.contains("nux");
@@ -75,9 +123,9 @@ public static boolean isMuslLibc() {
/**
* Determine if the environment has a musl libc.
- *
+ *
* The initialisation counterpart of {@link #isMuslLibc()}.
- *
+ *
* Intentionally package-private for testing.
*
* @return true if the environment has a musl libc, false otherwise.
@@ -136,14 +184,29 @@ static boolean initIsMuslLibc() {
return false;
}
+ /**
+ * Returns true if the OS is Solaris.
+ *
+ * @return true if the OS is Solaris, false otherwise.
+ */
public static boolean isSolaris() {
return OS.contains("sunos");
}
+ /**
+ * Returns true if the OS is OpenBSD.
+ *
+ * @return true if the OS is OpenBSD, false otherwise.
+ */
public static boolean isOpenBSD() {
return (OS.contains("openbsd"));
}
+ /**
+ * Returns true if the system architecture is 64 bit.
+ *
+ * @return true if the system architecture is 64 bit, false otherwise.
+ */
public static boolean is64Bit() {
if (ARCH.contains(SPARCV9)) {
return true;
@@ -151,10 +214,24 @@ public static boolean is64Bit() {
return (ARCH.indexOf("64") > 0);
}
+ /**
+   * Get the name of the shared JNI library derived from a base library name.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the name of the shared JNI library.
+ */
public static String getSharedLibraryName(final String name) {
return name + "jni";
}
+ /**
+   * Get the filename of the shared JNI library derived from a base library name.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the filename of the shared JNI library.
+ */
public static String getSharedLibraryFileName(final String name) {
return appendLibOsSuffix("lib" + getSharedLibraryName(name), true);
}
@@ -181,6 +258,16 @@ private static String getLibcPostfix() {
return "-" + libcName;
}
+
+ /**
+   * Get the name of the JNI library derived from a base library name.
+   *
+   * Deals with platform- and architecture-specific naming.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the name of the JNI library.
+ */
public static String getJniLibraryName(final String name) {
if (isUnix()) {
final String arch = is64Bit() ? "64" : "32";
@@ -219,6 +306,15 @@ public static String getJniLibraryName(final String name) {
throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name));
}
+ /**
+   * Get a fallback name for the JNI library derived from a base library name.
+   *
+   * Deals with platform- and architecture-specific naming.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the fallback name of the JNI library.
+ */
public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) {
if (isMac() && is64Bit()) {
return String.format("%sjni-osx", name);
@@ -226,10 +322,28 @@ public static String getJniLibraryName(final String name) {
return null;
}
+ /**
+   * Get the filename of the JNI library derived from a base library name.
+   *
+   * Deals with platform- and architecture-specific naming.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the filename of the JNI library.
+ */
public static String getJniLibraryFileName(final String name) {
return appendLibOsSuffix("lib" + getJniLibraryName(name), false);
}
+ /**
+   * Get the fallback filename of the JNI library derived from a base library name.
+   *
+   * Deals with platform- and architecture-specific naming.
+   *
+   * @param name the base name of the library.
+   *
+   * @return the fallback filename of the JNI library.
+ */
public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) {
final String fallbackJniLibraryName = getFallbackJniLibraryName(name);
if (fallbackJniLibraryName == null) {
@@ -249,6 +363,13 @@ private static String appendLibOsSuffix(final String libraryFileName, final bool
throw new UnsupportedOperationException();
}
+ /**
+ * Get the filename extension used for a JNI library.
+ *
+   * Deals with platform- and architecture-specific naming.
+ *
+ * @return the filename extension.
+ */
public static String getJniLibraryExtension() {
if (isWindows()) {
return ".dll";
diff --git a/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java
index 2caf0c601572..cf3c6423f08c 100644
--- a/java/src/main/java/org/rocksdb/util/IntComparator.java
+++ b/java/src/main/java/org/rocksdb/util/IntComparator.java
@@ -13,11 +13,11 @@
/**
* This is a Java implementation of a Comparator for Java int
* keys.
- *
+ *
* This comparator assumes keys are (at least) four bytes, so
* the caller must guarantee that in accessing other APIs in
* combination with this comparator.
- *
+ *
* The performance of Comparators implemented in Java is always
* less than their C++ counterparts due to the bridging overhead,
* as such you likely don't want to use this apart from benchmarking
@@ -25,8 +25,13 @@
*/
public final class IntComparator extends AbstractComparator {
- public IntComparator(final ComparatorOptions copt) {
- super(copt);
+ /**
+ * Constructs an IntComparator.
+ *
+ * @param comparatorOptions the options for the comparator.
+ */
+ public IntComparator(final ComparatorOptions comparatorOptions) {
+ super(comparatorOptions);
}
@Override
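
A sketch pairing IntComparator with the four-byte keys it requires; building keys with ByteBuffer.putInt is one way to satisfy that contract.

    import java.nio.ByteBuffer;
    import org.rocksdb.ComparatorOptions;
    import org.rocksdb.Options;
    import org.rocksdb.util.IntComparator;

    public class IntComparatorExample {
      public static Options buildOptions() {
        final IntComparator comparator = new IntComparator(new ComparatorOptions());
        return new Options().setCreateIfMissing(true).setComparator(comparator);
      }

      public static byte[] intKey(final int i) {
        return ByteBuffer.allocate(4).putInt(i).array(); // exactly four bytes, as required
      }
    }
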
diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
index 3d3c429416b0..e145184eac6c 100644
--- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
@@ -24,6 +24,11 @@
*/
public final class ReverseBytewiseComparator extends AbstractComparator {
+ /**
+ * Constructs a ReverseBytewiseComparator.
+ *
+ * @param copt the comparator options.
+ */
public ReverseBytewiseComparator(final ComparatorOptions copt) {
super(copt);
}
diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java
index 0f717e8d4540..8582bb15436b 100644
--- a/java/src/main/java/org/rocksdb/util/SizeUnit.java
+++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java
@@ -5,12 +5,33 @@
package org.rocksdb.util;
-public class SizeUnit {
- public static final long KB = 1024L;
- public static final long MB = KB * KB;
- public static final long GB = KB * MB;
- public static final long TB = KB * GB;
- public static final long PB = KB * TB;
+/**
+ * Simple multiplier constants for byte sizes.
+ */
+public interface SizeUnit {
- private SizeUnit() {}
+ /**
+ * 1 Kilobyte.
+ */
+ long KB = 1024L;
+
+ /**
+ * 1 Megabyte.
+ */
+ long MB = KB * KB;
+
+ /**
+ * 1 Gigabyte.
+ */
+ long GB = KB * MB;
+
+ /**
+ * 1 Terabyte.
+ */
+ long TB = KB * GB;
+
+ /**
+ * 1 Petabyte.
+ */
+ long PB = KB * TB;
}
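
A sketch of the constants in use; setWriteBufferSize and setTargetFileSizeBase are standard Options setters, and the sizes are illustrative.

    import org.rocksdb.Options;
    import org.rocksdb.util.SizeUnit;

    public class SizeUnitExample {
      public static Options buildOptions() {
        return new Options()
            .setCreateIfMissing(true)
            .setWriteBufferSize(64 * SizeUnit.MB)
            .setTargetFileSizeBase(2 * SizeUnit.GB);
      }
    }
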
diff --git a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
index 549b74beb1cc..9c6689ea8906 100644
--- a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
@@ -125,14 +125,14 @@ public void fullHistoryTSLowDefault() {
@Test
public void canceled() {
CompactRangeOptions opt = new CompactRangeOptions();
- assertThat(opt.canceled()).isEqualTo(false);
- opt.setCanceled(true);
- assertThat(opt.canceled()).isEqualTo(true);
- opt.setCanceled(false);
- assertThat(opt.canceled()).isEqualTo(false);
- opt.setCanceled(true);
- assertThat(opt.canceled()).isEqualTo(true);
- opt.setCanceled(true);
- assertThat(opt.canceled()).isEqualTo(true);
+ assertThat(opt.cancelled()).isEqualTo(false);
+ opt.setCancelled(true);
+ assertThat(opt.cancelled()).isEqualTo(true);
+ opt.setCancelled(false);
+ assertThat(opt.cancelled()).isEqualTo(false);
+ opt.setCancelled(true);
+ assertThat(opt.cancelled()).isEqualTo(true);
+ opt.setCancelled(true);
+ assertThat(opt.cancelled()).isEqualTo(true);
}
}
diff --git a/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/java/src/test/java/org/rocksdb/SstFileManagerTest.java
index 2e136e820035..96c5627096d8 100644
--- a/java/src/test/java/org/rocksdb/SstFileManagerTest.java
+++ b/java/src/test/java/org/rocksdb/SstFileManagerTest.java
@@ -47,7 +47,7 @@ public void trackedFiles() throws RocksDBException {
@Test
public void deleteRateBytesPerSecond() throws RocksDBException {
try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
- assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.RATE_BYTES_PER_SEC_DEFAULT);
+ assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.DEFAULT_RATE_BYTES_PER_SEC);
final long ratePerSecond = 1024 * 1024 * 52;
sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond);
assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond);
@@ -57,7 +57,7 @@ public void deleteRateBytesPerSecond() throws RocksDBException {
@Test
public void maxTrashDBRatio() throws RocksDBException {
try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
- assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.MAX_TRASH_DB_RATION_DEFAULT);
+ assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.DEFAULT_MAX_TRASH_DB_RATIO);
final double trashRatio = 0.2;
sstFileManager.setMaxTrashDBRatio(trashRatio);
assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio);
diff --git a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
index 8d7956cf27f6..c3309d787878 100644
--- a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
+++ b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
@@ -8,7 +8,18 @@
import java.nio.ByteBuffer;
+/**
+ * Allocator for creating new ByteBuffers.
+ */
public interface ByteBufferAllocator {
+
+ /**
+ * Allocate a new ByteBuffer.
+ *
+ * @param capacity the capacity of the buffer.
+ *
+ * @return the new ByteBuffer.
+ */
ByteBuffer allocate(int capacity);
ByteBufferAllocator DIRECT = new DirectByteBufferAllocator();
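
A sketch of a heap-backed implementation of this test interface, mirroring the DIRECT constant above; the class name is illustrative.

    import java.nio.ByteBuffer;

    public class HeapByteBufferAllocator implements ByteBufferAllocator {
      @Override
      public ByteBuffer allocate(final int capacity) {
        return ByteBuffer.allocate(capacity); // heap-backed, non-direct buffer
      }
    }
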
diff --git a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
index 8ea104332cb1..8b06fbfabca0 100644
--- a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
+++ b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
@@ -124,16 +124,34 @@ public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws Ro
events.add(new Event(Action.MARK_COMMIT_WITH_TIMESTAMP, (byte[]) null, (byte[]) null));
}
+ /**
+ * Event received by the handler.
+ */
public static class Event {
public final Action action;
public final int columnFamilyId;
public final byte[] key;
public final byte[] value;
+ /**
+ * Construct an event.
+ *
+ * @param action the action of the event
+ * @param key the key of the event
+ * @param value the value of the event
+ */
public Event(final Action action, final byte[] key, final byte[] value) {
this(action, 0, key, value);
}
+ /**
+ * Construct an event.
+ *
+ * @param action the action of the event
+ * @param columnFamilyId the id of the column family of the event
+ * @param key the key of the event
+ * @param value the value of the event
+ */
public Event(final Action action, final int columnFamilyId, final byte[] key,
final byte[] value) {
this.action = action;
+ */
public abstract class AbstractWriteBatch extends RocksObject
implements WriteBatchInterface {
+ /**
+ * Construct an AbstractWriteBatch.
+ *
+   * @param nativeHandle the value of the C++ pointer to the underlying native RocksDB WriteBatch object.
+ */
protected AbstractWriteBatch(final long nativeHandle) {
super(nativeHandle);
}
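
Since AbstractWriteBatch is the base of WriteBatch and WriteBatchWithIndex, a short sketch of the common pattern: batching updates and applying them atomically. An open RocksDB db is assumed.

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    public class WriteBatchExample {
      public static void apply(final RocksDB db) throws RocksDBException {
        try (final WriteBatch batch = new WriteBatch();
             final WriteOptions writeOptions = new WriteOptions()) {
          batch.put("k1".getBytes(), "v1".getBytes());
          batch.delete("k2".getBytes());
          db.write(writeOptions, batch); // applied atomically
        }
      }
    }
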
diff --git a/java/src/main/java/org/rocksdb/AccessHint.java b/java/src/main/java/org/rocksdb/AccessHint.java
index b7ccadd84a66..5731474cbf26 100644
--- a/java/src/main/java/org/rocksdb/AccessHint.java
+++ b/java/src/main/java/org/rocksdb/AccessHint.java
@@ -10,9 +10,24 @@
*/
@Deprecated
public enum AccessHint {
+ /**
+ * No file access pattern hint for compaction.
+ */
NONE((byte)0x0),
+
+ /**
+ * Normal file access pattern for compaction.
+ */
NORMAL((byte)0x1),
+
+ /**
+ * Sequential file access pattern for compaction.
+ */
SEQUENTIAL((byte)0x2),
+
+ /**
+ * Will need file access pattern for compaction.
+ */
WILLNEED((byte)0x3);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
index d1d1123dded4..ebc4a70f9386 100644
--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -12,6 +12,8 @@
* mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface})
*
+ * batch.put("key", "v1");
+ * batch.remove("key");
+ * batch.put("key", "v2");
+ * batch.put("key", "v3");
+ *
- *
+ * Builtin RocksDB comparators.
*/
public enum BuiltinComparator {
- BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR
+ /**
+  * Sorts all keys in ascending bytewise order.
+ */
+ BYTEWISE_COMPARATOR,
+
+ /**
+  * Sorts all keys in descending bytewise order.
+ */
+ REVERSE_BYTEWISE_COMPARATOR
}
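
A sketch selecting a builtin comparator; because these run in C++, they avoid the JNI bridging cost of Java comparators such as BytewiseComparator.

    import org.rocksdb.BuiltinComparator;
    import org.rocksdb.Options;

    public class BuiltinComparatorExample {
      public static Options buildOptions() {
        return new Options()
            .setCreateIfMissing(true)
            .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
      }
    }
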
diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
index 4ab9e8475ce9..fead6b2c13b7 100644
--- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
+++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
@@ -20,8 +20,19 @@
* {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)}
*/
public class ByteBufferGetStatus {
+ /**
+ * Status of the request to fetch into the buffer.
+ */
public final Status status;
+
+ /**
+ * Size of the data, which may be bigger than the buffer.
+ */
public final int requiredSize;
+
+ /**
+ * Buffer containing as much of the value as fits.
+ */
public final ByteBuffer value;
/**
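
A hedged sketch of reading these fields after a multiGetByteBuffers call; it assumes an open RocksDB db, direct buffers as the API requires, and illustrative buffer sizes.

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;
    import org.rocksdb.ByteBufferGetStatus;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class MultiGetExample {
      public static void read(final RocksDB db) throws RocksDBException {
        final ByteBuffer key = ByteBuffer.allocateDirect(16);
        key.put("k1".getBytes());
        key.flip();
        final List<ByteBufferGetStatus> statuses = db.multiGetByteBuffers(
            Arrays.asList(key), Arrays.asList(ByteBuffer.allocateDirect(32)));
        for (final ByteBufferGetStatus s : statuses) {
          if (s.requiredSize > 32) {
            // value did not fit in the 32-byte buffer;
            // retry with a buffer of at least s.requiredSize bytes
          }
        }
      }
    }
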
diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java
index 04bd3fcaa398..fc814c94beed 100644
--- a/java/src/main/java/org/rocksdb/Cache.java
+++ b/java/src/main/java/org/rocksdb/Cache.java
@@ -6,7 +6,16 @@
package org.rocksdb;
+/**
+ * Base class for Cache implementations.
+ */
public abstract class Cache extends RocksObject {
+
+ /**
+ * Construct a Cache.
+ *
+   * @param nativeHandle the value of the C++ pointer to the underlying native RocksDB C++ cache object.
+ */
protected Cache(final long nativeHandle) {
super(nativeHandle);
}
diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
index 12854c5102be..b452a54c3b73 100644
--- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
@@ -6,10 +6,26 @@
package org.rocksdb;
/**
- * Just a Java wrapper around CassandraCompactionFilter implemented in C++
+ * Just a Java wrapper around CassandraCompactionFilter implemented in C++.
+ *