diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index fd7eef4d4cfb..1f4a5e4a687c 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -10,14 +10,25 @@ *
<p>
* At present, we just permit an overriding Java class to wrap a C++ * implementation + * + * @param <T> the concrete type of the {@link AbstractSlice} that the Compaction Filter uses. */ public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>> extends RocksObject { + /** + * Context of the Compaction Filter. + */ public static class Context { private final boolean fullCompaction; private final boolean manualCompaction; + /** + * Context constructor. + * + * @param fullCompaction true to filter full compaction, false otherwise. + * @param manualCompaction true to filter manual compaction, false otherwise. + */ public Context(final boolean fullCompaction, final boolean manualCompaction) { this.fullCompaction = fullCompaction; this.manualCompaction = manualCompaction; @@ -43,6 +54,12 @@ public boolean isManualCompaction() { } } + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Compaction Filter. + */ protected AbstractCompactionFilter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java index 728cda8c1d42..231377f516df 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java @@ -14,6 +14,9 @@ public abstract class AbstractCompactionFilterFactory<T extends AbstractCompactionFilter<? extends AbstractSlice<?>>> extends RocksCallbackObject { + /** + * Constructs a new Compaction Filter Factory which has no underlying C++ object. + */ public AbstractCompactionFilterFactory() { super(0L); } @@ -26,8 +29,8 @@ protected long initializeNative(final long... nativeParameterHandles) { /** * Called from JNI, see compaction_filter_factory_jnicallback.cc * - * @param fullCompaction {@link AbstractCompactionFilter.Context#fullCompaction} - * @param manualCompaction {@link AbstractCompactionFilter.Context#manualCompaction} + * @param fullCompaction {@link AbstractCompactionFilter.Context#isFullCompaction()} + * @param manualCompaction {@link AbstractCompactionFilter.Context#isManualCompaction()} * * @return native handle of the CompactionFilter */ diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index 83e0f0676019..18a1036a4f81 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -10,7 +10,7 @@ /** * Comparators are used by RocksDB to determine * the ordering of keys. - * + *
<p>
* Implementations of Comparators in Java should extend this class. */ public abstract class AbstractComparator @@ -20,6 +20,11 @@ public abstract class AbstractComparator super(); } + /** + * Construct an AbstractComparator. + * + * @param comparatorOptions options for the comparator. + */ protected AbstractComparator(final ComparatorOptions comparatorOptions) { super(comparatorOptions.nativeHandle_); } @@ -59,7 +64,7 @@ ComparatorType getComparatorType() { * Three-way key comparison. Implementations should provide a * total order * on keys that might be passed to it. - * + *
<p>
* The implementation may modify the {@code ByteBuffer}s passed in, though * it would be unconventional to modify the "limit" or any of the * underlying bytes. As a callback, RocksJava will ensure that {@code a} @@ -114,6 +119,11 @@ public void findShortSuccessor(final ByteBuffer key) { // no-op } + /** + * Returns true if we are using direct byte buffers. + * + * @return true if we are using direct byte buffers, false otherwise. + */ public final boolean usingDirectBuffers() { return usingDirectBuffers(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java index d0ceef93d419..9bd1ff7694bc 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java +++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java @@ -12,7 +12,7 @@ * it holds methods which are called * from C++ to interact with a Comparator * written in Java. - * + *
<p>
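
Reviewer example, not part of this patch: the AbstractComparator contract documented above in a minimal subclass. The class and comparator name are illustrative; only the name()/compare() overrides and the ComparatorOptions constructor shown in this diff are assumed.

    import java.nio.ByteBuffer;
    import org.rocksdb.AbstractComparator;
    import org.rocksdb.ComparatorOptions;

    public class UnsignedBytewiseComparator extends AbstractComparator {
      public UnsignedBytewiseComparator(final ComparatorOptions options) {
        super(options);
      }

      @Override
      public String name() {
        return "example.unsigned-bytewise"; // must stay stable across DB opens
      }

      @Override
      public int compare(final ByteBuffer a, final ByteBuffer b) {
        // Absolute reads: the javadoc permits moving position, but this keeps
        // the example side-effect free. Bytes are compared as unsigned values.
        final int len = Math.min(a.remaining(), b.remaining());
        for (int i = 0; i < len; i++) {
          final int cmp =
              (a.get(a.position() + i) & 0xFF) - (b.get(b.position() + i) & 0xFF);
          if (cmp != 0) {
            return cmp;
          }
        }
        return a.remaining() - b.remaining();
      }
    }
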
* Placing these bridge methods in this * class keeps the API of the * {@link org.rocksdb.AbstractComparator} clean. diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java index c9371c45eb0c..5c7f58ab6afb 100644 --- a/java/src/main/java/org/rocksdb/AbstractEventListener.java +++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java @@ -12,28 +12,120 @@ */ @SuppressWarnings("PMD.AvoidDuplicateLiterals") public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener { + + /** + * Callback events that can be enabled. + */ public enum EnabledEventCallback { + + /** + * Flush completed. + */ ON_FLUSH_COMPLETED((byte) 0x0), + + /** + * Flush beginning. + */ ON_FLUSH_BEGIN((byte) 0x1), + + /** + * Table file was deleted. + */ ON_TABLE_FILE_DELETED((byte) 0x2), + + /** + * Compaction beginning. + */ ON_COMPACTION_BEGIN((byte) 0x3), + + /** + * Compaction completed. + */ ON_COMPACTION_COMPLETED((byte) 0x4), + + /** + * Table file created. + */ ON_TABLE_FILE_CREATED((byte) 0x5), + + /** + * Started creation of Table file. + */ ON_TABLE_FILE_CREATION_STARTED((byte) 0x6), + + /** + * Memtable has been sealed. + */ ON_MEMTABLE_SEALED((byte) 0x7), + + /** + * Started deletion of Column Family handle. + */ ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8), + + /** + * External file ingested. + */ ON_EXTERNAL_FILE_INGESTED((byte) 0x9), + + /** + * Background error. + */ ON_BACKGROUND_ERROR((byte) 0xA), + + /** + * Stall conditions have been changed. + */ ON_STALL_CONDITIONS_CHANGED((byte) 0xB), + + /** + * File read has finished. + */ ON_FILE_READ_FINISH((byte) 0xC), + + /** + * File write has finished. + */ ON_FILE_WRITE_FINISH((byte) 0xD), + + /** + * File flush has finished. + */ ON_FILE_FLUSH_FINISH((byte) 0xE), + + /** + * File sync has finished. + */ ON_FILE_SYNC_FINISH((byte) 0xF), + + /** + * Range file read sync finished. + */ ON_FILE_RANGE_SYNC_FINISH((byte) 0x10), + + /** + * File truncation has finished. + */ ON_FILE_TRUNCATE_FINISH((byte) 0x11), + + /** + * Closing a file has finished. + */ ON_FILE_CLOSE_FINISH((byte) 0x12), + + /** + * Flag has been set to be notified on file IO. + */ SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13), + + /** + * Error recovery beginning. + */ ON_ERROR_RECOVERY_BEGIN((byte) 0x14), + + /** + * Error recovery completed. + */ ON_ERROR_RECOVERY_COMPLETED((byte) 0x15); private final byte value; diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java index 173d63e9011e..8c500d8a5df2 100644 --- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -22,6 +22,11 @@ public abstract class AbstractImmutableNativeReference */ protected final AtomicBoolean owningHandle_; + /** + * Construct an AbstractImmutableNativeReference. + * + * @param owningHandle true if this Java object owns the underlying C++ object, false otherwise. 
+ */ protected AbstractImmutableNativeReference(final boolean owningHandle) { this.owningHandle_ = new AtomicBoolean(owningHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java index ff9b8569fd89..86294fd9ac1a 100644 --- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java +++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java @@ -9,12 +9,26 @@ * The constructor is protected, so it will always be used as a base class. */ public class AbstractMutableOptions { + /** + * Separator between Key/Value pairs. + */ protected static final String KEY_VALUE_PAIR_SEPARATOR = ";"; + + /** + * Separator between Key and Value. + */ protected static final char KEY_VALUE_SEPARATOR = '='; + + /** + * Separator between integers in an integer array. + */ static final String INT_ARRAY_INT_SEPARATOR = ":"; private static final String HAS_NOT_BEEN_SET = " has not been set"; + /** + * the keys. + */ protected final String[] keys; private final String[] values; @@ -62,12 +76,24 @@ public String toString() { return buffer.toString(); } + /** + * Builder base class for constructing Mutable Options. + * + * @param the type of the Mutable Options. + * @param the type of the Builder. + * @param the type of the Option Key. + */ public abstract static class AbstractMutableOptionsBuilder< T extends AbstractMutableOptions, U extends AbstractMutableOptionsBuilder, K extends MutableOptionKey> { private final Map> options = new LinkedHashMap<>(); private final List unknown = new ArrayList<>(); + /** + * Return the builder. + * + * @return the builder. + */ protected abstract U self(); /** @@ -87,6 +113,11 @@ public abstract static class AbstractMutableOptionsBuilder< */ protected abstract T build(final String[] keys, final String[] values); + /** + * Construct a subclass instance of {@link AbstractMutableOptions}. + * + * @return an instance of the options. + */ public T build() { final String[] keys = new String[options.size()]; final String[] values = new String[options.size()]; @@ -101,6 +132,14 @@ public T build() { return build(keys, values); } + /** + * Set an option of `Double` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setDouble( final K key, final double value) { if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) { @@ -111,6 +150,13 @@ protected U setDouble( return self(); } + /** + * Get an option of `Double` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected double getDouble(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -120,6 +166,14 @@ protected double getDouble(final K key) return value.asDouble(); } + /** + * Set an option of `Long` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setLong( final K key, final long value) { if(key.getValueType() != MutableOptionKey.ValueType.LONG) { @@ -130,6 +184,13 @@ protected U setLong( return self(); } + /** + * Get an option of `Long` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected long getLong(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -139,6 +200,14 @@ protected long getLong(final K key) return value.asLong(); } + /** + * Set an option of `int` type. + * + * @param key the key. 
+ * @param value the value. + * + * @return the builder. + */ protected U setInt( final K key, final int value) { if(key.getValueType() != MutableOptionKey.ValueType.INT) { @@ -149,6 +218,13 @@ protected U setInt( return self(); } + /** + * Get an option of `int` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int getInt(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -158,6 +234,14 @@ protected int getInt(final K key) return value.asInt(); } + /** + * Set an option of `boolean` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setBoolean( final K key, final boolean value) { if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) { @@ -168,6 +252,13 @@ protected U setBoolean( return self(); } + /** + * Get an option of `boolean` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected boolean getBoolean(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -177,6 +268,14 @@ protected boolean getBoolean(final K key) return value.asBoolean(); } + /** + * Set an option of `int[]` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setIntArray( final K key, final int[] value) { if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) { @@ -187,6 +286,13 @@ protected U setIntArray( return self(); } + /** + * Get an option of `int[]` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int[] getIntArray(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -196,6 +302,16 @@ protected int[] getIntArray(final K key) return value.asIntArray(); } + /** + * Set an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected > U setEnum( final K key, final N value) { if(key.getValueType() != MutableOptionKey.ValueType.ENUM) { @@ -206,6 +322,15 @@ protected > U setEnum( return self(); } + /** + * Get an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * + * @return the value of the option. + */ @SuppressWarnings("unchecked") protected > N getEnum(final K key) throws NoSuchElementException, NumberFormatException { diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index 1aade1b89826..a40587976a14 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -25,6 +25,12 @@ public abstract class AbstractRocksIterator
<P extends RocksObject>
extends RocksObject implements RocksIteratorInterface { final P parent_; + /** + * Constructs an AbstractRocksIterator. + * + * @param parent the parent object from which the Rocks Iterator was created. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. + */ protected AbstractRocksIterator(final P parent, final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index f321b9910aeb..9dad909fdf7f 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -23,13 +23,23 @@ * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the * C++ BaseComparatorJniCallback subclass, which in turn destroys the * Java @see org.rocksdb.AbstractSlice subclass Objects. + * + * @param the concrete Java type that is wrapped by the subclass of {@link AbstractSlice}. */ public abstract class AbstractSlice extends RocksMutableObject { + /** + * Constructs an AbstractSlice. + */ protected AbstractSlice() { super(); } + /** + * Constructs an AbstractSlice. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Slice. + */ protected AbstractSlice(final long nativeHandle) { super(nativeHandle); } @@ -174,6 +184,13 @@ public boolean startsWith(final AbstractSlice prefix) { } } + /** + * Constructs a new Slice from a String. + * + * @param str the string. + * + * @return the handle to the native C++ Slice object. + */ protected static native long createNewSliceFromString(final String str); private native int size0(long handle); private native boolean empty0(long handle); @@ -186,6 +203,8 @@ public boolean startsWith(final AbstractSlice prefix) { * Note that this function should be called only after all * RocksDB instances referencing the slice are closed. * Otherwise, an undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. */ @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java index c696c3e1352e..0c7f994fbee8 100644 --- a/java/src/main/java/org/rocksdb/AbstractTableFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -7,6 +7,9 @@ public abstract class AbstractTableFilter extends RocksCallbackObject implements TableFilter { + /** + * Constructs a new AbstractTableFilter. + */ protected AbstractTableFilter() { super(); } diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java index b117e5cc2ad4..55818ddf6dde 100644 --- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -12,6 +12,9 @@ public abstract class AbstractTransactionNotifier extends RocksCallbackObject { + /** + * Constructs an AbstractTransactionNotifier. + */ protected AbstractTransactionNotifier() { super(); } @@ -50,5 +53,14 @@ protected long initializeNative(final long... nativeParameterHandles) { protected void disposeInternal() { disposeInternal(nativeHandle_); } + + /** + * Deletes underlying C++ transaction notifier pointer. 
+ * Note that this function should be called only after all + * RocksDB instances referencing the transaction notifier are closed. + * Otherwise, undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. + */ protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 41d967f53179..59e253e75b0f 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -7,9 +7,27 @@ import java.nio.ByteBuffer; +/** + * WriteBatch holds a collection of updates to apply atomically to a DB. + *
<p>
+ * The updates are applied in the order in which they are added
+ * to the WriteBatch. For example, the value of "key" will be "v3"
+ * after the following batch is written:
+ * <pre>
+ *    batch.put("key", "v1");
+ *    batch.remove("key");
+ *    batch.put("key", "v2");
+ *    batch.put("key", "v3");
+ * </pre>
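
Reviewer example, not part of this patch: the same ordering driven through the public API. `db` is assumed to be an open RocksDB handle, RocksDBException propagates, UTF_8 is statically imported from java.nio.charset.StandardCharsets, and delete() is the current spelling of the deprecated remove() used in the javadoc above.

    try (final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOpts = new WriteOptions()) {
      batch.put("key".getBytes(UTF_8), "v1".getBytes(UTF_8));
      batch.delete("key".getBytes(UTF_8));
      batch.put("key".getBytes(UTF_8), "v2".getBytes(UTF_8));
      batch.put("key".getBytes(UTF_8), "v3".getBytes(UTF_8));
      db.write(writeOpts, batch); // applied atomically; "key" now maps to "v3"
    }
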
+ */ public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { + /** + * Construct an AbstractWriteBatch. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Write Batch object. + */ protected AbstractWriteBatch(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AccessHint.java b/java/src/main/java/org/rocksdb/AccessHint.java index b7ccadd84a66..5731474cbf26 100644 --- a/java/src/main/java/org/rocksdb/AccessHint.java +++ b/java/src/main/java/org/rocksdb/AccessHint.java @@ -10,9 +10,24 @@ */ @Deprecated public enum AccessHint { + /** + * No file access pattern hint for compaction. + */ NONE((byte)0x0), + + /** + * Normal file access pattern for compaction. + */ NORMAL((byte)0x1), + + /** + * Sequential file access pattern for compaction. + */ SEQUENTIAL((byte)0x2), + + /** + * Will need file access pattern for compaction. + */ WILLNEED((byte)0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java index d1d1123dded4..ebc4a70f9386 100644 --- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -12,6 +12,8 @@ * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}) *
<p>
* Taken from include/rocksdb/advanced_options.h + * + * @param <T> the concrete type of the Column Family Options. */ public interface AdvancedColumnFamilyOptionsInterface< T extends AdvancedColumnFamilyOptionsInterface<T> & ColumnFamilyOptionsInterface<T>> { @@ -422,7 +424,7 @@ T setCompactionOptionsFIFO( * even for key hit because they tell us whether to look in that level or go * to the higher level.
<p/>
 *
- * <p> Default: false</p>
+ * <p> Default: false
* * @param optimizeFiltersForHits boolean value indicating if this flag is set. * @return the reference to the current options. diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index c8fc841737dd..d6b182224e69 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -10,6 +10,8 @@ *
<p>
* Taken from include/rocksdb/advanced_options.h * and MutableCFOptions in util/cf_options.h + * + * @param the concrete type of the Column Family Options. */ public interface AdvancedMutableColumnFamilyOptionsInterface< T extends AdvancedMutableColumnFamilyOptionsInterface> { diff --git a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java index eec593d35c54..1c68a7e38dc5 100644 --- a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java +++ b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java @@ -5,10 +5,28 @@ package org.rocksdb; +/** + * Reasons for the background error. + */ public enum BackgroundErrorReason { + /** + * Flush. + */ FLUSH((byte) 0x0), + + /** + * Compaction. + */ COMPACTION((byte) 0x1), + + /** + * Write callback. + */ WRITE_CALLBACK((byte) 0x2), + + /** + * Memtable. + */ MEMTABLE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 3ab2206830f3..f0c9d516d2de 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -19,6 +19,11 @@ */ public class BackupEngine extends RocksObject implements AutoCloseable { + /** + * Construct a BackupEngine. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ backup engine object. + */ protected BackupEngine(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index c82c3ea10ee9..82e56873bbf0 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -11,6 +11,10 @@ */ // TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { + + /** + * Constructs a new BlockBasedTableConfig. + */ @SuppressWarnings("PMD.NullAssignment") public BlockBasedTableConfig() { //TODO(AR) flushBlockPolicyFactory @@ -810,7 +814,7 @@ public IndexShorteningMode indexShortening() { /** * Set the index shortening mode. - * + *
<p>
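
Reviewer example, not part of this patch: plugging the documented mode into a table config; the mode chosen here is illustrative.

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setIndexShortening(IndexShorteningMode.kShortenSeparators);
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setTableFormatConfig(tableConfig)) {
      // pass options to RocksDB.open(...)
    }
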
* See {@link IndexShorteningMode}. * * @param indexShortening the index shortening mode. @@ -888,7 +892,7 @@ public BlockBasedTableConfig setCacheNumShardBits( * * @deprecated This option is now deprecated. No matter what value it * is set to, it will behave as - * if {@link #hashIndexAllowCollision()} == true. + * if {@code setHashIndexAllowCollision(true)} */ @Deprecated public boolean hashIndexAllowCollision() { diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java index 2c89bf218d1d..f4806fe57d72 100644 --- a/java/src/main/java/org/rocksdb/BuiltinComparator.java +++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -6,15 +6,16 @@ package org.rocksdb; /** - * Builtin RocksDB comparators - * - *
<ol>
- *   <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise
- * order.</li>
- *   <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise
- * order</li>
- * </ol>
+ * Builtin RocksDB comparators. */ public enum BuiltinComparator { - BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR + /** + * Sorts all keys in ascending byte wise. + */ + BYTEWISE_COMPARATOR, + + /** + * Sorts all keys in descending byte wise order. + */ + REVERSE_BYTEWISE_COMPARATOR } diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java index 4ab9e8475ce9..fead6b2c13b7 100644 --- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java +++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java @@ -20,8 +20,19 @@ * {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)} */ public class ByteBufferGetStatus { + /** + * Status of the request to fetch into the buffer. + */ public final Status status; + + /** + * Size of the data, which may be bigger than the buffer. + */ public final int requiredSize; + + /** + * Buffer containing as much of the value as fits. + */ public final ByteBuffer value; /** diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index 04bd3fcaa398..fc814c94beed 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -6,7 +6,16 @@ package org.rocksdb; +/** + * Base class for Cache implementations. + */ public abstract class Cache extends RocksObject { + + /** + * Construct a Cache. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ cache object. + */ protected Cache(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index 12854c5102be..b452a54c3b73 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -6,10 +6,26 @@ package org.rocksdb; /** - * Just a Java wrapper around CassandraCompactionFilter implemented in C++ + * Just a Java wrapper around CassandraCompactionFilter implemented in C++. + *
<p>
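
Reviewer example, not part of this patch, for the BuiltinComparator cleanup just above: selecting the builtin descending order when opening a database.

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
      // iterators over this DB now yield keys in descending bytewise order
    }
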
+ * Compaction filter for removing expired Cassandra data with ttl. + * It is also in charge of removing tombstones that have been + * promoted to kValue type after a series of merges during compaction. */ public class CassandraCompactionFilter extends AbstractCompactionFilter<Slice> { + + /** + * Constructs a new CassandraCompactionFilter. + * + * @param purgeTtlOnExpiration if set to true, expired data will be directly purged, + * otherwise expired data will be converted to tombstones + * first and then be eventually removed after + * {@code gcGracePeriodInSeconds}. Should only be on in + * the case that all the writes have the same ttl setting, + * otherwise it could bring old data back. + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraCompactionFilter( final boolean purgeTtlOnExpiration, final int gcGracePeriodInSeconds) { super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index 732faee207a6..703d1b86c4d6 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -6,14 +6,28 @@ package org.rocksdb; /** + * Just a Java wrapper around CassandraValueMergeOperator implemented in C++. + *
<p>
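
Reviewer example, not part of this patch: wiring the Cassandra filter and merge operator into a column family. The 864000 seconds (ten days, Cassandra's default gc_grace_seconds) is illustrative.

    try (final CassandraCompactionFilter compactionFilter =
             new CassandraCompactionFilter(false, 864000);
         final CassandraValueMergeOperator mergeOperator =
             new CassandraValueMergeOperator(864000);
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setCompactionFilter(compactionFilter)
             .setMergeOperator(mergeOperator)) {
      // use cfOpts in a ColumnFamilyDescriptor when opening the DB
    }
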
* CassandraValueMergeOperator is a merge operator that merges two cassandra wide column * values. */ public class CassandraValueMergeOperator extends MergeOperator { + + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); } + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + * @param operandsLimit the maximum size of the operands list before merge is applied. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); } diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index 347221df6ed6..61ccc65e65d3 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -50,6 +50,22 @@ public void createCheckpoint(final String checkpointPath) createCheckpoint(nativeHandle_, checkpointPath); } + /** + * Exports all live SST files of a specified Column Family into {@code exportPath}. + *
<p>
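
Reviewer example, not part of this patch: the new export API in context. `db` and `cfHandle` are assumed to exist, and the export path must not yet exist, as documented below.

    try (final Checkpoint checkpoint = Checkpoint.create(db);
         final ExportImportFilesMetaData metadata =
             checkpoint.exportColumnFamily(cfHandle, "/backups/cf-export")) {
      // metadata describes the exported SST files
    }
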
+ * Always triggers a flush. + * + * @param columnFamilyHandle the column family to export. + * + * @param exportPath should not already exist and will be created by this API. + * SST files will be created as hard links when the directory specified + * is in the same partition as the db directory, copied otherwise. + * + * @return metadata about the exported SST files. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ public ExportImportFilesMetaData exportColumnFamily(final ColumnFamilyHandle columnFamilyHandle, final String exportPath) throws RocksDBException { return new ExportImportFilesMetaData( diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java index 5b3d2249250f..556220f8baa5 100644 --- a/java/src/main/java/org/rocksdb/ChecksumType.java +++ b/java/src/main/java/org/rocksdb/ChecksumType.java @@ -14,18 +14,20 @@ public enum ChecksumType { */ kNoChecksum((byte) 0), /** - * CRC32 Checksum + * CRC32 Checksum. */ kCRC32c((byte) 1), /** - * XX Hash + * XX Hash. */ kxxHash((byte) 2), /** - * XX Hash 64 + * XX Hash 64. */ kxxHash64((byte) 3), - + /** + * XX Hash v3. + */ kXXH3((byte) 4); /** diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index f9f6da74c081..784f80db2f50 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -14,6 +14,7 @@ * configuration parameter that is not provided by this API. This function * simply returns a new LRUCache for functional compatibility. */ +@Deprecated public class ClockCache extends Cache { /** * Create a new cache with a fixed size capacity. @@ -22,6 +23,7 @@ public class ClockCache extends Cache { * * @param capacity The fixed size capacity of the cache */ + @Deprecated public ClockCache(final long capacity) { super(newClockCache(capacity, -1, false)); } @@ -39,6 +41,7 @@ public ClockCache(final long capacity) { * @param numShardBits The cache is sharded to 2^numShardBits shards, * by hash of the key */ + @Deprecated public ClockCache(final long capacity, final int numShardBits) { super(newClockCache(capacity, numShardBits, false)); } @@ -58,6 +61,7 @@ public ClockCache(final long capacity, final int numShardBits) { * by hash of the key * @param strictCapacityLimit insert to the cache will fail when cache is full */ + @Deprecated public ClockCache(final long capacity, final int numShardBits, final boolean strictCapacityLimit) { super(newClockCache(capacity, numShardBits, strictCapacityLimit)); diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 9fd63e768052..1f12f5e90915 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -123,6 +123,11 @@ public int hashCode() { } } + /** + * Returns true if this is the handle for the default column family. + * + * @return true if this is the handle for the default column family, false otherwise. 
+ */ protected boolean isDefaultColumnFamily() { return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 4776773bd8bd..40c7c5806409 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for Column Family Options. + * + * @param the concrete type of the ColumnFamilyOptions. + */ public interface ColumnFamilyOptionsInterface> extends AdvancedColumnFamilyOptionsInterface { /** diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index 616a77572d41..565558e439d5 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -17,9 +17,11 @@ public class CompactRangeOptions extends RocksObject { private static final byte VALUE_kForce = 2; private static final byte VALUE_kForceOptimized = 3; - // For level based compaction, we can configure if we want to skip/force bottommost level - // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in - // db/options.h + /** + * For level based compaction, we can configure if we want to skip/force bottommost level + * compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in + * db/options.h + */ public enum BottommostLevelCompaction { /** * Skip bottommost level compaction @@ -71,15 +73,34 @@ public static BottommostLevelCompaction fromRocksId(final int bottommostLevelCom } } + /** + * Timestamp. + */ public static class Timestamp { + /** + * the start. + */ public final long start; + + /** + * the range. + */ public final long range; - public Timestamp(final long start, final long duration) { + /** + * Constructs a Timestamp. + * + * @param start the start. + * @param range the range. + */ + public Timestamp(final long start, final long range) { this.start = start; - this.range = duration; + this.range = range; } + /** + * Constructs a Timestamp. + */ public Timestamp() { this.start = 0; this.range = 0; @@ -250,22 +271,46 @@ public CompactRangeOptions setMaxSubcompactions(final int maxSubcompactions) { return this; } + /** + * Set Full History Low Timestamp; + * + * @param tsLow low timestamp. + * + * @return This CompactRangeOptions. + */ public CompactRangeOptions setFullHistoryTSLow(final Timestamp tsLow) { setFullHistoryTSLow(nativeHandle_, tsLow.start, tsLow.range); return this; } + /** + * Get the Full History Low Timestamp; + * + * @return low timestamp. + */ public Timestamp fullHistoryTSLow() { return fullHistoryTSLow(nativeHandle_); } - public CompactRangeOptions setCanceled(final boolean canceled) { - setCanceled(nativeHandle_, canceled); + /** + * Set cancelled. + * + * @param cancelled true to cancel, otherwise false. + * + * @return This CompactRangeOptions. + */ + public CompactRangeOptions setCancelled(final boolean cancelled) { + setCancelled(nativeHandle_, cancelled); return this; } - public boolean canceled() { - return canceled(nativeHandle_); + /** + * Get the cancelled status. + * + * @return true if cancelled, false otherwise. 
+ */ + public boolean cancelled() { + return cancelled(nativeHandle_); } private static native long newCompactRangeOptions(); @@ -298,7 +343,7 @@ private native void setFullHistoryTSLow( private native Timestamp fullHistoryTSLow(final long handle); - private native void setCanceled(final long handle, final boolean canceled); + private native void setCancelled(final long handle, final boolean canceled); - private native boolean canceled(final long handle); + private native boolean cancelled(final long handle); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java index cf04bde24930..27f0a45da014 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobInfo.java +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -9,8 +9,14 @@ import java.util.List; import java.util.Map; +/** + * Information about a Compaction Job. + */ public class CompactionJobInfo extends RocksObject { + /** + * Constructs a new CompactionJobInfo. + */ public CompactionJobInfo() { super(newCompactionJobInfo()); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java index 3d53b5565e6c..82ebe19bfd92 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobStats.java +++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -5,8 +5,14 @@ package org.rocksdb; +/** + * Statistics about a Compaction Job. + */ public class CompactionJobStats extends RocksObject { + /** + * Constructs a new CompactionJobStats. + */ public CompactionJobStats() { super(newCompactionJobStats()); } @@ -118,7 +124,7 @@ public long totalOutputBytes() { /** * Get the number of records being replaced by newer record associated * with same key. - * + *
<p>
* This could be a new value or a deletion entry for that key so this field * sums up all updated and deleted keys. * @@ -149,7 +155,7 @@ public long totalInputRawValueBytes() { /** * Get the number of deletion entries before compaction. - * + *
<p>
* Deletion entries can disappear after compaction because they expired. * * @return the number of deletion entries before compaction. @@ -182,7 +188,7 @@ public long numCorruptKeys() { /** * Get the Time spent on file's Append() call. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file's Append() call. @@ -193,7 +199,7 @@ public long fileWriteNanos() { /** * Get the Time spent on sync file range. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on sync file range. @@ -204,7 +210,7 @@ public long fileRangeSyncNanos() { /** * Get the Time spent on file fsync. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file fsync. @@ -215,7 +221,7 @@ public long fileFsyncNanos() { /** * Get the Time spent on preparing file write (falocate, etc) - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on preparing file write (falocate, etc). diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java index 2c7e391fbf78..dd6dea6c5613 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptions.java +++ b/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -14,13 +14,16 @@ */ public class CompactionOptions extends RocksObject { + /** + * Constructs a new CompactionOptions. + */ public CompactionOptions() { super(newCompactionOptions()); } /** * Get the compaction output compression type. - * + *
<p>
* See {@link #setCompression(CompressionType)}. * * @return the compression type. @@ -32,9 +35,9 @@ public CompressionType compression() { /** * Set the compaction output compression type. - * + *
<p>
* Default: snappy - * + *
<p>
* If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION}, * RocksDB will choose compression type according to the * {@link ColumnFamilyOptions#compressionType()}, taking into account @@ -52,7 +55,7 @@ public CompactionOptions setCompression(final CompressionType compression) { /** * Get the compaction output file size limit. - * + *
<p>
* See {@link #setOutputFileSizeLimit(long)}. * * @return the file size limit. @@ -63,7 +66,7 @@ public long outputFileSizeLimit() { /** * Compaction will create files of size {@link #outputFileSizeLimit()}. - * + *
<p>
* Default: 2^64-1, which means that compaction will create a single file * * @param outputFileSizeLimit the size limit @@ -90,9 +93,9 @@ public int maxSubcompactions() { * This value represents the maximum number of threads that will * concurrently perform a compaction job by breaking it into multiple, * smaller ones that are run simultaneously. - * + *
<p>
* Default: 0 (i.e. no subcompactions) - * + *
<p>
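
Reviewer example, not part of this patch: a manual compactFiles() call using these options. `db`, `cfHandle` and `inputFileNames` (SST file names, e.g. taken from db.getLiveFilesMetaData()) are assumed; the values are illustrative.

    try (final CompactionOptions compactionOpts = new CompactionOptions()
             .setCompression(CompressionType.LZ4_COMPRESSION)
             .setOutputFileSizeLimit(64 * SizeUnit.MB)
             .setMaxSubcompactions(2)) {
      db.compactFiles(compactionOpts, cfHandle, inputFileNames,
          /* outputLevel */ 1, /* outputPathId */ -1, /* compactionJobInfo */ null);
    }
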
* If > 0, it will replace the option in * {@link DBOptions#maxSubcompactions()} for this compaction. * diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index 92b21fc50c30..d0c8ccfe9c08 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -10,6 +10,9 @@ */ public class CompactionOptionsFIFO extends RocksObject { + /** + * Constructs a new CompactionOptionsFIFO. + */ public CompactionOptionsFIFO() { super(newCompactionOptionsFIFO()); } diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index 4d2ebdb1f562..c18a04cd65db 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -10,6 +10,9 @@ */ public class CompactionOptionsUniversal extends RocksObject { + /** + * Constructs a new CompactionOptionsUniversal. + */ public CompactionOptionsUniversal() { super(newCompactionOptionsUniversal()); } diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java index 46ec33f3f141..a6050c716a18 100644 --- a/java/src/main/java/org/rocksdb/CompactionReason.java +++ b/java/src/main/java/org/rocksdb/CompactionReason.java @@ -5,7 +5,14 @@ package org.rocksdb; +/** + * Reasons for compaction. + */ public enum CompactionReason { + + /** + * Unknown. + */ kUnknown((byte)0x0), /** diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index 7b955a7a248c..6a1de336abfb 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -35,9 +35,24 @@ * FIFO Compaction */ public enum CompactionStyle { + /** + * Level Compaction. + */ LEVEL((byte) 0x0), + + /** + * Universal Compaction. + */ UNIVERSAL((byte) 0x1), + + /** + * First-in First-out Compaction. + */ FIFO((byte) 0x2), + + /** + * No compaction. + */ NONE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java index ee5beb8f6edc..abc8a5082f7e 100644 --- a/java/src/main/java/org/rocksdb/ComparatorOptions.java +++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -13,6 +13,9 @@ * instance becomes out-of-scope to release the allocated memory in C++. */ public class ComparatorOptions extends RocksObject { + /** + * Constructs a new ComparatorOptions. + */ public ComparatorOptions() { super(newComparatorOptions()); } diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index 2e1ee57310b1..eabfef5a13c0 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -10,33 +10,93 @@ */ public class CompressionOptions extends RocksObject { + /** + * RocksDB's generic default compression level. Internally it'll be translated + * to the default compression level specific to the library being used. + */ + public static final int DEFAULT_COMPRESSION_LEVEL = 32767; + + /** + * Constructs a new CompressionOptions. + */ public CompressionOptions() { super(newCompressionOptions()); } + /** + * Set the Window size. + * Zlib only. + * + * @param windowBits the size of the window. 
+ * + * @return the reference to the current compression options. + */ public CompressionOptions setWindowBits(final int windowBits) { setWindowBits(nativeHandle_, windowBits); return this; } + /** + * Get the Window size. + * Zlib only. + * + * @return the size of the window. + */ public int windowBits() { return windowBits(nativeHandle_); } + /** + * Compression "level" applicable to zstd, zlib, LZ4, and LZ4HC. Except for + * {@link #DEFAULT_COMPRESSION_LEVEL}, the meaning of each value depends + * on the compression algorithm. Decreasing across non- + * {@link #DEFAULT_COMPRESSION_LEVEL} values will either favor speed over + * compression ratio or have no effect. + *
<p>
+ * In LZ4 specifically, the absolute value of a negative `level` internally + * configures the `acceleration` parameter. For example, set `level=-10` for + * `acceleration=10`. This negation is necessary to ensure decreasing `level` + * values favor speed over compression ratio. + * + * @param level the compression level. + * + * @return the reference to the current compression options. + */ public CompressionOptions setLevel(final int level) { setLevel(nativeHandle_, level); return this; } + /** + * Get the Compression "level". + *
<p>
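
Reviewer example, not part of this patch: the negative-level LZ4 behaviour described above, attached to a column family; the values are illustrative.

    try (final CompressionOptions compressionOpts = new CompressionOptions()
             .setLevel(-10); // for LZ4 this requests acceleration=10
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setCompressionType(CompressionType.LZ4_COMPRESSION)
             .setCompressionOptions(compressionOpts)) {
      // use cfOpts when opening the column family
    }
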
+ * See {@link #setLevel(int)} + * + * @return the compression level. + */ public int level() { return level(nativeHandle_); } + /** + * Set the compression strategy. + * Zlib only. + * + * @param strategy the strategy. + * + * @return the reference to the current compression options. + */ public CompressionOptions setStrategy(final int strategy) { setStrategy(nativeHandle_, strategy); return this; } + /** + * Get the compression strategy. + * Zlib only. + * + * @return the strategy. + */ public int strategy() { return strategy(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index d1ecf0ac84c5..4f683d036735 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -14,14 +14,49 @@ * compression method (if any) is used to compress a block.
</p>
*/ public enum CompressionType { + /** + * No compression. + */ NO_COMPRESSION((byte) 0x0, null, "kNoCompression"), + + /** + * Snappy compression. + */ SNAPPY_COMPRESSION((byte) 0x1, "snappy", "kSnappyCompression"), + + /** + * ZLib compression. + */ ZLIB_COMPRESSION((byte) 0x2, "z", "kZlibCompression"), + + /** + * BZ2 compression. + */ BZLIB2_COMPRESSION((byte) 0x3, "bzip2", "kBZip2Compression"), + + /** + * LZ4 compression. + */ LZ4_COMPRESSION((byte) 0x4, "lz4", "kLZ4Compression"), + + /** + * LZ4 with high compression. + */ LZ4HC_COMPRESSION((byte) 0x5, "lz4hc", "kLZ4HCCompression"), + + /** + * Microsoft XPress compression (Windows only). + */ XPRESS_COMPRESSION((byte) 0x6, "xpress", "kXpressCompression"), + + /** + * ZStd compression. + */ ZSTD_COMPRESSION((byte) 0x7, "zstd", "kZSTD"), + + /** + * Disable compression. + */ DISABLE_COMPRESSION_OPTION((byte) 0x7F, null, "kDisableCompressionOption"); /** diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java index b4e34303b5f3..33bbcd5ba41e 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java @@ -6,7 +6,15 @@ package org.rocksdb; +/** + * Base class for Concurrent Task Limiters. + */ public abstract class ConcurrentTaskLimiter extends RocksObject { + /** + * Constructs a ConcurrentTaskLimiter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ concurrent task limiter object. + */ protected ConcurrentTaskLimiter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java index d28b9060a63e..9ce48c27ef3c 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java @@ -6,7 +6,17 @@ package org.rocksdb; +/** + * Concurrent Task Limiter. + */ public class ConcurrentTaskLimiterImpl extends ConcurrentTaskLimiter { + + /** + * Construct a new Concurrent Task Limiter. + * + * @param name the name of the limiter. + * @param maxOutstandingTask the maximum concurrent tasks. + */ public ConcurrentTaskLimiterImpl(final String name, final int maxOutstandingTask) { super(newConcurrentTaskLimiterImpl0(name, maxOutstandingTask)); } diff --git a/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java index b3b5423c876d..f0d9dd5c1b49 100644 --- a/java/src/main/java/org/rocksdb/ConfigOptions.java +++ b/java/src/main/java/org/rocksdb/ConfigOptions.java @@ -6,33 +6,72 @@ package org.rocksdb; +/** + * Configuration options. + */ public class ConfigOptions extends RocksObject { /** - * Construct with default Options + * Constructs a new ConfigOptions. */ public ConfigOptions() { super(newConfigOptionsInstance()); } + /** + * Set the delimiter used between options. + * + * @param delimiter the delimiter + * + * @return the reference to the current options + */ public ConfigOptions setDelimiter(final String delimiter) { setDelimiter(nativeHandle_, delimiter); return this; } + + /** + * Set whether to ignore unknown options. + * + * @param ignore true to ignore unknown options, otherwise raise an error. 
+ * + * @return the reference to the current options + */ public ConfigOptions setIgnoreUnknownOptions(final boolean ignore) { setIgnoreUnknownOptions(nativeHandle_, ignore); return this; } + /** + * Set the environment. + * + * @param env the environment. + * + * @return the reference to the current options + */ public ConfigOptions setEnv(final Env env) { setEnv(nativeHandle_, env.nativeHandle_); return this; } + /** + * Set whether to escape input strings. + * + * @param escaped true to escape input strings, false otherwise. + * + * @return the reference to the current options + */ public ConfigOptions setInputStringsEscaped(final boolean escaped) { setInputStringsEscaped(nativeHandle_, escaped); return this; } + /** + * Set the sanity level. + * + * @param level the sanity level. + * + * @return the reference to the current options + */ public ConfigOptions setSanityLevel(final SanityLevel level) { setSanityLevel(nativeHandle_, level.getValue()); return this; diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index 084a399cd03b..36ef94993855 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for DB Options. + * + * @param the concrete type of DBOptions. + */ public interface DBOptionsInterface> { /** * Use this if your DB is very small (like under 1GB) and you don't want to @@ -78,8 +83,7 @@ public interface DBOptionsInterface> { * * @param flag a flag indicating if missing column families shall be * created automatically. - * @return true if missing column families shall be created automatically - * on open. + * @return the instance of the current Options */ T setCreateMissingColumnFamilies(boolean flag); @@ -159,7 +163,7 @@ public interface DBOptionsInterface> { /** * Use to track SST files and control their file deletion rate. - * + *
<p>
* Features: * - Throttle the deletion rate of the SST files. * - Keep track the total size of all SST files. @@ -167,7 +171,7 @@ public interface DBOptionsInterface> { * the DB wont do any further flushes or compactions and will set the * background error. * - Can be shared between multiple dbs. - * + *
<p>
* Limitations: * - Only track and throttle deletes of SST files in * first db_path (db_name if db_paths is empty). @@ -208,7 +212,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open * all files on DB::Open(). You can use this option to increase the number * of threads used to open the files. - * + *
<p>
* Default: 16 * * @param maxFileOpeningThreads the maximum number of threads to use to @@ -222,7 +226,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all * files on DB::Open(). You can use this option to increase the number of * threads used to open the files. - * + *
<p>
* Default: 16 * * @return the maximum number of threads to use to open files @@ -278,27 +282,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *
<p>
* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *
<p>
* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *
<p>
* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *
<p>
* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *
<p>
* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *
<p>
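
Reviewer example, not part of this patch: the flash/hard-drive layout from the javadoc, expressed with the real API (java.util.Arrays, java.nio.file.Paths and org.rocksdb.util.SizeUnit imported); paths are illustrative.

    try (final DBOptions options = new DBOptions()
             .setCreateIfMissing(true)
             .setDbPaths(Arrays.asList(
                 new DbPath(Paths.get("/flash_path"), 10 * SizeUnit.GB),
                 new DbPath(Paths.get("/hard_drive"), 2 * SizeUnit.TB)))) {
      // newest SST files land on /flash_path until its 10GB target is reached
    }
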
* Default: empty * * @param dbPaths the paths and target sizes @@ -311,27 +315,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *
<p>
* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *
<p>
* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *
<p>
* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *
<p>
* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *
<p>
* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *
<p>
* Default: {@link java.util.Collections#emptyList()} * * @return dbPaths the paths and target sizes @@ -352,7 +356,7 @@ public interface DBOptionsInterface> { /** * Returns the directory of info log. - * + *
<p>
* If it is empty, the log files will be in the same dir as data. * If it is non empty, the log files will be in the specified dir, * and the db data dir's absolute path will be used as the log file @@ -377,7 +381,7 @@ public interface DBOptionsInterface> { /** * Returns the path to the write-ahead-logs (WAL) directory. - * + *
<p>
* If it is empty, the log files will be in the same dir as data, * dbname is used as the data dir by default * If it is non empty, the log files will be in kept the specified dir. @@ -439,7 +443,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *
<p>
* Specifies the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -463,7 +467,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *
<p>
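
Reviewer example, not part of this patch: given the deprecation noted above, the usual replacement is a single max_background_jobs budget, letting RocksDB split it between flushes and compactions.

    try (final DBOptions options = new DBOptions()
             .setCreateIfMissing(true)
             .setMaxBackgroundJobs(4)) {
      // RocksDB divides the 4 jobs between flushes and compactions itself
    }
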
* Returns the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -542,16 +546,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *
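As a sketch, recycling is enabled with a single setter on the options object from the earlier example (the count of 4 is arbitrary):

    // Keep up to 4 written-out log files around and reuse them for new WALs.
    options.setRecycleLogFileNum(4);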

* Default: 0 * * @param recycleLogFileNum the number of log files to keep for recycling @@ -562,16 +566,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *

* Default: 0 * * @return the number of log files kept for recycling @@ -617,17 +621,17 @@ public interface DBOptionsInterface> { /** * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect when WALs * will be archived and deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *
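For instance, a sketch that combines both limits (the values are arbitrary):

    // Archive obsolete WALs rather than deleting them immediately; drop archived
    // WALs older than one hour and keep the archive's total size under 1 GB.
    options.setWalTtlSeconds(60 * 60);
    options.setWalSizeLimitMB(1024);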

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -643,17 +647,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -668,17 +672,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect how archived logs * will be deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -694,17 +698,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -720,7 +724,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *

* Default: 1 MB * * @param maxWriteBatchGroupSizeBytes the maximum limit of number of bytes, see description. @@ -732,7 +736,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *

* Default: 1 MB * * @return the maximum limit of number of bytes, see description. @@ -885,13 +889,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *
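A sketch of enabling the global limit (the 256 MB figure is illustrative):

    // Cap memory used by memtables across all column families at 256 MB.
    options.setDbWriteBufferSize(256L << 20);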

* Default: 0 (disabled) * * @param dbWriteBufferSize the size of the write buffer @@ -903,7 +907,7 @@ public interface DBOptionsInterface> { /** * Use passed {@link WriteBufferManager} to control memory usage across * multiple column families and/or DB instances. - * + *
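A sketch of sharing one write-buffer budget; the LRUCache and the sizes are illustrative choices rather than anything this API prescribes:

    import org.rocksdb.LRUCache;
    import org.rocksdb.WriteBufferManager;

    // Charge memtable memory to a 256 MB budget backed by a shared cache.
    final LRUCache cache = new LRUCache(512L << 20);
    final WriteBufferManager writeBufferManager =
        new WriteBufferManager(256L << 20, cache);
    options.setWriteBufferManager(writeBufferManager);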

* Check * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager * for more details on when to use it @@ -925,13 +929,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *

* Default: 0 (disabled) * * @return the size of the write buffer @@ -941,7 +945,7 @@ public interface DBOptionsInterface> { /** * Specify the file access pattern once a compaction is started. * It will be applied to all input files of a compaction. - * + *

* Default: {@link AccessHint#NORMAL} * * @param accessHint The access hint @@ -953,7 +957,7 @@ public interface DBOptionsInterface> { /** * Specify the file access pattern once a compaction is started. * It will be applied to all input files of a compaction. - * + *

* Default: {@link AccessHint#NORMAL} * * @return The access hint @@ -971,11 +975,11 @@ public interface DBOptionsInterface> { * always try to read ahead. * With read-ahead we always pre-allocate buffer to the size instead of * growing it up to a limit. - * + *

* This option is currently honored only on Windows. - + * <p>

* Default: 1 MB - + * <p>

* Special value: 0 - means do not maintain per instance buffer. Allocate * per request buffer and avoid locking. * @@ -996,11 +1000,11 @@ public interface DBOptionsInterface> { * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and * always try to read ahead. With read-ahead we always pre-allocate buffer * to the size instead of growing it up to a limit. - * + *

* This option is currently honored only on Windows. - + * <p>

* Default: 1 MB - + * <p>

* Special value: 0 - means do not maintain per instance buffer. Allocate * per request buffer and avoid locking. * @@ -1034,7 +1038,7 @@ public interface DBOptionsInterface> { /** * Sets the {@link EventListener}s whose callback functions * will be called when specific RocksDB event happens. - * + *

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in the future to also support adding/removing EventListeners implemented in * C++. @@ -1048,7 +1052,7 @@ /** * Sets the {@link EventListener}s whose callback functions * will be called when a specific RocksDB event happens. - + * <p>
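A sketch of registering a Java listener; the flush callback is just one of the hooks AbstractEventListener offers:

    import java.util.Collections;
    import org.rocksdb.AbstractEventListener;
    import org.rocksdb.FlushJobInfo;
    import org.rocksdb.RocksDB;

    options.setListeners(Collections.singletonList(new AbstractEventListener() {
      @Override
      public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
        // React to the completed flush, e.g. export a metric.
        System.out.println("Flush completed for " + flushJobInfo.getColumnFamilyName());
      }
    }));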

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in the future to also support adding/removing EventListeners implemented in * C++. @@ -1060,7 +1064,7 @@ /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - + * <p>

* Default: false * * @param enableThreadTracking true to enable tracking @@ -1072,7 +1076,7 @@ public interface DBOptionsInterface> { /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - * + *

* Default: false * * @return true if tracking is enabled @@ -1083,7 +1087,7 @@ * By default, a single write thread queue is maintained. The thread that gets * to the head of the queue becomes the write batch group leader, responsible * for writing to the WAL and memtable for the batch group. - + * <p>

* If {@link #enablePipelinedWrite()} is true, a separate write thread queue is * maintained for WAL writes and memtable writes. A write thread first enters the WAL * writer queue and then the memtable writer queue. A thread pending on the WAL * writer queue only waits for previous writers to finish their * WAL writing but not the memtable writing. Enabling the feature may improve * write throughput and reduce latency of the prepare phase of two-phase * commit. - + * <p>

* Default: false * * @param enablePipelinedWrite true to enabled pipelined writes @@ -1118,7 +1122,7 @@ public interface DBOptionsInterface> { * throughput. Using TransactionDB with WRITE_PREPARED write policy and * {@link #twoWriteQueues()} true is one way to achieve immutable snapshots despite * unordered_write. - * + *
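A sketch of that combination, assuming the standard RocksJava transaction classes; whether it is appropriate depends on the snapshot guarantees the application needs:

    import org.rocksdb.DBOptions;
    import org.rocksdb.TransactionDBOptions;
    import org.rocksdb.TxnDBWritePolicy;

    final DBOptions dbOptions = new DBOptions()
        .setUnorderedWrite(true)
        .setTwoWriteQueues(true);
    final TransactionDBOptions txnDbOptions =
        new TransactionDBOptions().setWritePolicy(TxnDBWritePolicy.WRITE_PREPARED);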

* By default, i.e., when it is false, rocksdb does not advance the sequence * number for new snapshots unless all the writes with lower sequence numbers * are already finished. This provides the immutability that we expect from @@ -1263,7 +1267,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * a disk environment. - + * <p>

* Default: false * * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped @@ -1277,7 +1281,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * a disk environment. - + * <p>

* Default: false * * @return true if updating stats will be skipped @@ -1291,7 +1295,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will not be checked @@ -1307,7 +1311,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @return true, if file sizes will not be checked when calling {@link RocksDB#open(String)}. @@ -1316,7 +1320,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - * + *

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @param walRecoveryMode The WAL recovery mode @@ -1327,7 +1331,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - + * <p>

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @return The WAL recovery mode @@ -1337,7 +1341,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If set to false then recovery will fail when a prepared * transaction is encountered in the WAL - + * <p>

* Default: false * * @param allow2pc true if two-phase-commit is enabled @@ -1349,7 +1353,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If set to false then recovery will fail when a prepared * transaction is encountered in the WAL - + * <p>

* Default: false * * @return true if two-phase-commit is enabled @@ -1358,7 +1362,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *
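A sketch (the cache implementation and size are illustrative):

    import org.rocksdb.LRUCache;

    // Serve repeated point lookups of hot rows from an in-memory cache.
    options.setRowCache(new LRUCache(64L << 20));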

* Default: null (disabled) * * @param rowCache The global row cache @@ -1369,7 +1373,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *

* Default: null (disabled) * * @return The global row cache @@ -1401,7 +1405,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @param failIfOptionsFileError true if we should fail if there is an error @@ -1415,7 +1419,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @return true if we should fail if there is an error in the options file @@ -1425,7 +1429,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @param dumpMallocStats true if malloc stats should be printed to LOG @@ -1437,7 +1441,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @return true if malloc stats should be printed to LOG @@ -1450,7 +1454,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee @@ -1466,7 +1470,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @return true to try to avoid (but not guarantee not to) flush during @@ -1482,7 +1486,7 @@ T setEnableWriteThreadAdaptiveYield( * 1) Disable some internal optimizations around SST file compression * 2) Reserve bottom-most level for ingested files only. * 3) Note that num_levels should be >= 3 if this option is turned on. - * + *
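A sketch pairing this option with ingest-behind ingestion on an open database handle db (the SST path is hypothetical):

    import java.util.Collections;
    import org.rocksdb.IngestExternalFileOptions;

    options.setAllowIngestBehind(true);
    // Later: ingest a file beneath all existing data, into the bottom-most level.
    db.ingestExternalFile(Collections.singletonList("/tmp/bulk-load.sst"),
        new IngestExternalFileOptions().setIngestBehind(true));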

* DEFAULT: false * * @param allowIngestBehind true to allow ingest behind, false to disallow. @@ -1505,7 +1509,7 @@ T setEnableWriteThreadAdaptiveYield( * allows the memtable writes not to lag behind other writes. It can be used * to optimize MySQL 2PC in which only the commits, which are serial, write to * memtable. - * + *

* DEFAULT: false * * @param twoWriteQueues true to enable two write queues, false otherwise. @@ -1525,7 +1529,7 @@ T setEnableWriteThreadAdaptiveYield( * If true WAL is not flushed automatically after each write. Instead it * relies on manual invocation of FlushWAL to write the WAL buffer to its * file. - * + *
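A sketch of the manual pattern, where RocksDB#flushWal is the Java binding of FlushWAL (the db handle is assumed):

    options.setManualWalFlush(true);
    // After a batch of writes: push the buffered WAL to its file; true also syncs it.
    db.flushWal(true);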

* DEFAULT: false * * @param manualWalFlush true to set disable automatic WAL flushing, @@ -1553,7 +1557,7 @@ T setEnableWriteThreadAdaptiveYield( * For manual flush, application has to specify which column families to * flush atomically in {@link RocksDB#flush(FlushOptions, List)}. * For auto-triggered flush, RocksDB atomically flushes ALL column families. - * + *
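A sketch of an atomic manual flush across two hypothetical column family handles:

    import java.util.Arrays;
    import org.rocksdb.FlushOptions;

    options.setAtomicFlush(true);
    // Flush both column families as a single atomic unit.
    db.flush(new FlushOptions().setWaitForFlush(true),
        Arrays.asList(cfHandleA, cfHandleB));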

* Currently, any WAL-enabled writes after atomic flush may be replayed * independently if the process crashes later and tries to recover. * @@ -1565,7 +1569,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Determine if atomic flush of multiple column families is enabled. - * + *

* See {@link #setAtomicFlush(boolean)}. * * @return true if atomic flush is enabled. @@ -1666,7 +1670,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @param logReadaheadSize the number of bytes to prefetch when reading the log. @@ -1678,7 +1682,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @return the number of bytes to prefetch when reading the log. @@ -1721,7 +1725,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @param maxBgerrorResumeCount maximum number of times db resume should be called when IO Error @@ -1737,7 +1741,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @return maximum number of times db resume should be called when IO Error happens. @@ -1748,7 +1752,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and the redo-resume conditions are satisfied. - + * <p>

* Default: 1000000 (microseconds). * * @param bgerrorResumeRetryInterval how many microseconds to wait between DB resume attempts. @@ -1760,7 +1764,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and the redo-resume conditions are satisfied. - + * <p>

* Default: 1000000 (microseconds). * * @return how many microseconds to wait between DB resume attempts. diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java index 3f0b67557c5e..3895b258556e 100644 --- a/java/src/main/java/org/rocksdb/DbPath.java +++ b/java/src/main/java/org/rocksdb/DbPath.java @@ -14,6 +14,12 @@ public class DbPath { final Path path; final long targetSize; + /** + * Constructs a DbPath. + * + * @param path the path. + * @param targetSize the target size. + */ public DbPath(final Path path, final long targetSize) { this.path = path; this.targetSize = targetSize; diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 5aa0866ffe29..b5741fe636b7 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -16,6 +16,10 @@ * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice<ByteBuffer> { + + /** + * Constant for No Direct Slice. + */ public static final DirectSlice NONE = new DirectSlice(); /** @@ -110,6 +114,11 @@ public void removePrefix(final int n) { this.internalBufferOffset += n; } + /** + * Set the length of the direct slice. + * + * @param n the length. + */ public void setLength(final int n) { setLength0(getNativeHandle(), n); } diff --git a/java/src/main/java/org/rocksdb/Experimental.java b/java/src/main/java/org/rocksdb/Experimental.java index 64b404d6f195..13ac5a0e3b19 100644 --- a/java/src/main/java/org/rocksdb/Experimental.java +++ b/java/src/main/java/org/rocksdb/Experimental.java @@ -19,5 +19,10 @@ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.TYPE, ElementType.METHOD}) public @interface Experimental { + /** + * A description explaining why the feature is experimental. + * + * @return the explanation of why the feature is experimental. + */ String value(); } diff --git a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java index 7a99dd6bfe2f..4a348ab32389 100644 --- a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java +++ b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about the ingestion of External Files. + */ public class ExternalFileIngestionInfo { private final String columnFamilyName; private final String externalFilePath; diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 7f490cf594be..b2374676e4a1 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -15,13 +15,18 @@ //TODO(AR) should be renamed FilterPolicy public abstract class Filter extends RocksObject { + /** + * Constructs a filter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ filter object. + */ protected Filter(final long nativeHandle) { super(nativeHandle); } /** * Deletes underlying C++ filter pointer. - + * <p>

* Note that this function should be called only after all * RocksDB instances referencing the filter are closed. * Otherwise an undefined behavior will occur. diff --git a/java/src/main/java/org/rocksdb/FilterPolicyType.java b/java/src/main/java/org/rocksdb/FilterPolicyType.java index 6a693ee4039d..c7051ac07be6 100644 --- a/java/src/main/java/org/rocksdb/FilterPolicyType.java +++ b/java/src/main/java/org/rocksdb/FilterPolicyType.java @@ -9,6 +9,9 @@ * IndexType used in conjunction with BlockBasedTable. */ public enum FilterPolicyType { + /** + * Unknown filter policy. + */ kUnknownFilterPolicy((byte) 0), /** @@ -25,7 +28,7 @@ public enum FilterPolicyType { */ kRibbonFilterPolicy((byte) 2); - public Filter createFilter(final long handle, final double param) { + Filter createFilter(final long handle, final double param) { if (this == kBloomFilterPolicy) { return new BloomFilter(handle, param); } diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java index 414d3a2f332e..52af3afe1795 100644 --- a/java/src/main/java/org/rocksdb/FlushJobInfo.java +++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a flush job. + */ public class FlushJobInfo { private final long columnFamilyId; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java index 9d486cda16bd..3a5bcf2d485d 100644 --- a/java/src/main/java/org/rocksdb/FlushReason.java +++ b/java/src/main/java/org/rocksdb/FlushReason.java @@ -5,18 +5,68 @@ package org.rocksdb; +/** + * Reasons for a flush. + */ public enum FlushReason { + /** + * Other. + */ OTHERS((byte) 0x00), + + /** + * Get live files. + */ GET_LIVE_FILES((byte) 0x01), + + /** + * Shutdown. + */ SHUTDOWN((byte) 0x02), + + /** + * External file ingestion. + */ EXTERNAL_FILE_INGESTION((byte) 0x03), + + /** + * Manual compaction. + */ MANUAL_COMPACTION((byte) 0x04), + + /** + * Write buffer manager. + */ WRITE_BUFFER_MANAGER((byte) 0x05), + + /** + * Write buffer full. + */ WRITE_BUFFER_FULL((byte) 0x06), + + /** + * Test. + */ TEST((byte) 0x07), + + /** + * Delete file(s). + */ DELETE_FILES((byte) 0x08), + + /** + * Automatic compaction. + */ AUTO_COMPACTION((byte) 0x09), + + /** + * Manual flush. + */ MANUAL_FLUSH((byte) 0x0a), + + /** + * Error recovery. + */ ERROR_RECOVERY((byte) 0xb); private final byte value; diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java index a2afafe39ebd..a7ab4902f3f3 100644 --- a/java/src/main/java/org/rocksdb/GetStatus.java +++ b/java/src/main/java/org/rocksdb/GetStatus.java @@ -12,7 +12,15 @@ * If the target of the fetch is not big enough, this may be bigger than the contents of the target. */ public class GetStatus { + + /** + * The status of the request to fetch into the buffer. + */ public final Status status; + + /** + * The size of the data, which may be bigger than the buffer. + */ public final int requiredSize; /** diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index a9868df57d7b..1ee66c4117ca 100644 --- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -15,22 +15,42 @@ * and post a warning in the LOG. 
*/ public class HashLinkedListMemTableConfig extends MemTableConfig { + + /** + * The default number of buckets. + */ public static final long DEFAULT_BUCKET_COUNT = 50_000; + + /** + * The default size of huge TLB pages. + */ public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0; + + /** + * The default log threshold for bucket entries. + */ public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096; + + /** + * The default of whether to log when a bucket is flushed. + */ public static final boolean DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; - public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256; /** - * HashLinkedListMemTableConfig constructor + * The default threshold for determining when to use a Skip List. + */ + public static final int DEFAULT_THRESHOLD_USE_SKIPLIST = 256; + + /** + * Constructs a HashLinkedListMemTableConfig. */ public HashLinkedListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE; bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES; ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH; - thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST; + thresholdUseSkiplist_ = DEFAULT_THRESHOLD_USE_SKIPLIST; } /** diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java index 80d6b7115182..6a250d59920e 100644 --- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -15,12 +15,24 @@ * and post a warning in the LOG. */ public class HashSkipListMemTableConfig extends MemTableConfig { + + /** + * The default number of buckets. + */ public static final int DEFAULT_BUCKET_COUNT = 1_000_000; + + /** + * The default branching factor. + */ public static final int DEFAULT_BRANCHING_FACTOR = 4; + + /** + * The default skip list height. + */ public static final int DEFAULT_HEIGHT = 4; /** - * HashSkipListMemTableConfig constructor + * Constructs a HashSkipListMemTableConfig. */ public HashSkipListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java index 81d890883487..1fdd0c26e9a7 100644 --- a/java/src/main/java/org/rocksdb/HistogramData.java +++ b/java/src/main/java/org/rocksdb/HistogramData.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Histogram Data. + */ public class HistogramData { private final double median_; private final double percentile95_; @@ -16,12 +19,34 @@ public class HistogramData { private final long sum_; private final double min_; + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. + */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation) { this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0); } + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. + * @param max the maximum value. 
+ * @param count the number of values. + * @param sum the sum of the values. + * @param min the minimum value. + */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation, final double max, final long count, @@ -37,38 +62,83 @@ public HistogramData(final double median, final double percentile95, sum_ = sum; } + /** + * Get the median value. + * + * @return the median value. + */ public double getMedian() { return median_; } + /** + * Get the 95th percentile value. + * + * @return the 95th percentile value. + */ public double getPercentile95() { return percentile95_; } + /** + * Get the 99th percentile value. + * + * @return the 99th percentile value. + */ public double getPercentile99() { return percentile99_; } + /** + * Get the average value. + * + * @return the average value. + */ public double getAverage() { return average_; } + /** + * Get the value of the standard deviation. + * + * @return the value of the standard deviation. + */ public double getStandardDeviation() { return standardDeviation_; } + /** + * Get the maximum value. + * + * @return the maximum value. + */ public double getMax() { return max_; } + /** + * Get the number of values. + * + * @return the number of values. + */ public long getCount() { return count_; } + /** + * Get the sum of the values. + * + * @return the sum of the values. + */ public long getSum() { return sum_; } + /** + * Get the minimum value. + * + * @return the minimum value. + */ public double getMin() { return min_; } diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java index 41fe241ad3ab..f9feb9439acf 100644 --- a/java/src/main/java/org/rocksdb/HistogramType.java +++ b/java/src/main/java/org/rocksdb/HistogramType.java @@ -5,77 +5,157 @@ package org.rocksdb; +/** + * The types of histogram. + */ public enum HistogramType { - + /** + * DB Get. + */ DB_GET((byte) 0x0), + /** + * DB Write. + */ DB_WRITE((byte) 0x1), + /** + * Time spent in compaction. + */ COMPACTION_TIME((byte) 0x2), + /** + * Time spent in setting up sub-compaction. + */ SUBCOMPACTION_SETUP_TIME((byte) 0x3), + /** + * Time spent in IO during table sync. + * Measured in microseconds. + */ TABLE_SYNC_MICROS((byte) 0x4), + /** + * Time spent in IO during compaction of outfile. + * Measured in microseconds. + */ COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5), + /** + * Time spent in IO during WAL file sync. + * Measured in microseconds. + */ WAL_FILE_SYNC_MICROS((byte) 0x6), + /** + * Time spent in IO during manifest file sync. + * Measured in microseconds. + */ MANIFEST_FILE_SYNC_MICROS((byte) 0x7), /** - * TIME SPENT IN IO DURING TABLE OPEN. + * Time spent in IO during table open. + * Measured in microseconds. */ TABLE_OPEN_IO_MICROS((byte) 0x8), + /** + * DB Multi-Get. + */ DB_MULTIGET((byte) 0x9), + /** + * Time spent in block reads during compaction. + * Measured in microseconds. + */ READ_BLOCK_COMPACTION_MICROS((byte) 0xA), + /** + * Time spent in block reads. + * Measured in microseconds. + */ READ_BLOCK_GET_MICROS((byte) 0xB), + /** + * Time spent in raw block writes. + * Measured in microseconds. + */ WRITE_RAW_BLOCK_MICROS((byte) 0xC), + /** + * Number of files in a single compaction. + */ NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12), + /** + * DB Seek. + */ DB_SEEK((byte) 0x13), + /** + * Write stall. + */ WRITE_STALL((byte) 0x14), + /** + * Time spent in SST reads. + * Measured in microseconds. 
+ */ SST_READ_MICROS((byte) 0x15), /** - * The number of subcompactions actually scheduled during a compaction. + * The number of sub-compactions actually scheduled during a compaction. */ NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16), /** + * Bytes per read. * Value size distribution in each operation. */ BYTES_PER_READ((byte) 0x17), + + /** + * Bytes per write. + * Value size distribution in each operation. + */ BYTES_PER_WRITE((byte) 0x18), + + /** + * Bytes per Multi-Get. + * Value size distribution in each operation. + */ BYTES_PER_MULTIGET((byte) 0x19), /** - * number of bytes compressed. + * Number of bytes compressed. */ BYTES_COMPRESSED((byte) 0x1A), /** - * number of bytes decompressed. - *

- * number of bytes is when uncompressed; i.e. before/after respectively + * Number of bytes decompressed. + * Number of bytes is when uncompressed; i.e. before/after respectively */ BYTES_DECOMPRESSED((byte) 0x1B), + /** + * Time spent in compression. + * Measured in nanoseconds. + */ COMPRESSION_TIMES_NANOS((byte) 0x1C), + /** + * Time spent in decompression. + * Measured in nanoseconds. + */ DECOMPRESSION_TIMES_NANOS((byte) 0x1D), + /** + * Number of merge operands for read. + */ READ_NUM_MERGE_OPERANDS((byte) 0x1E), /** - * Time spent flushing memtable to disk. + * Time spent flushing Memtable to disk. */ FLUSH_TIME((byte) 0x20), @@ -91,62 +171,73 @@ public enum HistogramType { /** * BlobDB Put/PutWithTTL/PutUntil/Write latency. + * Measured in microseconds. */ BLOB_DB_WRITE_MICROS((byte) 0x23), /** * BlobDB Get lagency. + * Measured in microseconds. */ BLOB_DB_GET_MICROS((byte) 0x24), /** * BlobDB MultiGet latency. + * Measured in microseconds. */ BLOB_DB_MULTIGET_MICROS((byte) 0x25), /** * BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency. + * Measured in microseconds. */ BLOB_DB_SEEK_MICROS((byte) 0x26), /** * BlobDB Next latency. + * Measured in microseconds. */ BLOB_DB_NEXT_MICROS((byte) 0x27), /** * BlobDB Prev latency. + * Measured in microseconds. */ BLOB_DB_PREV_MICROS((byte) 0x28), /** * Blob file write latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x29), /** * Blob file read latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2A), /** * Blob file sync latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2B), /** * BlobDB compression time. + * Measured in microseconds. */ BLOB_DB_COMPRESSION_MICROS((byte) 0x2D), /** * BlobDB decompression time. + * Measured in microseconds. */ BLOB_DB_DECOMPRESSION_MICROS((byte) 0x2E), /** * Num of Index and Filter blocks read from file system per level in MultiGet - * request + * request. */ NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x2F), @@ -160,6 +251,9 @@ public enum HistogramType { */ ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x32), + /** + * Bytes read asynchronously. + */ ASYNC_READ_BYTES((byte) 0x33), /** @@ -169,23 +263,58 @@ public enum HistogramType { */ TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x39), + /** + * File read during flush. + * Measured in microseconds. + */ FILE_READ_FLUSH_MICROS((byte) 0x3A), + /** + * File read during compaction. + * Measured in microseconds. + */ FILE_READ_COMPACTION_MICROS((byte) 0x3B), + /** + * File read during DB Open. + * Measured in microseconds. + */ FILE_READ_DB_OPEN_MICROS((byte) 0x3C), + /** + * File read during DB Get. + * Measured in microseconds. + */ FILE_READ_GET_MICROS((byte) 0x3D), + /** + * File read during DB Multi-Get. + * Measured in microseconds. + */ FILE_READ_MULTIGET_MICROS((byte) 0x3E), + /** + * File read during DB Iterator. + * Measured in microseconds. + */ FILE_READ_DB_ITERATOR_MICROS((byte) 0x3F), + /** + * File read during DB checksum validation. + * Measured in microseconds. + */ FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x40), + /** + * File read during file checksum validation. + * Measured in microseconds. + */ FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x41), - // 0x1F for backwards compatibility on current minor version. + /** + * The number of histogram types available. + * {@code 0x1F} for backwards compatibility on current minor version. 
+ */ HISTOGRAM_ENUM_MAX((byte) 0x1F); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java index 716a0bda0736..dd088dcd767e 100644 --- a/java/src/main/java/org/rocksdb/Holder.java +++ b/java/src/main/java/org/rocksdb/Holder.java @@ -7,6 +7,8 @@ /** * Simple instance reference wrapper. + * + * @param the concrete type that this holder holds. */ public class Holder { private /* @Nullable */ T value; diff --git a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java index 652bd19dc8c1..0c5dea7de6ba 100644 --- a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java @@ -12,6 +12,10 @@ * ExportImportFilesMetaData)}. */ public class ImportColumnFamilyOptions extends RocksObject { + + /** + * Constructs an ImportColumnFamilyOptions. + */ public ImportColumnFamilyOptions() { super(newImportColumnFamilyOptions()); } diff --git a/java/src/main/java/org/rocksdb/IndexShorteningMode.java b/java/src/main/java/org/rocksdb/IndexShorteningMode.java index a68346c3823c..2d4b3f9ad629 100644 --- a/java/src/main/java/org/rocksdb/IndexShorteningMode.java +++ b/java/src/main/java/org/rocksdb/IndexShorteningMode.java @@ -11,7 +11,7 @@ * enabled ({@link DBOptions#useDirectReads()} == true). * The default mode is the best tradeoff for most use cases. * This option only affects newly written tables. - * + *

* The index contains a key separating each pair of consecutive blocks. * Let A be the highest key in one block, B the lowest key in the next block, * and I the index entry separating these two blocks: @@ -22,7 +22,7 @@ * However, if I=A, this can't happen, and we'll read only the second block. * In kNoShortening mode, we use I=A. In other modes, we use the shortest * key in [A, B), which usually significantly reduces index size. - * + *

* There's a similar story for the last index entry, which is an upper bound * of the highest key in the file. If it's shortened and therefore * overestimated, iterator is likely to unnecessarily read the last data block diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java index 197bd89dab68..c5fda9acd7c3 100644 --- a/java/src/main/java/org/rocksdb/InfoLogLevel.java +++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -5,12 +5,39 @@ * RocksDB log levels. */ public enum InfoLogLevel { + /** + * Log 'debug' level events. + */ DEBUG_LEVEL((byte)0), + + /** + * Log 'info' level events. + */ INFO_LEVEL((byte)1), + + /** + * Log 'warn' level events. + */ WARN_LEVEL((byte)2), + + /** + * Log 'error' level events. + */ ERROR_LEVEL((byte)3), + + /** + * Log 'fatal' level events. + */ FATAL_LEVEL((byte)4), + + /** + * Log 'header' level events. + */ HEADER_LEVEL((byte)5), + + /** + * The number of log levels available. + */ NUM_INFO_LOG_LEVELS((byte)6); private final byte value_; diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index 1a6a5fccd945..85eccea5a55d 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -12,11 +12,16 @@ */ public class IngestExternalFileOptions extends RocksObject { + /** + * Constructs an IngestExternalFileOptions. + */ public IngestExternalFileOptions() { super(newIngestExternalFileOptions()); } /** + * Constructs an IngestExternalFileOptions. + * * @param moveFiles {@link #setMoveFiles(boolean)} * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)} * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)} diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index cd2267528d85..98d176f6d4fd 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -5,8 +5,12 @@ package org.rocksdb; +import java.nio.ByteBuffer; import java.util.Objects; +/** + * Indicates whether a key exists or not, and its corresponding value's length. + */ public class KeyMayExist { @Override public boolean equals(final Object o) { @@ -23,13 +27,44 @@ public int hashCode() { return Objects.hash(exists, valueLength); } - public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } + /** + * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, ByteBuffer)}. + */ + public enum KeyMayExistEnum { + /** + * Key does not exist. + */ + kNotExist, + /** + * Key may exist without a value. + */ + kExistsWithoutValue, + + /** + * Key may exist with a value. + */ + kExistsWithValue + } + + /** + * Constructs a KeyMayExist. + * + * @param exists indicates if the key exists. + * @param valueLength the length of the value pointed to by the key (if it exists). + */ KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { this.exists = exists; this.valueLength = valueLength; } + /** + * Indicates if the key exists. + */ public final KeyMayExistEnum exists; + + /** + * The length of the value pointed to by the key (if it exists). 
+ */ public final int valueLength; } diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java index cb0f1a30225b..8f1762fb7159 100644 --- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java +++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java @@ -55,7 +55,7 @@ public int level() { return level; } - public long newLiveFileMetaDataHandle() { + private long newLiveFileMetaDataHandle() { return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(), fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(), smallestKey().length, largestKey(), largestKey().length, numReadsSampled(), diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java index 5ee2c9fcc64a..2be597ce5f9b 100644 --- a/java/src/main/java/org/rocksdb/LogFile.java +++ b/java/src/main/java/org/rocksdb/LogFile.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * A (journal) log file. + */ @SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass") public class LogFile { private final String pathName; diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index 614a7fa502f1..e5983dde8caa 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -96,15 +96,51 @@ public InfoLogLevel infoLogLevel() { infoLogLevel(nativeHandle_)); } + /** + * Log a message. + * + * @param infoLogLevel the log level. + * @param logMsg the log message. + */ protected abstract void log(InfoLogLevel infoLogLevel, String logMsg); + /** + * Create a new Logger with Options. + * + * @param options the native handle to the underlying C++ native options object + * + * @return the native handle to the underlying C++ native Logger object. + */ protected native long createNewLoggerOptions( long options); + + /** + * Create a new Logger with DBOptions. + * + * @param dbOptions the native handle to the underlying C++ native db options object + * + * @return the native handle to the underlying C++ native Logger object. + */ protected native long createNewLoggerDbOptions( long dbOptions); + + /** + * Set the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * @param infoLogLevel the log level. + */ protected native void setInfoLogLevel(long handle, byte infoLogLevel); + + /** + * Get the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * + * @return the log level. + */ protected native byte infoLogLevel(long handle); /** diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java index 3d429035a343..56396ac8d997 100644 --- a/java/src/main/java/org/rocksdb/MemTableInfo.java +++ b/java/src/main/java/org/rocksdb/MemTableInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a Mem Table. + */ public class MemTableInfo { private final String columnFamilyName; private final long firstSeqno; diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java index c299f62210fa..630c400cfa9a 100644 --- a/java/src/main/java/org/rocksdb/MergeOperator.java +++ b/java/src/main/java/org/rocksdb/MergeOperator.java @@ -12,6 +12,12 @@ * value. */ public abstract class MergeOperator extends RocksObject { + + /** + * Constructs a MergeOperator. 
+ * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ MergeOperator. + */ protected MergeOperator(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index e54db7171e54..b58098119e9e 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -7,6 +7,9 @@ import java.util.*; +/** + * Mutable Column Family Options. + */ public class MutableColumnFamilyOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. @@ -54,24 +57,87 @@ public static MutableColumnFamilyOptionsBuilder parse( return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableColumnFamilyOptions + *

+ * The format is: key1=value1;key2=value2;key3=value3 etc + *
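By way of illustration, a sketch that parses such a string and applies the result to a live column family (the db and columnFamilyHandle variables are assumed):

    import org.rocksdb.MutableColumnFamilyOptions;

    final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder =
        MutableColumnFamilyOptions.parse(
            "write_buffer_size=67108864;max_write_buffer_number=4");
    db.setOptions(columnFamilyHandle, builder.build());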

+ * For int[] values, each int should be separated by a colon, e.g. + *

+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable column family options + * + * @return A builder for the mutable column family options + */ public static MutableColumnFamilyOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableColumnFamilyOptionKey extends MutableOptionKey {} + /** + * Mem Table options. + */ public enum MemtableOption implements MutableColumnFamilyOptionKey { + /** + * Write buffer size. + */ write_buffer_size(ValueType.LONG), + + /** + * Arena block size. + */ arena_block_size(ValueType.LONG), + + /** + * Prefix size ratio for Memtable's Bloom Filter. + */ memtable_prefix_bloom_size_ratio(ValueType.DOUBLE), + + /** + * Whether to filter whole keys in the Memtable(s). + */ memtable_whole_key_filtering(ValueType.BOOLEAN), + + /** + * Number of bits for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_bits(ValueType.INT), + + /** + * Number of probes for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_probes(ValueType.INT), + + /** + * Huge Page Size for Memtable(s). + */ memtable_huge_page_size(ValueType.LONG), + + /** + * Maximum number of successive merges. + */ max_successive_merges(ValueType.LONG), + + /** + * Whether to filter deletes. + */ @Deprecated filter_deletes(ValueType.BOOLEAN), + + /** + * Maximum number of write buffers. + */ max_write_buffer_number(ValueType.INT), + + /** + * Number of in-place update locks. + */ inplace_update_num_locks(ValueType.LONG), + + /** + * Memory purge threshold. + */ experimental_mempurge_threshold(ValueType.DOUBLE); private final ValueType valueType; @@ -85,20 +151,78 @@ public ValueType getValueType() { } } + /** + * Compaction options. + */ public enum CompactionOption implements MutableColumnFamilyOptionKey { + /** + * Disable auto compaction. + */ disable_auto_compactions(ValueType.BOOLEAN), + + /** + * Soft limit on the number of bytes pending before compaction. + */ soft_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Hard limit on the number of bytes pending before compaction. + */ hard_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Number of files in Level 0 before compaction is triggered. + */ level0_file_num_compaction_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a slowdown is triggered. + */ level0_slowdown_writes_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a stop is triggered. + */ level0_stop_writes_trigger(ValueType.INT), + + /** + * Max compaction bytes. + */ max_compaction_bytes(ValueType.LONG), + + /** + * Target for the base size of files. + */ target_file_size_base(ValueType.LONG), + + /** + * Multiplier for the size of files. + */ target_file_size_multiplier(ValueType.INT), + + /** + * Maximum size in bytes for level base. + */ max_bytes_for_level_base(ValueType.LONG), + + /** + * Maximum bytes for level multiplier. + */ max_bytes_for_level_multiplier(ValueType.INT), + + /** + * Maximum bytes for level multiplier(s) additional + */ max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY), + + /** + * Time-to-live. + */ ttl(ValueType.LONG), + + /** + * Compaction period in seconds. + */ periodic_compaction_seconds(ValueType.LONG); private final ValueType valueType; @@ -112,16 +236,58 @@ public ValueType getValueType() { } } + /** + * Blob options. + */ public enum BlobOption implements MutableColumnFamilyOptionKey { + /** + * Enable BLOB files. 
+ */ enable_blob_files(ValueType.BOOLEAN), + + /** + * Minimum BLOB size. + */ min_blob_size(ValueType.LONG), + + /** + * BLOB file size. + */ blob_file_size(ValueType.LONG), + + /** + * BLOB compression type. + */ blob_compression_type(ValueType.ENUM), + + /** + * Enable BLOB garbage collection. + */ enable_blob_garbage_collection(ValueType.BOOLEAN), + + /** + * BLOB garbage collection age cut-off. + */ blob_garbage_collection_age_cutoff(ValueType.DOUBLE), + + /** + * Threshold for forcing BLOB garbage collection. + */ blob_garbage_collection_force_threshold(ValueType.DOUBLE), + + /** + * BLOB compaction read-ahead size. + */ blob_compaction_readahead_size(ValueType.LONG), + + /** + * BLOB file starting level. + */ blob_file_starting_level(ValueType.INT), + + /** + * Prepopulate BLOB Cache. + */ prepopulate_blob_cache(ValueType.ENUM); private final ValueType valueType; @@ -135,10 +301,28 @@ public ValueType getValueType() { } } + /** + * Miscellaneous options. + */ public enum MiscOption implements MutableColumnFamilyOptionKey { + /** + * Maximum number of sequential keys to skip during iteration. + */ max_sequential_skip_in_iterations(ValueType.LONG), + + /** + * Whether to enable paranoid file checks. + */ paranoid_file_checks(ValueType.BOOLEAN), + + /** + * Whether to report background I/O stats. + */ report_bg_io_stats(ValueType.BOOLEAN), + + /** + * Compression type. + */ compression(ValueType.ENUM); private final ValueType valueType; @@ -152,6 +336,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableColumnFamilyOptions. + */ public static class MutableColumnFamilyOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableColumnFamilyOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java index 729b0e882788..c637989d82fa 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -5,6 +5,11 @@ package org.rocksdb; +/** + * Interface for MutableColumnFamilyOptions. + * + * @param the concrete type of the MutableColumnFamilyOptions. + */ public interface MutableColumnFamilyOptionsInterface< T extends MutableColumnFamilyOptionsInterface> extends AdvancedMutableColumnFamilyOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java index 927e80522272..f39d117d833e 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptions.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -10,6 +10,9 @@ import java.util.Map; import java.util.Objects; +/** + * Mutable Database Options. + */ public class MutableDBOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. @@ -55,27 +58,105 @@ public static MutableDBOptionsBuilder parse(final String str, final boolean igno return new MutableDBOptions.MutableDBOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableDBOptions + *

+ * The format is: key1=value1;key2=value2;key3=value3 etc + *

+ * For int[] values, each int should be separated by a colon, e.g. + * <p>

+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable db options + * + * @return A builder for the mutable db options + */ public static MutableDBOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableDBOptionKey extends MutableOptionKey {} + /** + * Database options. + */ public enum DBOption implements MutableDBOptionKey { + /** + * Maximum number of background jobs. + */ max_background_jobs(ValueType.INT), + + /** + * Maximum number of background compactions. + */ max_background_compactions(ValueType.INT), + + /** + * Whether to avoid flush during shutdown. + */ avoid_flush_during_shutdown(ValueType.BOOLEAN), + + /** + * Max buffer size for writing to files. + */ writable_file_max_buffer_size(ValueType.LONG), + + /** + * Delayed write rate. + */ delayed_write_rate(ValueType.LONG), + + /** + * Maximum total size of the WAL. + */ max_total_wal_size(ValueType.LONG), + + /** + * The period to delete obsolete file. + * Measured in microseconds. + */ delete_obsolete_files_period_micros(ValueType.LONG), + + /** + * The period to dump statistics. + * Measured in seconds. + */ stats_dump_period_sec(ValueType.INT), + + /** + * The period that statistics persist. + * Measured in seconds. + */ stats_persist_period_sec(ValueType.INT), + + /** + * Buffer size for statistics history. + */ stats_history_buffer_size(ValueType.LONG), + + /** + * Maximum number of open files. + */ max_open_files(ValueType.INT), + + /** + * Bytes per sync. + */ bytes_per_sync(ValueType.LONG), + + /** + * WAL bytes per sync. + */ wal_bytes_per_sync(ValueType.LONG), + + /** + * Strict limit of bytes per sync. + */ strict_bytes_per_sync(ValueType.BOOLEAN), + + /** + * Compaction readahead size. + */ compaction_readahead_size(ValueType.LONG); private final ValueType valueType; @@ -89,6 +170,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableDBOptions. + */ public static class MutableDBOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableDBOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java index 1521fb4d08a1..b2d5233edee9 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -1,6 +1,11 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. package org.rocksdb; +/** + * Interface for MutableDBOptions. + * + * @param the concrete type of DBOptions. + */ public interface MutableDBOptionsInterface> { /** * Specifies the maximum number of concurrent background jobs (both flushes diff --git a/java/src/main/java/org/rocksdb/MutableOptionKey.java b/java/src/main/java/org/rocksdb/MutableOptionKey.java index ec1b9ff3b352..a46b28fdac99 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionKey.java +++ b/java/src/main/java/org/rocksdb/MutableOptionKey.java @@ -1,16 +1,58 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. package org.rocksdb; +/** + * Mutable Option keys. + */ public interface MutableOptionKey { + + /** + * Types of values used for Mutable Options, + */ enum ValueType { + + /** + * Double precision floating point number. + */ DOUBLE, + + /** + * 64 bit signed integer. + */ LONG, + + /** + * 32 bit signed integer. + */ INT, + + /** + * Boolean. + */ BOOLEAN, + + /** + * Array of 32 bit signed integers. + */ INT_ARRAY, + + /** + * Enumeration. 
+ */ ENUM } + /** + * Get the name of the MutableOption key. + * + * @return the name of the key. + */ String name(); + + /** + * Get the value type of the MutableOption. + * + * @return the value type. + */ ValueType getValueType(); } diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java index fe689b5d01b0..bded79e8d759 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionValue.java +++ b/java/src/main/java/org/rocksdb/MutableOptionValue.java @@ -3,6 +3,11 @@ import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR; +/** + * Base class for the value of a mutable option. + * + * @param the concrete type of the value. + */ public abstract class MutableOptionValue { abstract double asDouble() throws NumberFormatException; diff --git a/java/src/main/java/org/rocksdb/OperationStage.java b/java/src/main/java/org/rocksdb/OperationStage.java index 6ac0a15a2442..2ded8d8a642d 100644 --- a/java/src/main/java/org/rocksdb/OperationStage.java +++ b/java/src/main/java/org/rocksdb/OperationStage.java @@ -9,16 +9,59 @@ * The operation stage. */ public enum OperationStage { + /** + * Unknown. + */ STAGE_UNKNOWN((byte)0x0), + + /** + * Flush. + */ STAGE_FLUSH_RUN((byte)0x1), + + /** + * Flush writing Level 0. + */ STAGE_FLUSH_WRITE_L0((byte)0x2), + + /** + * Preparing compaction. + */ STAGE_COMPACTION_PREPARE((byte)0x3), + + /** + * Compaction. + */ STAGE_COMPACTION_RUN((byte)0x4), + + /** + * Compaction processing a key-value. + */ STAGE_COMPACTION_PROCESS_KV((byte)0x5), + + /** + * Installing compaction. + */ STAGE_COMPACTION_INSTALL((byte)0x6), + + /** + * Compaction syncing a file. + */ STAGE_COMPACTION_SYNC_FILE((byte)0x7), + + /** + * Picking Memtable(s) to flush. + */ STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), + + /** + * Rolling back Memtable(s). + */ STAGE_MEMTABLE_ROLLBACK((byte)0x9), + + /** + * Installing Memtable flush results. + */ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java index bf73534683cc..0279e9e3b2f4 100644 --- a/java/src/main/java/org/rocksdb/OperationType.java +++ b/java/src/main/java/org/rocksdb/OperationType.java @@ -12,9 +12,24 @@ * examples include compaction and flush. */ public enum OperationType { + /** + * Unknown. + */ OP_UNKNOWN((byte)0x0), + + /** + * Compaction. + */ OP_COMPACTION((byte)0x1), + + /** + * Flush. + */ OP_FLUSH((byte) 0x2), + + /** + * DB Open. 
+ */ OP_DBOPEN((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 283f19a3145d..1f0b2beb3e8f 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -207,9 +207,9 @@ public RocksDB getBaseDB() { @Override protected final native void disposeInternal(final long handle); - protected static native long open(final long optionsHandle, + private static native long open(final long optionsHandle, final String path) throws RocksDBException; - protected static native long[] open(final long handle, final String path, + private static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); private static native void closeDatabase(final long handle) throws RocksDBException; private native long beginTransaction(final long handle, diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java index a2f5d85ab5c1..619db91edd3a 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -5,9 +5,15 @@ package org.rocksdb; +/** + * Options for an Optimistic Transaction. + */ public class OptimisticTransactionOptions extends RocksObject implements TransactionalOptions { + /** + * Constructs an OptimisticTransactionOptions. + */ public OptimisticTransactionOptions() { super(newOptimisticTransactionOptions()); } diff --git a/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java index bcbf1d152962..adce6e572f8f 100644 --- a/java/src/main/java/org/rocksdb/OptionString.java +++ b/java/src/main/java/org/rocksdb/OptionString.java @@ -9,6 +9,9 @@ import java.util.List; import java.util.Objects; +/** + * An option expressed as a String. + */ @SuppressWarnings("PMD.AvoidStringBufferField") public class OptionString { private static final char kvPairSeparator = ';'; @@ -19,23 +22,51 @@ public class OptionString { private static final char wrappedValueEnd = '}'; private static final char arrayValueSeparator = ':'; + /** + * The value of the option. + */ static class Value { final List list; final List complex; + /** + * Constructs a Value. + * + * @param list the list of values. + * @param complex the list of complex values. + */ public Value(final List list, final List complex) { this.list = list; this.complex = complex; } + /** + * Returns true if the value is a list. + * + * @return true if the value is a list, false otherwise. + */ public boolean isList() { return (this.list != null && this.complex == null); } + /** + * Constructs a value from a list. + * + * @param list a list of string values. + * + * @return the value. + */ public static Value fromList(final List list) { return new Value(list, null); } + /** + * Constructs a value from a complex value. + * + * @param complex the complex value. + * + * @return the value. + */ public static Value fromComplex(final List complex) { return new Value(null, complex); } @@ -251,6 +282,13 @@ private List parseComplex() { return entries; } + /** + * Parse a string into a list of entries. + * + * @param str the string. + * + * @return the list of entries. 
+ */ public static List parse(final String str) { Objects.requireNonNull(str); diff --git a/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java index 4168921f2a05..b0c2f95e6d1c 100644 --- a/java/src/main/java/org/rocksdb/OptionsUtil.java +++ b/java/src/main/java/org/rocksdb/OptionsUtil.java @@ -7,6 +7,9 @@ import java.util.List; +/** + * Utility functions to assist in working with Options. + */ public class OptionsUtil { /** * A static method to construct the DBOptions and ColumnFamilyDescriptors by diff --git a/java/src/main/java/org/rocksdb/PerfContext.java b/java/src/main/java/org/rocksdb/PerfContext.java index 3934e4115cf5..e02f5157fe76 100644 --- a/java/src/main/java/org/rocksdb/PerfContext.java +++ b/java/src/main/java/org/rocksdb/PerfContext.java @@ -5,11 +5,23 @@ package org.rocksdb; +/** + * Performance Context. + */ public class PerfContext extends RocksObject { + + /** + * Constructs a PerfContext. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ PerfContext. + */ protected PerfContext(final long nativeHandle) { super(nativeHandle); } + /** + * Reset the performance context. + */ public void reset() { reset(nativeHandle_); } @@ -42,8 +54,8 @@ public long getBlockReadByte() { return getBlockReadByte(nativeHandle_); } - /* - @return total nanos spent on block reads + /** + * @return total nanos spent on block reads */ public long getBlockReadTime() { return getBlockReadTime(nativeHandle_); @@ -220,7 +232,7 @@ public long getBlobDecompressTime() { } /** - * total number of internal keys skipped over during iteration. + * Get the total number of internal keys skipped over during iteration. * There are several reasons for it: * 1. when calling Next(), the iterator is in the position of the previous * key, so that we'll need to skip it. It means this counter will always * be incremented in Next(). * 2. when calling Next(), we need to skip internal entries for the previous * keys that are overwritten. * 3. when calling Next(), Seek() or SeekToFirst(), after previous * deletions, we'll successively skip all tombstones, while all the older * hidden by the tombstones will be included here. * 4. symmetric cases for Prev() and SeekToLast() * internal_recent_skipped_count is not included in this counter. + * + * @return the total number of internal keys skipped over during iteration */ public long getInternalKeySkippedCount() { return getInternalKeySkippedCount(nativeHandle_); } /** - * Total number of deletes and single deletes skipped over during iteration + * Get the total number of deletes and single deletes skipped over during iteration * When calling Next(), Seek() or SeekToFirst(), after previous position * before calling Next(), the seek key in Seek() or the beginning for * SeekToFirst(), there may be one or more deleted keys before the next valid * key. Every deleted key is counted once. We don't recount here if there are * still older updates invalidated by the tombstones. + * + * @return total number of deletes and single deletes skipped over during iteration. */ public long getInternalDeleteSkippedCount() { return getInternalDeleteSkippedCount(nativeHandle_); } /** - * How many times iterators skipped over internal keys that are more recent + * Get how many times iterators skipped over internal keys that are more recent * than the snapshot that iterator is using. + * + * @return the number of times iterators skipped over internal keys that are more recent + * than the snapshot that iterator is using. 
*/ public long getInternalRecentSkippedCount() { return getInternalRecentSkippedCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by iterators. + * Get how many merge operands were fed into the merge operator by iterators. * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by iterators. */ public long getInternalMergeCount() { return getInternalMergeCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by point lookups. + * Get how many merge operands were fed into the merge operator by point lookups. * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by point lookups. */ public long getInternalMergePointLookupCount() { return getInternalMergePointLookupCount(nativeHandle_); } /** - * Number of times we reseeked inside a merging iterator, specifically to skip + * Get the number of times we re-seeked inside a merging iterator, specifically to skip * after or before a range of keys covered by a range deletion in a newer LSM * component. + * + * @return the number of times we re-seeked inside a merging iterator. */ public long getInternalRangeDelReseekCount() { return getInternalRangeDelReseekCount(nativeHandle_); @@ -485,26 +510,37 @@ public long getNewTableIteratorNanos() { } /** + * Get total time spent on seeking a key in data/index blocks, in nanoseconds. + * * @return Time spent on seeking a key in data/index blocks */ public long getBlockSeekNanos() { return getBlockSeekNanos(nativeHandle_); } + /** - * @return Time spent on finding or creating a table reader + * Get total time spent on finding or creating a table reader. + * + * @return the time spent on finding or creating a table reader */ public long getFindTableNanos() { return getFindTableNanos(nativeHandle_); } /** + * Get total number of mem table bloom hits. + * * @return total number of mem table bloom hits */ public long getBloomMemtableHitCount() { return getBloomMemtableHitCount(nativeHandle_); } - // total number of mem table bloom misses + /** + * Get total number of mem table bloom misses. + * + * @return total number of mem table bloom misses. + */ public long getBloomMemtableMissCount() { return getBloomMemtableMissCount(nativeHandle_); } @@ -544,91 +580,209 @@ public long getEnvNewSequentialFileNanos() { return getEnvNewSequentialFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access file(s) in the environment. + * + * @return the total time + */ public long getEnvNewRandomAccessFileNanos() { return getEnvNewRandomAccessFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new writable file(s) in the environment. + * + * @return the total time + */ public long getEnvNewWritableFileNanos() { return getEnvNewWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for reusing writable file(s) in the environment. + * + * @return the total time + */ public long getEnvReuseWritableFileNanos() { return getEnvReuseWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access read-write file(s) in the environment. + * + * @return the total time + */ public long getEnvNewRandomRwFileNanos() { return getEnvNewRandomRwFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new directory(s) in the environment. 
+ * + * @return the total time + */ public long getEnvNewDirectoryNanos() { return getEnvNewDirectoryNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for checking if a file exists in the environment. + * + * @return the total time + */ public long getEnvFileExistsNanos() { return getEnvFileExistsNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for getting children in the environment. + * + * @return the total time + */ public long getEnvGetChildrenNanos() { return getEnvGetChildrenNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting child file attributes in the environment. + * + * @return the total time + */ public long getEnvGetChildrenFileAttributesNanos() { return getEnvGetChildrenFileAttributesNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting file(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteFileNanos() { return getEnvDeleteFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating directory(s) in the environment. + * + * @return the total time + */ public long getEnvCreateDirNanos() { return getEnvCreateDirNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for creating directory(s) (only if not already existing) in the environment. + * + * @return the total time + */ public long getEnvCreateDirIfMissingNanos() { return getEnvCreateDirIfMissingNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting directory(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteDirNanos() { return getEnvDeleteDirNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file size(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileSizeNanos() { return getEnvGetFileSizeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file modification time(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileModificationTimeNanos() { return getEnvGetFileModificationTimeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for renaming file(s) in the environment. + * + * @return the total time + */ public long getEnvRenameFileNanos() { return getEnvRenameFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for linking file(s) in the environment. + * + * @return the total time + */ public long getEnvLinkFileNanos() { return getEnvLinkFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for locking file(s) in the environment. + * + * @return the total time + */ public long getEnvLockFileNanos() { return getEnvLockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for unlocking file(s) in the environment. + * + * @return the total time + */ public long getEnvUnlockFileNanos() { return getEnvUnlockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating loggers in the environment. + * + * @return the total time + */ public long getEnvNewLoggerNanos() { return getEnvNewLoggerNanos(nativeHandle_); } + /** + * Get the CPU time consumed by 'get' operation(s). + * + * @return the total time + */ public long getGetCpuNanos() { return getGetCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'next' on iterator(s) in the environment. 
+ * + * @return the total time + */ public long getIterNextCpuNanos() { return getIterNextCpuNanos(nativeHandle_); } + + /** + * Get the CPU time consumed by calling 'prev' on iterator(s) in the environment. + * + * @return the total time + */ public long getIterPrevCpuNanos() { return getIterPrevCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'seek' on iterator(s) in the environment. + * + * @return the total time + */ public long getIterSeekCpuNanos() { return getIterSeekCpuNanos(nativeHandle_); } @@ -647,6 +801,9 @@ public long getDecryptDataNanos() { return getDecryptDataNanos(nativeHandle_); } + /** + * @return the number of asynchronous seeks. + */ public long getNumberAsyncSeek() { return getNumberAsyncSeek(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/PerfLevel.java b/java/src/main/java/org/rocksdb/PerfLevel.java index 332e6d7d977b..a0db8a3286c4 100644 --- a/java/src/main/java/org/rocksdb/PerfLevel.java +++ b/java/src/main/java/org/rocksdb/PerfLevel.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Performance monitoring levels. + */ public enum PerfLevel { /** * Unknown setting @@ -45,16 +48,31 @@ public enum PerfLevel { private final byte _value; + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ public byte getValue() { return _value; } + /** + * Get the PerfLevel from the internal representation value. + * + * @param level the internal representation value. + * + * @return the PerfLevel + * + * @throws IllegalArgumentException if the value does not match a + * PerfLevel + */ public static PerfLevel getPerfLevel(byte level) { for (PerfLevel l : PerfLevel.values()) { if (l.getValue() == level) { return l; } } - throw new IllegalArgumentException("Uknknown PerfLevel constant : " + level); + throw new IllegalArgumentException("Unknown PerfLevel constant : " + level); } } diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java index 5297111e6f98..7b99eaec8a24 100644 --- a/java/src/main/java/org/rocksdb/PersistentCache.java +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -11,6 +11,17 @@ */ public class PersistentCache extends RocksObject { + /** + * Constructs a persistent cache. + * + * @param env the environment. + * @param path the path for the cache. + * @param size the size of the cache. + * @param logger the logger to use. + * @param optimizedForNvm true to optimize for NVM, false otherwise. + * + * @throws RocksDBException if the cache cannot be created. + */ public PersistentCache(final Env env, final String path, final long size, final Logger logger, final boolean optimizedForNvm) throws RocksDBException { diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java index 46077ba56530..8c06a2120e68 100644 --- a/java/src/main/java/org/rocksdb/PlainTableConfig.java +++ b/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -13,17 +13,52 @@ *
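To show how the PerfContext counters and PerfLevel documented above are typically used together, a minimal sketch (the db handle, key, and chosen level are assumptions; ENABLE_TIME_EXCEPT_FOR_MUTEX and DISABLE are existing PerfLevel constants):

    import org.rocksdb.PerfContext;
    import org.rocksdb.PerfLevel;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    static void measureGet(final RocksDB db, final byte[] key) throws RocksDBException {
      db.setPerfLevel(PerfLevel.ENABLE_TIME_EXCEPT_FOR_MUTEX); // enable timing
      final PerfContext ctx = db.getPerfContext(); // thread-local context
      ctx.reset();                                 // clear counters first
      db.get(key);                                 // the measured operation
      System.out.println("block read nanos: " + ctx.getBlockReadTime());
      db.setPerfLevel(PerfLevel.DISABLE);          // stop measuring
    }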

It also support prefix hash feature.

*/ public class PlainTableConfig extends TableFormatConfig { + + /** + * Indicates that the key size can be variable length. + */ public static final int VARIABLE_LENGTH = 0; + + /** + * The default bits per key in the bloom filter. + */ public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10; + + /** + * The default ratio of the hash table. + */ public static final double DEFAULT_HASH_TABLE_RATIO = 0.75; + + /** + * The default sparseness factor of the index. + */ public static final int DEFAULT_INDEX_SPARSENESS = 16; + + /** + * The default size of the huge TLB. + */ public static final int DEFAULT_HUGE_TLB_SIZE = 0; + + /** + * The default encoding type. + */ public static final EncodingType DEFAULT_ENCODING_TYPE = EncodingType.kPlain; + + /** + * The default full scan mode. + */ public static final boolean DEFAULT_FULL_SCAN_MODE = false; + + /** + * The default setting for whether to store the index in a file. + */ public static final boolean DEFAULT_STORE_INDEX_IN_FILE = false; + /** + * Constructs a PlainTableConfig with the default settings. + */ public PlainTableConfig() { keySize_ = VARIABLE_LENGTH; bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY; diff --git a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java index f1237aa7c95b..d2a02f6a9271 100644 --- a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java +++ b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java @@ -18,7 +18,15 @@ * system since it involves network traffic and higher latencies.

*/ public enum PrepopulateBlobCache { + + /** + * Disable pre-populating the blob cache. + */ PREPOPULATE_BLOB_DISABLE((byte) 0x0, "prepopulate_blob_disable", "kDisable"), + + /** + * Only pre-populate on BLOB flush. + */ PREPOPULATE_BLOB_FLUSH_ONLY((byte) 0x1, "prepopulate_blob_flush_only", "kFlushOnly"); /** diff --git a/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java index 34a56edcbcde..ac656f9a350f 100644 --- a/java/src/main/java/org/rocksdb/Priority.java +++ b/java/src/main/java/org/rocksdb/Priority.java @@ -9,9 +9,25 @@ * The Thread Pool priority. */ public enum Priority { + + /** + * Bottom most priority. + */ BOTTOM((byte) 0x0), + + /** + * Low priority. + */ LOW((byte) 0x1), + + /** + * High priority. + */ HIGH((byte)0x2), + + /** + * Maximum number of priority levels. + */ TOTAL((byte)0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Range.java b/java/src/main/java/org/rocksdb/Range.java index 74c85e5f04f3..16f4dbe2567e 100644 --- a/java/src/main/java/org/rocksdb/Range.java +++ b/java/src/main/java/org/rocksdb/Range.java @@ -12,6 +12,13 @@ public class Range { final Slice start; final Slice limit; + /** + * Constructs a Range. + * + * @param start the start of the range + * @param limit the exclusive end of the range + */ public Range(final Slice start, final Slice limit) { this.start = start; this.limit = limit; } diff --git a/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java index c2b8a0fd92e2..97d9387984b2 100644 --- a/java/src/main/java/org/rocksdb/RateLimiter.java +++ b/java/src/main/java/org/rocksdb/RateLimiter.java @@ -12,10 +12,26 @@ * @since 3.10.0 */ public class RateLimiter extends RocksObject { + + /** + * The default refill period in microseconds. + */ public static final long DEFAULT_REFILL_PERIOD_MICROS = 100 * 1000; + + /** + * The default fairness parameter value. + */ public static final int DEFAULT_FAIRNESS = 10; + + /** + * The default rate limiter mode. + */ public static final RateLimiterMode DEFAULT_MODE = RateLimiterMode.WRITES_ONLY; + + /** + * The default of whether to enable auto-tune. + */ public static final boolean DEFAULT_AUTOTUNE = false; /** diff --git a/java/src/main/java/org/rocksdb/RateLimiterMode.java b/java/src/main/java/org/rocksdb/RateLimiterMode.java index 4b029d8165e2..d0bdc3882b1d 100644 --- a/java/src/main/java/org/rocksdb/RateLimiterMode.java +++ b/java/src/main/java/org/rocksdb/RateLimiterMode.java @@ -9,8 +9,20 @@ * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}. */ public enum RateLimiterMode { + + /** + * Only rate limit reads. + */ READS_ONLY((byte)0x0), + + /** + * Only rate limit writes. + */ WRITES_ONLY((byte)0x1), + + /** + * Rate limit all IO. + */ ALL_IO((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index c444ae167e0b..14c42670f42c 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -12,11 +12,17 @@ * become out-of-scope to release the allocated memory in c++. */ public class ReadOptions extends RocksObject { + + /** + * Constructs a ReadOptions. + */ public ReadOptions() { super(newReadOptions()); } /** + * Constructs a ReadOptions. + * * @param verifyChecksums verification will be performed on every read * when set to true * @param fillCache if true, then fill-cache behavior will be performed. 
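A short sketch connecting the RateLimiter defaults documented above to the database options (the 10 MiB/s figure is illustrative):

    import org.rocksdb.DBOptions;
    import org.rocksdb.RateLimiter;
    import org.rocksdb.RateLimiterMode;

    // Equivalent to new RateLimiter(10L << 20), but with every documented
    // default spelled out; WRITES_ONLY is DEFAULT_MODE.
    final RateLimiter rateLimiter = new RateLimiter(10L << 20, // 10 MiB/s
        RateLimiter.DEFAULT_REFILL_PERIOD_MICROS, RateLimiter.DEFAULT_FAIRNESS,
        RateLimiterMode.WRITES_ONLY, RateLimiter.DEFAULT_AUTOTUNE);
    final DBOptions dbOptions = new DBOptions().setRateLimiter(rateLimiter);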
diff --git a/java/src/main/java/org/rocksdb/ReadTier.java b/java/src/main/java/org/rocksdb/ReadTier.java index 78f83f6ad657..b200823544ca 100644 --- a/java/src/main/java/org/rocksdb/ReadTier.java +++ b/java/src/main/java/org/rocksdb/ReadTier.java @@ -9,9 +9,25 @@ * RocksDB {@link ReadOptions} read tiers. */ public enum ReadTier { + + /** + * Read all tiers. + */ READ_ALL_TIER((byte)0), + + /** + * Read block cache. + */ BLOCK_CACHE_TIER((byte)1), + + /** + * Read persisted. + */ PERSISTED_TIER((byte)2), + + /** + * Read Memtable(s). + */ MEMTABLE_TIER((byte)3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index e96694313b4a..935828d0e1dd 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -6,10 +6,14 @@ package org.rocksdb; /** - * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ + * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++. */ public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { + + /** + * Constructs a RemoveEmptyValueCompactionFilter. + */ public RemoveEmptyValueCompactionFilter() { super(createNewRemoveEmptyValueCompactionFilter0()); } diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 3e1bfdbd7f20..1307911c1eed 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -30,6 +30,11 @@ public abstract class RocksCallbackObject extends */ protected final long nativeHandle_; + /** + * Constructs a RocksCallbackObject. + * + * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the underlying native RocksDB C++ objects. + */ protected RocksCallbackObject(final long... nativeParameterHandles) { super(true); this.nativeHandle_ = initializeNative(nativeParameterHandles); diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 30115029497e..b44c33556439 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -21,7 +21,15 @@ * indicates sth wrong at the RocksDB library side and the call failed. */ public class RocksDB extends RocksObject { + + /** + * The name of the default column family. + */ public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(UTF_8); + + /** + * A constant representing a result where something was searched for but not found. + */ public static final int NOT_FOUND = -1; private enum LibraryState { @@ -155,6 +163,11 @@ public static void loadLibrary(final List paths) { } } + /** + * Get the RocksDB version. + * + * @return the version of RocksDB. + */ public static Version rocksdbVersion() { return version; } @@ -781,6 +794,9 @@ public List createColumnFamilies( * The ColumnFamilyHandle is automatically disposed with DB disposal. * * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. + * @param metadata the metadata for the imported file. + * * @return {@link org.rocksdb.ColumnFamilyHandle} instance. 
* * @throws RocksDBException thrown if error happens in underlying @@ -796,6 +812,21 @@ public ColumnFamilyHandle createColumnFamilyWithImport( columnFamilyDescriptor, importColumnFamilyOptions, metadatas); } + /** + * Creates a new column family with the name columnFamilyName, + * imports external SST files specified in `metadatas`, and allocates a + * ColumnFamilyHandle within an internal structure. + * The ColumnFamilyHandle is automatically disposed with DB disposal. + * + * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. + * @param metadatas the metadata for the imported files. + * + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ public ColumnFamilyHandle createColumnFamilyWithImport( final ColumnFamilyDescriptor columnFamilyDescriptor, final ImportColumnFamilyOptions importColumnFamilyOptions, @@ -830,10 +861,17 @@ public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle) dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_); } - // Bulk drop column families. This call only records drop records in the - // manifest and prevents the column families from flushing and compacting. - // In case of error, the request may succeed partially. User may call - // ListColumnFamilies to check the result. + /** + * Bulk drop column families. This call only records drop records in the + * manifest and prevents the column families from flushing and compacting. + * In case of error, the request may succeed partially. User may call + * {@link #listColumnFamilies(Options, String)} to check the result. + * + * @param columnFamilies the column families to drop. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ public void dropColumnFamilies( final List columnFamilies) throws RocksDBException { final long[] cfHandles = new long[columnFamilies.size()]; @@ -1689,6 +1727,19 @@ public void merge(final WriteOptions writeOpts, key, offset, len, value, vOffset, vLen); } + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1707,6 +1758,20 @@ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final Byte value.position(value.limit()); } + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle the column family. + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
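A sketch of the ByteBuffer merge documented above; direct buffers take the fast path, and the database is assumed to have been opened with a merge operator such as StringAppendOperator:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteOptions;

    static void mergeDirect(final RocksDB db) throws RocksDBException {
      final ByteBuffer key = ByteBuffer.allocateDirect(16)
          .put("k1".getBytes(StandardCharsets.UTF_8));
      final ByteBuffer value = ByteBuffer.allocateDirect(16)
          .put("v1".getBytes(StandardCharsets.UTF_8));
      key.flip();   // switch both buffers from writing to reading
      value.flip();
      try (final WriteOptions writeOpts = new WriteOptions()) {
        db.merge(writeOpts, key, value); // appends "v1" to any existing value
      }
    }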
+ * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1917,6 +1982,7 @@ public int get(final byte[] key, final int offset, final int len, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. */ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value) throws RocksDBException, IllegalArgumentException { @@ -1949,6 +2015,7 @@ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. */ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final int offset, final int len, final byte[] value, final int vOffset, @@ -2550,11 +2617,11 @@ public List multiGetByteBuffers(final ReadOptions readOptio * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *
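By way of illustration of the keyExists contract described in these hunks (the db handle and key are assumptions): unlike keyMayExist, which may return false positives, keyExists double-checks with a read and is definitive:

    // Definitive membership test; crosses the Java/JNI boundary only once.
    final byte[] key = "user:42".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    if (db.keyExists(key)) {
      // the key is certainly present, not merely "maybe present"
    }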

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for* * @return true if key exist in database, otherwise false. @@ -2566,11 +2633,11 @@ public boolean keyExists(final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for * @param offset the offset of the "key" array to be used, must be @@ -2586,11 +2653,11 @@ public boolean keyExists(final byte[] key, final int offset, final int len) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2605,11 +2672,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2629,11 +2696,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2648,11 +2715,11 @@ public boolean keyExists(final ReadOptions readOptions, final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2672,11 +2739,11 @@ public boolean keyExists( * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2693,11 +2760,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2721,11 +2788,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param key ByteBuffer with key. Must be allocated as direct. @@ -2739,11 +2806,11 @@ public boolean keyExists(final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2758,11 +2825,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final Byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2777,11 +2844,11 @@ public boolean keyExists(final ReadOptions readOptions, final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -3641,10 +3708,26 @@ public long[] getApproximateSizes(final List ranges, return getApproximateSizes(null, ranges, sizeApproximationFlags); } + /** + * Count and size. + */ public static class CountAndSize { + /** + * The count. + */ public final long count; + + /** + * The size. + */ public final long size; + /** + * Constructs a CountAndSize. + * + * @param count the count. + * @param size the size. + */ public CountAndSize(final long count, final long size) { this.count = count; this.size = size; @@ -3844,7 +3927,9 @@ public void setOptions( /** * Set performance level for rocksdb performance measurement. - * @param level + * + * @param level the performance level + * * @throws IllegalArgumentException for UNINITIALIZED and OUT_OF_BOUNDS values * as they can't be used for settings. */ @@ -3860,7 +3945,8 @@ public void setPerfLevel(final PerfLevel level) { /** * Return current performance level measurement settings. - * @return + * + * @return the performance level */ public PerfLevel getPerfLevel() { byte level = getPerfLevelNative(); @@ -3868,8 +3954,9 @@ public PerfLevel getPerfLevel() { } /** - * Return perf context bound to this thread. - * @return + * Return performance context bound to this thread. + * + * @return the performance context */ public PerfContext getPerfContext() { long native_handle = getPerfContextNative(); @@ -3877,7 +3964,7 @@ public PerfContext getPerfContext() { } /** - * Get the options for the column family handle + * Get the options for the column family handle. * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance, or null for the default column family. @@ -4301,6 +4388,9 @@ public void enableFileDeletions(final boolean force) enableFileDeletions(nativeHandle_, force); } + /** + * Live files. + */ public static class LiveFiles { /** * The valid size of the manifest file. The manifest file is an ever growing @@ -4771,15 +4861,29 @@ private static long[] toRangeSliceHandles(final List ranges) { return rangeSliceHandles; } + /** + * Store the options instance. + * + * This is used to ensure it is correctly released later. + * + * @param options the options. + */ protected void storeOptionsInstance(final DBOptionsInterface options) { options_ = options; } - protected void storeDefaultColumnFamilyHandle(ColumnFamilyHandle columnFamilyHandle) { + /** + * Store the default column family handle. + * + * This is used to ensure it is correctly released later. + * + * @param columnFamilyHandle the handle of the default column family. + */ + protected void storeDefaultColumnFamilyHandle(final ColumnFamilyHandle columnFamilyHandle) { defaultColumnFamilyHandle_ = columnFamilyHandle; } - private static void checkBounds(int offset, int len, int size) { + private static void checkBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); } @@ -5112,28 +5216,56 @@ private static native void destroyDB(final String path, final long optionsHandle private static native int version(); + /** + * The DB Options. + */ protected DBOptionsInterface options_; private static Version version; + /** + * Representation of a 3-part version number, e.g. MAJOR.MINOR.PATCH. 
+ */ public static class Version { private final byte major; private final byte minor; private final byte patch; + /** + * Constructs a new Version. + * + * @param major the major component of the version number. + * @param minor the minor component of the version number. + * @param patch the patch component of the version number. + */ public Version(final byte major, final byte minor, final byte patch) { this.major = major; this.minor = minor; this.patch = patch; } + /** + * Get the major component of the version number. + * + * @return the major component of the version number. + */ public int getMajor() { return major; } + /** + * Get the minor component of the version number. + * + * @return the minor component of the version number. + */ public int getMinor() { return minor; } + /** + * Get the patch component of the version number. + * + * @return the patch component of the version number. + */ public int getPatch() { return patch; } diff --git a/java/src/main/java/org/rocksdb/RocksDBException.java b/java/src/main/java/org/rocksdb/RocksDBException.java index 9df411d121cc..cbc429e8b5d8 100644 --- a/java/src/main/java/org/rocksdb/RocksDBException.java +++ b/java/src/main/java/org/rocksdb/RocksDBException.java @@ -11,22 +11,37 @@ */ public class RocksDBException extends Exception { private static final long serialVersionUID = -5187634878466267120L; + + /** + * The error status that led to this exception. + */ /* @Nullable */ private final Status status; /** * The private construct used by a set of public static factory method. * - * @param msg the specified error message. + * @param message the specified error message. */ - public RocksDBException(final String msg) { - this(msg, null); + public RocksDBException(final String message) { + this(message, null); } - public RocksDBException(final String msg, final Status status) { - super(msg); + /** + * Constructs a RocksDBException. + * + * @param message the detail message. The detail message is saved for later retrieval by the {@link #getMessage()} method. + * @param status the error status that led to this exception. + */ + public RocksDBException(final String message, final Status status) { + super(message); this.status = status; } + /** + * Constructs a RocksDBException. + * + * @param status the error status that led to this exception. + */ public RocksDBException(final Status status) { super(status.getState() != null ? status.getState() : status.getCodeString()); diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java index b35dea2afa27..993de5d33de2 100644 --- a/java/src/main/java/org/rocksdb/RocksIterator.java +++ b/java/src/main/java/org/rocksdb/RocksIterator.java @@ -23,8 +23,15 @@ * @see org.rocksdb.RocksObject */ public class RocksIterator extends AbstractRocksIterator { - protected RocksIterator(final RocksDB rocksDB, final long nativeHandle) { - super(rocksDB, nativeHandle); + + /** + * Constructs a RocksIterator. + * + * @param rocksDb the database. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. 
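A one-liner sketch of reading the three-part version number modelled by the Version class above (the printed digits are illustrative):

    final RocksDB.Version v = RocksDB.rocksdbVersion();
    System.out.println("RocksDB " + v.getMajor() + "." + v.getMinor() + "." + v.getPatch());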
+ */ + protected RocksIterator(final RocksDB rocksDb, final long nativeHandle) { + super(rocksDb, nativeHandle); } /** diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index eb3215290f84..6312634a4d52 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -22,9 +22,17 @@ public abstract class RocksMutableObject extends AbstractNativeReference { private long nativeHandle_; private boolean owningHandle_; + /** + * Constructs a RocksMutableObject with no initial underlying native C++ object. + */ protected RocksMutableObject() { } + /** + * Constructs a RocksMutableObject. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + */ protected RocksMutableObject(final long nativeHandle) { this.nativeHandle_ = nativeHandle; this.owningHandle_ = true; @@ -79,9 +87,19 @@ public final synchronized void close() { } } + /** + * Deletes underlying C++ object pointer. + */ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. + * All subclasses of {@code RocksObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + */ protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index cd5de70acbe2..a7657224cfd4 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -24,6 +24,12 @@ public abstract class RocksObject extends AbstractImmutableNativeReference { */ protected final long nativeHandle_; + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + */ protected RocksObject(final long nativeHandle) { super(true); this.nativeHandle_ = nativeHandle; @@ -37,9 +43,12 @@ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. + * All subclasses of {@code RocksObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + */ protected abstract void disposeInternal(final long handle); - -// long getNativeHandle() { -// return nativeHandle_; -// } } diff --git a/java/src/main/java/org/rocksdb/SanityLevel.java b/java/src/main/java/org/rocksdb/SanityLevel.java index 30568c363377..e24671c287b6 100644 --- a/java/src/main/java/org/rocksdb/SanityLevel.java +++ b/java/src/main/java/org/rocksdb/SanityLevel.java @@ -6,9 +6,24 @@ package org.rocksdb; +/** + * The Sanity Level. + */ public enum SanityLevel { + + /** + * None. + */ NONE((byte) 0x0), + + /** + * Loosely compatible. + */ LOOSELY_COMPATIBLE((byte) 0x1), + + /** + * Exactly matches. 
+ */ EXACT_MATCH((byte) 0xFF); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java index fe3c2dd05be8..3e2759a10bd5 100644 --- a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java +++ b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -10,8 +10,20 @@ * or file stats approximation or both. */ public enum SizeApproximationFlag { + + /** + * None + */ NONE((byte)0x0), + + /** + * Include Memtable(s). + */ INCLUDE_MEMTABLES((byte)0x1), + + /** + * Include file(s). + */ INCLUDE_FILES((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java index e2c1b97d8940..2d73baa1a7da 100644 --- a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -6,6 +6,9 @@ */ public class SkipListMemTableConfig extends MemTableConfig { + /** + * The default lookahead. + */ public static final long DEFAULT_LOOKAHEAD = 0; /** diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index 371f73f01fb2..24f85292a860 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -17,9 +17,25 @@ */ //@ThreadSafe public final class SstFileManager extends RocksObject { + + /** + * The default bytes-per-sec rate. + */ public static final long DEFAULT_RATE_BYTES_PER_SEC = 0; + + /** + * The default of whether to delete existing trash. + */ public static final boolean DEFAULT_DELETE_EXISTING_TRASH = true; - public static final double DEFAULT_MAX_TRASH_DB_RATION = 0.25; + + /** + * The default max trash db ratio. + */ + public static final double DEFAULT_MAX_TRASH_DB_RATIO = 0.25; + + /** + * The default max delete chunk size in bytes. + */ public static final long DEFAULT_BYTES_MAX_DELETE_CHUNK = 64 * 1024 * 1024; /** @@ -65,7 +81,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec) throws RocksDBException { - this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATION); + this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATIO); } /** diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java index 939d3937536c..65281004a67c 100644 --- a/java/src/main/java/org/rocksdb/SstFileReader.java +++ b/java/src/main/java/org/rocksdb/SstFileReader.java @@ -5,7 +5,16 @@ package org.rocksdb; +/** + * An SST File Reader. + */ public class SstFileReader extends RocksObject { + + /** + * Constructs an SstFileReader. + * + * @param options the options for the reader. + */ public SstFileReader(final Options options) { super(newSstFileReader(options.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java index a4a08167b184..78ca0e9addc8 100644 --- a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java +++ b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java @@ -21,6 +21,13 @@ * @see RocksObject */ public class SstFileReaderIterator extends AbstractRocksIterator { + + /** + * Constructs a SstFileReaderIterator. + * + * @param reader the SST file reader. 
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstFileReaderIterator. + */ protected SstFileReaderIterator(final SstFileReader reader, final long nativeHandle) { super(reader, nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java index ea6f13565995..a87cfd75ffa4 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java @@ -9,6 +9,11 @@ * Handle to factory for SstPartitioner. It is used in {@link ColumnFamilyOptions} */ public abstract class SstPartitionerFactory extends RocksObject { + /** + * Constructs a SstPartitionerFactory. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstPartitionerFactory. + */ protected SstPartitionerFactory(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java index b1ccf08c1405..be17459f2d13 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -9,6 +9,12 @@ * Fixed prefix factory. It partitions SST files using fixed prefix of the key. */ public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { + + /** + * Constructs an SstPartitionerFixedPrefixFactory. + * + * @param prefixLength the prefix length of the keys for partitioning. + */ public SstPartitionerFixedPrefixFactory(final long prefixLength) { super(newSstPartitionerFixedPrefixFactory0(prefixLength)); } diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java index 803fa37d91ec..8151b098cf3e 100644 --- a/java/src/main/java/org/rocksdb/StateType.java +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -12,7 +12,15 @@ * such as reading / writing a file or waiting for a mutex. */ public enum StateType { + + /** + * Unknown. + */ STATE_UNKNOWN((byte)0x0), + + /** + * Waiting on Mutex. + */ STATE_MUTEX_WAIT((byte)0x1); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index 09e08ee5699c..0e404b795236 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -13,18 +13,37 @@ */ public class Statistics extends RocksObject { + /** + * Constructs a Statistics. + */ public Statistics() { super(newStatisticsInstance()); } + /** + * Constructs a Statistics. + * + * @param otherStatistics another statistics object to copy stats from. + */ public Statistics(final Statistics otherStatistics) { super(newStatistics(otherStatistics.nativeHandle_)); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + */ public Statistics(final EnumSet ignoreHistograms) { super(newStatisticsInstance(toArrayValues(ignoreHistograms))); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + * @param otherStatistics another statistics object to copy stats from. 
+ */ public Statistics(final EnumSet ignoreHistograms, final Statistics otherStatistics) { super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java index dd0d98fe5214..e034bbfdc405 100644 --- a/java/src/main/java/org/rocksdb/StatisticsCollector.java +++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -41,6 +41,9 @@ public StatisticsCollector( _executorService = Executors.newSingleThreadExecutor(); } + /** + * Start collecting statistics. + */ public void start() { _executorService.submit(collectStatistics()); } diff --git a/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/java/src/main/java/org/rocksdb/StatsCollectorInput.java index 5bf43ade5a6f..f36f7baa3f7f 100644 --- a/java/src/main/java/org/rocksdb/StatsCollectorInput.java +++ b/java/src/main/java/org/rocksdb/StatsCollectorInput.java @@ -25,11 +25,21 @@ public StatsCollectorInput(final Statistics statistics, _statsCallback = statsCallback; } + /** + * Get the statistics. + * + * @return the statistics. + */ public Statistics getStatistics() { return _statistics; } - public StatisticsCollectorCallback getCallback() { + /** + * Get the statistics collector callback. + * + * @return the statistics collector callback. + */ + StatisticsCollectorCallback getCallback() { return _statsCallback; } } diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java index 5f751f422089..fa8e86bc603c 100644 --- a/java/src/main/java/org/rocksdb/Status.java +++ b/java/src/main/java/org/rocksdb/Status.java @@ -16,10 +16,29 @@ */ public class Status implements Serializable { private static final long serialVersionUID = -3794191127754280439L; + + /** + * The status code. + */ private final Code code; + + /** + * The status sub-code. + */ /* @Nullable */ private final SubCode subCode; + + /** + * The state of the status. + */ /* @Nullable */ private final String state; + /** + * Constructs a Status. + * + * @param code the code. + * @param subCode the sub-code. + * @param state the state. + */ public Status(final Code code, final SubCode subCode, final String state) { this.code = code; this.subCode = subCode; @@ -35,18 +54,38 @@ private Status(final byte code, final byte subCode, final String state) { this.state = state; } + /** + * Get the status code. + * + * @return the status code. + */ public Code getCode() { return code; } + /** + * Get the status sub-code. + * + * @return the status sub-code. + */ public SubCode getSubCode() { return subCode; } + /** + * Get the state of the status. + * + * @return the status state. + */ public String getState() { return state; } + /** + * Get a string representation of the status code. + * + * @return a string representation of the status code. + */ public String getCodeString() { final StringBuilder builder = new StringBuilder() .append(code.name()); @@ -58,22 +97,85 @@ public String getCodeString() { return builder.toString(); } - // should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + /** + * Status Code. + *

+ * Should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + */ public enum Code { + /** + * Success. + */ Ok( (byte)0x0), + + /** + * Not found. + */ NotFound( (byte)0x1), + + /** + * Corruption detected. + */ Corruption( (byte)0x2), + + /** + * Not supported. + */ NotSupported( (byte)0x3), + + /** + * Invalid argument provided. + */ InvalidArgument( (byte)0x4), + + /** + * I/O error. + */ IOError( (byte)0x5), + + /** + * There is a merge in progress. + */ MergeInProgress( (byte)0x6), + + /** + * Incomplete. + */ Incomplete( (byte)0x7), + + /** + * There is a shutdown in progress. + */ ShutdownInProgress( (byte)0x8), + + /** + * An operation timed out. + */ TimedOut( (byte)0x9), + + /** + * An operation was aborted. + */ Aborted( (byte)0xA), + + /** + * The system is busy. + */ Busy( (byte)0xB), + + /** + * The request expired. + */ Expired( (byte)0xC), + + /** + * The operation should be reattempted. + */ TryAgain( (byte)0xD), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -82,6 +184,15 @@ public enum Code { this.value = value; } + /** + * Get a code from its byte representation. + * + * @param value the byte representation of the code. + * + * @return the code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a code. + */ public static Code getCode(final byte value) { for (final Code code : Code.values()) { if (code.value == value){ @@ -102,16 +213,56 @@ public byte getValue() { } } - // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + /** + * Status Sub-code. + *

+ * should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + */ public enum SubCode { + + /** + * None. + */ None( (byte)0x0), + + /** + * Timeout whilst waiting on Mutex. + */ MutexTimeout( (byte)0x1), + + /** + * Timeout whilst waiting on Lock. + */ LockTimeout( (byte)0x2), + + /** + * Maximum limit on number of locks reached. + */ LockLimit( (byte)0x3), + + /** + * No space remaining. + */ NoSpace( (byte)0x4), + + /** + * Deadlock detected. + */ Deadlock( (byte)0x5), + + /** + * Stale file detected. + */ StaleFile( (byte)0x6), + + /** + * Reached the maximum memory limit. + */ MemoryLimit( (byte)0x7), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -120,6 +271,15 @@ public enum SubCode { this.value = value; } + /** + * Get a sub-code from its byte representation. + * + * @param value the byte representation of the sub-code. + * + * @return the sub-code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a sub-code. + */ public static SubCode getSubCode(final byte value) { for (final SubCode subCode : SubCode.values()) { if (subCode.value == value){ diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java index 547371e7c08b..f383de4dc12f 100644 --- a/java/src/main/java/org/rocksdb/StringAppendOperator.java +++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -11,14 +11,27 @@ * two strings. */ public class StringAppendOperator extends MergeOperator { + /** + * Constructs a StringAppendOperator. + */ public StringAppendOperator() { this(','); } + /** + * Constructs a StringAppendOperator. + * + * @param delim the character delimiter to use when appending. + */ public StringAppendOperator(final char delim) { super(newSharedStringAppendOperator(delim)); } + /** + * Constructs a StringAppendOperator. + * + * @param delim the string delimiter to use when appending. + */ public StringAppendOperator(final String delim) { super(newSharedStringAppendOperator(delim)); } diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java index 8dc56796a25d..aaf34b2cbd57 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Brief information on Table File creation. + */ public class TableFileCreationBriefInfo { private final String dbName; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java index 5654603c3833..1b65712b3b3b 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File creation. 
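A short sketch of the StringAppendOperator documented above, installed as the merge operator so that merge() concatenates values with the chosen delimiter; the path and keys are illustrative:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import org.rocksdb.*;

public class StringAppendExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final StringAppendOperator appender = new StringAppendOperator(',');
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(appender);
         final RocksDB db = RocksDB.open(options, "/tmp/append-example")) {
      db.merge("fruit".getBytes(UTF_8), "apple".getBytes(UTF_8));
      db.merge("fruit".getBytes(UTF_8), "pear".getBytes(UTF_8));
      // prints "apple,pear"
      System.out.println(new String(db.get("fruit".getBytes(UTF_8)), UTF_8));
    }
  }
}
```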
+ */ public class TableFileCreationInfo extends TableFileCreationBriefInfo { private final long fileSize; private final TableProperties tableProperties; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java index d3984663dd28..f45da28e5776 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java @@ -5,10 +5,29 @@ package org.rocksdb; +/** + * Reasons for Table File creation. + */ public enum TableFileCreationReason { + + /** + * Flush. + */ FLUSH((byte) 0x00), + + /** + * Compaction. + */ COMPACTION((byte) 0x01), + + /** + * Recovery. + */ RECOVERY((byte) 0x02), + + /** + * Miscellaneous. + */ MISC((byte) 0x03); private final byte value; diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java index 9a777e3336c2..87bd2b8c87af 100644 --- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File deletion. + */ public class TableFileDeletionInfo { private final String dbName; private final String filePath; diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java index 4211453d1a0b..c75d85d276f6 100644 --- a/java/src/main/java/org/rocksdb/ThreadStatus.java +++ b/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -7,6 +7,9 @@ import java.util.Map; +/** + * The status of a Thread. + */ public class ThreadStatus { private final long threadId; private final ThreadType threadType; @@ -155,6 +158,13 @@ public static String getOperationName(final OperationType operationType) { return getOperationName(operationType.getValue()); } + /** + * Converts microseconds to a string representation. + * + * @param operationElapsedTime the microseconds. + * + * @return the string representation. + */ public static String microsToString(final long operationElapsedTime) { return microsToStringNative(operationElapsedTime); } diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index f2ca42776e79..381390678de4 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -19,7 +19,7 @@ public enum TickerType { /** * total block cache misses - * + *

* REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + * BLOCK_CACHE_FILTER_MISS + * BLOCK_CACHE_DATA_MISS; @@ -28,27 +28,30 @@ public enum TickerType { /** * total block cache hit - * + *

* REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + * BLOCK_CACHE_FILTER_HIT + * BLOCK_CACHE_DATA_HIT; */ BLOCK_CACHE_HIT((byte) 0x1), + /** + * Number of blocks added to block cache. + */ BLOCK_CACHE_ADD((byte) 0x2), /** - * # of failures when adding blocks to block cache. + * Number of failures when adding blocks to block cache. */ BLOCK_CACHE_ADD_FAILURES((byte) 0x3), /** - * # of times cache miss when accessing index block from block cache. + * Number of times cache miss when accessing index block from block cache. */ BLOCK_CACHE_INDEX_MISS((byte) 0x4), /** - * # of times cache hit when accessing index block from block cache. + * Number of times cache hit when accessing index block from block cache. */ BLOCK_CACHE_INDEX_HIT((byte) 0x5), @@ -63,12 +66,12 @@ public enum TickerType { BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), /** - * # of times cache miss when accessing filter block from block cache. + * Number of times cache miss when accessing filter block from block cache. */ BLOCK_CACHE_FILTER_MISS((byte) 0x9), /** - * # of times cache hit when accessing filter block from block cache. + * Number of times cache hit when accessing filter block from block cache. */ BLOCK_CACHE_FILTER_HIT((byte) 0xA), @@ -83,12 +86,12 @@ public enum TickerType { BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xC), /** - * # of times cache miss when accessing data block from block cache. + * Number of times cache miss when accessing data block from block cache. */ BLOCK_CACHE_DATA_MISS((byte) 0xE), /** - * # of times cache hit when accessing data block from block cache. + * Number of times cache hit when accessing data block from block cache. */ BLOCK_CACHE_DATA_HIT((byte) 0xF), @@ -113,7 +116,7 @@ public enum TickerType { BLOCK_CACHE_BYTES_WRITE((byte) 0x13), /** - * # of times bloom filter has avoided file reads. + * Number of times bloom filter has avoided file reads. */ BLOOM_FILTER_USEFUL((byte) 0x14), @@ -163,23 +166,18 @@ public enum TickerType { GET_HIT_L2_AND_UP((byte) 0x1D), /** - * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction - * There are 4 reasons currently. - */ - - /** - * key was written with a newer value. + * Compaction dropped the key because there is a newer entry. */ COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x1E), /** + * Compaction dropped the key because it is obsolete. * Also includes keys dropped for range del. - * The key is obsolete. */ COMPACTION_KEY_DROP_OBSOLETE((byte) 0x1F), /** - * key was covered by a range tombstone. + * Compaction dropped the key because it was covered by a range tombstone. */ COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x20), @@ -189,7 +187,7 @@ public enum TickerType { COMPACTION_KEY_DROP_USER((byte) 0x21), /** - * all keys in range were deleted. + * Compaction dropped the key as all keys in range were deleted. */ COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x22), @@ -217,7 +215,7 @@ public enum TickerType { /** * The number of uncompressed bytes read from DB::Get(). It could be * either from memtables, cache, or table files. - * + *

* For the number of logical bytes read from DB::MultiGet(), * please use {@link #NUMBER_MULTIGET_BYTES_READ}. */ @@ -259,8 +257,14 @@ public enum TickerType { */ ITER_BYTES_READ((byte) 0x2E), + /** + * The number of calls to open a file. + */ NO_FILE_OPENS((byte) 0x30), + /** + * The number of file errors. + */ NO_FILE_ERRORS((byte) 0x31), /** @@ -270,7 +274,7 @@ public enum TickerType { /** * The wait time for db mutex. - * + *
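Since DB_MUTEX_WAIT_MICROS below is only populated at StatsLevel.ALL, a hedged sketch of raising the stats level and reading tickers; the path and keys are illustrative:

```java
import org.rocksdb.*;

public class TickerExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Statistics stats = new Statistics();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(stats);
         final RocksDB db = RocksDB.open(options, "/tmp/ticker-example")) {
      stats.setStatsLevel(StatsLevel.ALL); // enable e.g. DB_MUTEX_WAIT_MICROS
      db.put("k".getBytes(), "v".getBytes());
      db.get("k".getBytes());
      System.out.println("block cache misses: "
          + stats.getTickerCount(TickerType.BLOCK_CACHE_MISS));
      System.out.println("db mutex wait micros: "
          + stats.getTickerCount(TickerType.DB_MUTEX_WAIT_MICROS));
    }
  }
}
```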

* Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} */ DB_MUTEX_WAIT_MICROS((byte) 0x36), @@ -290,14 +294,21 @@ */ NUMBER_MULTIGET_BYTES_READ((byte) 0x3B), + /** + * Number of merge failures. + */ NUMBER_MERGE_FAILURES((byte) 0x3D), /** - * Number of times bloom was checked before creating iterator on a + * Number of times the bloom filter was checked before creating iterator on a * file, and the number of times the check was useful in avoiding * iterator creation (and thus likely IOPs). */ BLOOM_FILTER_PREFIX_CHECKED((byte) 0x3E), + + /** + * Number of times the bloom filter returned false, and so prevented accessing data+index blocks. + */ BLOOM_FILTER_PREFIX_USEFUL((byte) 0x3F), /** @@ -358,42 +369,75 @@ * table reader object. */ NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x4F), + + /** + * Number of times the superversion was acquired. + */ NUMBER_SUPERVERSION_ACQUIRES((byte) 0x50), + + /** + * Number of times the superversion was released. + */ NUMBER_SUPERVERSION_RELEASES((byte) 0x51), + + /** + * Number of times the superversion was cleaned up. + */ NUMBER_SUPERVERSION_CLEANUPS((byte) 0x52), /** - * # of compressions/decompressions executed + * Number of block compressions executed. */ NUMBER_BLOCK_COMPRESSED((byte) 0x53), + + /** + * Number of block de-compressions executed. + */ NUMBER_BLOCK_DECOMPRESSED((byte) 0x54), + /** + * Number of blocks not compressed. + */ + @Deprecated NUMBER_BLOCK_NOT_COMPRESSED((byte) 0x55), + + /** + * Total time spent on merge operations. + */ MERGE_OPERATION_TOTAL_TIME((byte) 0x56), + + /** + * Total time spent on filter operations. + */ FILTER_OPERATION_TOTAL_TIME((byte) 0x57), /** - * Row cache. + * Number of row cache hits. */ ROW_CACHE_HIT((byte) 0x58), - ROW_CACHE_MISS((byte) 0x59), /** - * Read amplification statistics. - * - * Read amplification can be calculated using this formula - * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - * - * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled + * Number of row cache misses. */ ROW_CACHE_MISS((byte) 0x59), /** - * Estimate of total bytes actually used. + * Read amplification estimate of total bytes actually used. + *

+ * Read amplification can be calculated using this formula + * ({@link #READ_AMP_TOTAL_READ_BYTES} / {@link #READ_AMP_ESTIMATE_USEFUL_BYTES}) + *

+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled */ READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x5A), /** - * Total size of loaded data blocks. + * Read amplification estimate of total size of loaded data blocks. + *

+ * Read amplification can be calculated using this formula + * ({@link #READ_AMP_TOTAL_READ_BYTES} / {@link #READ_AMP_ESTIMATE_USEFUL_BYTES}) + *

+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled */ READ_AMP_TOTAL_READ_BYTES((byte) 0x5B), @@ -434,12 +478,12 @@ COMPACTION_CANCELLED((byte) 0x62), /** - * # of times bloom FullFilter has not avoided the reads. + * Number of times bloom FullFilter has not avoided the reads. */ BLOOM_FILTER_FULL_POSITIVE((byte) 0x63), /** - * # of times bloom FullFilter has not avoided the reads and data actually + * Number of times bloom FullFilter has not avoided the reads and data actually * exist. */ BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x64), @@ -531,7 +575,7 @@ BLOB_DB_BLOB_FILE_BYTES_READ((byte) 0x75), /** - * # of times a blob files being synced. + * Number of times a blob file was synced. */ BLOB_DB_BLOB_FILE_SYNCED((byte) 0x76), @@ -602,27 +646,27 @@ /** * These counters indicate a performance issue in WritePrepared transactions. * We should not seem them ticking them much. - * # of times prepare_mutex_ is acquired in the fast path. + * Number of times prepare_mutex_ is acquired in the fast path. */ TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x09), /** - * # of times old_commit_map_mutex_ is acquired in the fast path. + * Number of times old_commit_map_mutex_ is acquired in the fast path. */ TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x0A), /** - * # of times we checked a batch for duplicate keys. + * Number of times we checked a batch for duplicate keys. */ TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x0B), /** - * # of times snapshot_mutex_ is acquired in the fast path. + * Number of times snapshot_mutex_ is acquired in the fast path. */ TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x0C), /** - * # of times ::Get returned TryAgain due to expired snapshot seq + * Number of times ::Get returned TryAgain due to expired snapshot seq */ TXN_GET_TRY_AGAIN((byte) -0x0D), @@ -637,23 +681,63 @@ FILES_DELETED_IMMEDIATELY((byte) -0x0f), /** - * Compaction read and write statistics broken down by CompactionReason + * Compaction bytes read and marked. */ COMPACT_READ_BYTES_MARKED((byte) -0x10), + + /** + * Periodic compaction bytes read. + */ COMPACT_READ_BYTES_PERIODIC((byte) -0x11), + + /** + * Compaction bytes read for TTL. + */ COMPACT_READ_BYTES_TTL((byte) -0x12), + + /** + * Compaction bytes written and marked. + */ COMPACT_WRITE_BYTES_MARKED((byte) -0x13), + + /** + * Periodic compaction bytes written. + */ COMPACT_WRITE_BYTES_PERIODIC((byte) -0x14), + + /** + * Compaction bytes written for TTL. + */ COMPACT_WRITE_BYTES_TTL((byte) -0x15), /** - * DB error handler statistics + * DB error handler error count. */ ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x16), + + /** + * DB error handler background I/O error count. + */ ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x17), + + /** + * DB error handler background retryable I/O error count. + */ ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x18), + + /** + * DB error handler auto-resume count. + */ ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x19), + + /** + * DB error handler auto-resume retry count. + */ ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x1A), + + /** + * DB error handler auto-resume success count. + */ ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x1B), /** @@ -679,33 +763,73 @@ VERIFY_CHECKSUM_READ_BYTES((byte) -0x1F), /** - * Bytes read/written while creating backups + * Bytes read while creating backups. */ BACKUP_READ_BYTES((byte) -0x20), + + /** + * Bytes written while creating backups. 
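Putting the two READ_AMP tickers documented above together, a hedged sketch of computing the read amplification ratio; it assumes read amplification tracking was enabled via read_amp_bytes_per_bit in the table options, and guards against division by zero:

```java
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class ReadAmpExample {
  // READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES,
  // as per the TickerType javadoc above.
  static double readAmplification(final Statistics stats) {
    final long useful = stats.getTickerCount(TickerType.READ_AMP_ESTIMATE_USEFUL_BYTES);
    final long total = stats.getTickerCount(TickerType.READ_AMP_TOTAL_READ_BYTES);
    return useful == 0 ? 0.0 : (double) total / (double) useful;
  }
}
```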
+ */ BACKUP_WRITE_BYTES((byte) -0x21), /** - * Remote compaction read/write statistics + * Bytes read by remote compaction. */ REMOTE_COMPACT_READ_BYTES((byte) -0x22), + + /** + * Bytes written by remote compaction. + */ REMOTE_COMPACT_WRITE_BYTES((byte) -0x23), /** - * Tiered storage related statistics + * Number of bytes read by tiered storage hot-file(s). */ HOT_FILE_READ_BYTES((byte) -0x24), + + /** + * Number of bytes read by tiered storage warm-file(s). + */ WARM_FILE_READ_BYTES((byte) -0x25), + + /** + * Number of bytes read by tiered storage cold-file(s). + */ COLD_FILE_READ_BYTES((byte) -0x26), + + /** + * Number of reads on tiered storage hot-file(s). + */ HOT_FILE_READ_COUNT((byte) -0x27), + + /** + * Number of reads on tiered storage warm-file(s). + */ WARM_FILE_READ_COUNT((byte) -0x28), + + /** + * Number of reads on tiered storage cold-file(s). + */ COLD_FILE_READ_COUNT((byte) -0x29), /** - * (non-)last level read statistics + * Bytes read from the last level. */ LAST_LEVEL_READ_BYTES((byte) -0x2A), + + /** + * Number of reads on the last level. + */ LAST_LEVEL_READ_COUNT((byte) -0x2B), + + /** + * Bytes read from non-last level(s). + */ NON_LAST_LEVEL_READ_BYTES((byte) -0x2C), + + /** + * Number of reads from non-last level(s). + */ NON_LAST_LEVEL_READ_COUNT((byte) -0x2D), /** @@ -714,12 +838,12 @@ public enum TickerType { BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x2E), /** - * # of times cache miss when accessing blob from blob cache. + * Number of times cache miss when accessing blob from blob cache. */ BLOB_DB_CACHE_MISS((byte) -0x2F), /** - * # of times cache hit when accessing blob from blob cache. + * Number of times cache hit when accessing blob from blob cache. */ BLOB_DB_CACHE_HIT((byte) -0x30), @@ -764,18 +888,40 @@ public enum TickerType { */ BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x3C), + /** + * Number of times readahead is trimmed during scans when + * {@link ReadOptions#setReadaheadSize(long)} is set. + */ READAHEAD_TRIMMED((byte) -0x3D), + /** + * FIFO compactions that drop files of a maximum size. + */ FIFO_MAX_SIZE_COMPACTIONS((byte) -0x3E), + /** + * FIFO compactions that drop files exceeding a TTL. + */ FIFO_TTL_COMPACTIONS((byte) -0x3F), + /** + * Number of bytes prefetched during user initiated scan. + */ PREFETCH_BYTES((byte) -0x40), + /** + * Number of prefetched bytes that were actually useful. + */ PREFETCH_BYTES_USEFUL((byte) -0x41), + /** + * Number of FS reads avoided due to scan prefetching. + */ PREFETCH_HITS((byte) -0x42), + /** + * maximum number of ticker types. + */ TICKER_ENUM_MAX((byte) 0x5F); private final byte value; diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java index cf5f7bbe12f8..85c8abcc2500 100644 --- a/java/src/main/java/org/rocksdb/TraceOptions.java +++ b/java/src/main/java/org/rocksdb/TraceOptions.java @@ -12,10 +12,18 @@ public class TraceOptions { private final long maxTraceFileSize; + /** + * Constructs a TraceOptions. + */ public TraceOptions() { this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB } + /** + * Constructs a TraceOptions. + * + * @param maxTraceFileSize the maximum size of the trace file. 
+ */ public TraceOptions(final long maxTraceFileSize) { this.maxTraceFileSize = maxTraceFileSize; } diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index cab7ed28737f..28c3fa3a5c9a 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -184,7 +184,9 @@ public void clearSnapshot() { } /** - * Prepare the current transaction for 2PC + * Prepare the current transaction for 2PC. + * + * @throws RocksDBException if the transaction cannot be prepared */ public void prepare() throws RocksDBException { //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit) @@ -257,7 +259,7 @@ public void rollbackToSavePoint() throws RocksDBException { /** * This function has an inconsistent parameter order compared to other {@code get()} * methods and is deprecated in favour of one with a consistent order. - * + *
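To illustrate the prepare() call documented above, a minimal two-phase-commit sketch against a TransactionDB; the transaction must be given a name before prepare(), and the path and name here are illustrative assumptions:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import org.rocksdb.*;

public class TwoPhaseCommitExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB txnDb =
             TransactionDB.open(options, txnDbOptions, "/tmp/2pc-example");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.setName("txn-1"); // a name is required before prepare()
      txn.put("k".getBytes(UTF_8), "v".getBytes(UTF_8));
      txn.prepare();        // phase 1: persist the prepare record
      txn.commit();         // phase 2: commit
    }
  }
}
```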

* This function is similar to * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will * also read pending changes in this transaction. @@ -297,11 +299,11 @@ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *

* If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *

* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -560,7 +562,7 @@ public byte[][] multiGet(final ReadOptions readOptions, * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -646,7 +648,7 @@ public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys) * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1157,7 +1159,6 @@ public GetStatus getForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} @@ -1193,14 +1194,13 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} * instances * @param keys the keys to retrieve the values for. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1229,7 +1229,6 @@ public List multiGetForUpdateAsList(final ReadOptions readOptions, /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1253,7 +1252,6 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][] /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1300,7 +1298,7 @@ public RocksIterator getIterator() { * Returns an iterator that will iterate on all keys in the default * column family including both keys in the DB and uncommitted keys in this * transaction. - * + *

* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be @@ -1526,10 +1524,10 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1565,10 +1563,10 @@ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBExce /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1608,6 +1606,30 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke key.position(key.limit()); value.position(value.limit()); } + + /** + * Similar to {@link RocksDB#put(byte[], byte[])}, but + * will also perform conflict checking on the keys being written. + *

+ * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + *

+ * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { put(columnFamilyHandle, key, value, false); @@ -1731,10 +1753,10 @@ public void merge(final byte[] key, final byte[] value) /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *
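The conflict conditions listed above can be observed directly. A hedged sketch in which a second transaction fails to lock a key already written by the first; the path and keys are illustrative, and the exact status code (Busy or TimedOut) depends on the lock timeout configuration:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import org.rocksdb.*;

public class ConflictExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB txnDb =
             TransactionDB.open(options, txnDbOptions, "/tmp/conflict-example");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn1 = txnDb.beginTransaction(writeOptions);
         final Transaction txn2 = txnDb.beginTransaction(writeOptions)) {
      txn1.put("k".getBytes(UTF_8), "v1".getBytes(UTF_8)); // txn1 locks "k"
      try {
        txn2.put("k".getBytes(UTF_8), "v2".getBytes(UTF_8));
      } catch (final RocksDBException e) {
        final Status status = e.getStatus();
        System.out.println(status == null ? "conflict" : status.getCode());
        txn2.rollback();
      }
      txn1.commit();
    }
  }
}
```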

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1768,10 +1790,10 @@ public void merge(final ByteBuffer key, final ByteBuffer value) throws RocksDBEx /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1811,10 +1833,10 @@ public void merge(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -2268,10 +2290,10 @@ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle, * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2331,10 +2353,10 @@ public void mergeUntracked(final byte[] key, final byte[] value) * Similar to {@link RocksDB#merge(byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2777,20 +2799,57 @@ public long getId() { return getId(nativeHandle_); } + /** + * States of a Transaction. + */ public enum TransactionState { + /** + * Transaction started. + */ STARTED((byte)0), + + /** + * Transaction is awaiting prepare. + */ AWAITING_PREPARE((byte)1), + + /** + * Transaction is prepared. + */ PREPARED((byte)2), + + /** + * Transaction awaiting commit. + */ AWAITING_COMMIT((byte)3), + + /** + * Transaction is committed. + */ COMMITTED((byte)4), + + /** + * Transaction is awaiting rollback. + */ AWAITING_ROLLBACK((byte)5), + + /** + * Transaction rolled-back. + */ ROLLEDBACK((byte)6), + + /** + * Transaction locks have been stolen. + */ LOCKS_STOLEN((byte)7); - /* - * Keep old misspelled variable as alias - * Tip from https://stackoverflow.com/a/37092410/454544 + /** + * Old misspelled variable as alias for {@link #COMMITTED}. + * Tip from https://stackoverflow.com/a/37092410/454544 + * + * @deprecated use {@link #COMMITTED} instead. */ + @Deprecated public static final TransactionState COMMITED = COMMITTED; private final byte value; @@ -2835,6 +2894,9 @@ private WaitingTransactions newWaitingTransactions( return new WaitingTransactions(columnFamilyId, key, transactionIds); } + /** + * Waiting Transactions. + */ public static class WaitingTransactions { private final long columnFamilyId; private final String key; diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java index a4ee951dc994..940dd4d92ec5 100644 --- a/java/src/main/java/org/rocksdb/TransactionDB.java +++ b/java/src/main/java/org/rocksdb/TransactionDB.java @@ -203,6 +203,14 @@ public Transaction beginTransaction(final WriteOptions writeOptions, return oldTransaction; } + /** + * Gets a transaction by name. + * + * @param transactionName the name of the transaction. + * + * @return the transaction, or null if the transaction can't be found. + * + */ public Transaction getTransactionByName(final String transactionName) { final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName); if(jtxnHandle == 0) { @@ -217,6 +225,11 @@ public Transaction getTransactionByName(final String transactionName) { return txn; } + /** + * Gets a list of all prepared transactions. + * + * @return the list of prepared transactions. + */ public List getAllPreparedTransactions() { final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_); @@ -232,11 +245,21 @@ public List getAllPreparedTransactions() { return txns; } + /** + * Information on Key Locks. + */ public static class KeyLockInfo { private final String key; private final long[] transactionIDs; private final boolean exclusive; + /** + * Constructs a KeyLockInfo. + * + * @param key the key. + * @param transactionIDs the transaction ids + * @param exclusive true if the lock is exclusive, false if the lock is shared. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public KeyLockInfo(final String key, final long[] transactionIDs, final boolean exclusive) { this.key = key; @@ -300,6 +323,9 @@ private DeadlockInfo newDeadlockInfo(final long transactionID, final long column waitingKey, exclusive); } + /** + * Information on a Deadlock. 
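Tying together getAllPreparedTransactions() and the TransactionState values documented in this range, a hedged recovery sketch that rolls back transactions left in the PREPARED state after a restart; the rollback-everything policy is an illustrative assumption, real applications decide commit versus rollback per transaction:

```java
import java.util.List;
import org.rocksdb.*;

public class RecoveryExample {
  // Decide the fate of transactions that were prepared but not yet
  // committed when the process stopped (policy is application-specific).
  static void recover(final TransactionDB txnDb) throws RocksDBException {
    final List<Transaction> prepared = txnDb.getAllPreparedTransactions();
    for (final Transaction txn : prepared) {
      if (txn.getState() == Transaction.TransactionState.PREPARED) {
        txn.rollback(); // illustrative policy: roll everything back
      }
      txn.close();
    }
  }
}
```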
+ */ public static class DeadlockInfo { private final long transactionID; private final long columnFamilyId; @@ -351,25 +377,49 @@ public boolean isExclusive() { } } + /** + * The path of a Deadlock. + */ public static class DeadlockPath { final DeadlockInfo[] path; final boolean limitExceeded; + /** + * Constructs a DeadlockPath. + * + * @param path the deadlock path + * @param limitExceeded true if the limit is exceeded, false otherwise. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) { this.path = path; this.limitExceeded = limitExceeded; } + /** + * Returns true if there are no paths and the limit is not exceeded. + * + * @return true if empty, false otherwise. + */ public boolean isEmpty() { return path.length == 0 && !limitExceeded; } } + /** + * Get Deadlock Information. + * + * @return the deadlock paths. + */ public DeadlockPath[] getDeadlockInfoBuffer() { return getDeadlockInfoBuffer(nativeHandle_); } + /** + * Set the size of the deadlock information buffer. + * + * @param targetSize the target size of the buffer. + */ public void setDeadlockInfoBufferSize(final int targetSize) { setDeadlockInfoBufferSize(nativeHandle_, targetSize); } diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java index 391025d6ae9d..92c9fe56e0d8 100644 --- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -5,8 +5,14 @@ package org.rocksdb; +/** + * Options for TransactionDB. + */ public class TransactionDBOptions extends RocksObject { + /** + * Constructs a TransactionDBOptions. + */ public TransactionDBOptions() { super(newTransactionDBOptions()); } @@ -110,16 +116,15 @@ public TransactionDBOptions setTransactionLockTimeout( /** * The wait timeout in milliseconds when writing a key - * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} * directly). *
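A small sketch of the deadlock-information API documented above: size the buffer, then poll it. Of the members shown here only isEmpty() is public, so the sketch simply reports whether any deadlock paths were recorded:

```java
import org.rocksdb.TransactionDB;

public class DeadlockExample {
  static void reportDeadlocks(final TransactionDB txnDb) {
    txnDb.setDeadlockInfoBufferSize(10); // retain up to 10 deadlock paths
    final TransactionDB.DeadlockPath[] paths = txnDb.getDeadlockInfoBuffer();
    for (final TransactionDB.DeadlockPath path : paths) {
      if (!path.isEmpty()) {
        System.out.println("deadlock path recorded");
      }
    }
  }
}
```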

* If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, there is no timeout and will block indefinitely when acquiring * a lock. * - * @return the timeout in milliseconds when writing a key OUTSIDE of a - * transaction + * @return the timeout in milliseconds when writing a key outside of the transaction */ public long getDefaultLockTimeout() { assert(isOwningHandle()); @@ -128,8 +133,8 @@ public long getDefaultLockTimeout() { /** * If positive, specifies the wait timeout in milliseconds when writing a key - * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} * directly). *

* If 0, no waiting is done if a lock cannot instantly be acquired. @@ -145,7 +150,7 @@ public long getDefaultLockTimeout() { * Default: 1000 * * @param defaultLockTimeout the timeout in milliseconds when writing a key - * OUTSIDE of a transaction + * outside of the transaction * @return this TransactionDBOptions instance */ public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) { diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java index f93d3cb3cbb8..9755724f1285 100644 --- a/java/src/main/java/org/rocksdb/TransactionOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -5,9 +5,15 @@ package org.rocksdb; +/** + * Options for a Transaction. + */ public class TransactionOptions extends RocksObject implements TransactionalOptions { + /** + * Constructs a TransactionOptions. + */ public TransactionOptions() { super(newTransactionOptions()); } @@ -56,7 +62,7 @@ public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) { * The wait timeout in milliseconds when a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used * * @return the lock timeout in milliseconds @@ -71,7 +77,7 @@ public long getLockTimeout() { * a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used *

* Default: -1 diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java index 0cffdce8c117..b028bcc45df6 100644 --- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java +++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -10,6 +10,9 @@ * integer value. */ public class UInt64AddOperator extends MergeOperator { + /** + * Constructs a UInt64AddOperator. + */ public UInt64AddOperator() { super(newSharedUInt64AddOperator()); } diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java index fb1e7a948549..9bbf8eed56f5 100644 --- a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java @@ -5,6 +5,10 @@ * The config for vector memtable representation. */ public class VectorMemTableConfig extends MemTableConfig { + + /** + * The default reserved size for the Vector Mem Table. + */ public static final int DEFAULT_RESERVED_SIZE = 0; /** diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index 25d6e6f9d666..1bc51627dba3 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -7,10 +7,19 @@ import java.nio.ByteBuffer; +/** + * Iterator over the contents of a Write Batch With Index. + */ public class WBWIRocksIterator extends AbstractRocksIterator { private final WriteEntry entry = new WriteEntry(); + /** + * Constructs a WBWIRocksIterator. + * + * @param wbwi the write batch with index. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ WBWIRocksIterator. + */ protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) { super(wbwi, nativeHandle); @@ -70,12 +79,40 @@ final native void seekForPrevByteArray0( * that created the record in the Write Batch */ public enum WriteType { + + /** + * Put. + */ PUT((byte)0x0), + + /** + * Merge. + */ MERGE((byte)0x1), + + /** + * Delete. + */ DELETE((byte)0x2), + + /** + * Single Delete. + */ SINGLE_DELETE((byte)0x3), + + /** + * Delete Range. + */ DELETE_RANGE((byte)0x4), + + /** + * Log. + */ LOG((byte)0x5), + + /** + * Transaction ID. + */ XID((byte)0x6); final byte id; @@ -83,13 +120,22 @@ public enum WriteType { this.id = id; } - public static WriteType fromId(final byte id) { + /** + * Get a WriteType from its byte representation. + * + * @param value the byte representation of the WriteType. + * + * @return the WriteType + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a WriteType. + */ + public static WriteType fromId(final byte value) { for(final WriteType wt : WriteType.values()) { - if(id == wt.id) { + if(value == wt.id) { return wt; } } - throw new IllegalArgumentException("No WriteType with id=" + id); + throw new IllegalArgumentException("No WriteType with id=" + value); } } @@ -125,6 +171,13 @@ private WriteEntry() { value = new DirectSlice(); } + /** + * Constructs a WriteEntry. + * + * @param type the type of the write. + * @param key the key. + * @param value the value. 
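For the WBWIRocksIterator and WriteEntry types documented above, a hedged sketch of iterating the uncommitted contents of a WriteBatchWithIndex; the keys are illustrative:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import org.rocksdb.*;

public class WbwiExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
      wbwi.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
      wbwi.delete("k2".getBytes(UTF_8));
      try (final WBWIRocksIterator it = wbwi.newIterator()) {
        for (it.seekToFirst(); it.isValid(); it.next()) {
          final WBWIRocksIterator.WriteEntry entry = it.entry();
          // e.g. "PUT k1" then "DELETE k2"
          System.out.println(entry.getType() + " " + entry.getKey().toString());
        }
      }
    }
  }
}
```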
+ */ public WriteEntry(final WriteType type, final DirectSlice key, final DirectSlice value) { this.type = type; diff --git a/java/src/main/java/org/rocksdb/WalFileType.java b/java/src/main/java/org/rocksdb/WalFileType.java index fed27ed11705..371f2e7b2ff6 100644 --- a/java/src/main/java/org/rocksdb/WalFileType.java +++ b/java/src/main/java/org/rocksdb/WalFileType.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Types of WAL file. + */ public enum WalFileType { /** * Indicates that WAL file is in archive directory. WAL files are moved from diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java index a2836634af65..eac7b657f18d 100644 --- a/java/src/main/java/org/rocksdb/WalFilter.java +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -56,7 +56,14 @@ LogRecordFoundResult logRecordFound(final long logNumber, final String logFileName, final WriteBatch batch, final WriteBatch newBatch); + /** + * LogRecordFoundResult. + */ class LogRecordFoundResult { + + /** + * Constant for continuing processing unchanged. + */ public static LogRecordFoundResult CONTINUE_UNCHANGED = new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false); diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java index 3a9c2be0e3b5..a37c83634587 100644 --- a/java/src/main/java/org/rocksdb/WalProcessingOption.java +++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -5,8 +5,11 @@ package org.rocksdb; +/** + * Options for WAL processing. + */ public enum WalProcessingOption { - /* + /** * Continue processing as usual. */ CONTINUE_PROCESSING((byte)0x0), @@ -42,6 +45,15 @@ byte getValue() { return value; } + /** + * Get an option from its byte representation. + * + * @param value the byte representation of the option. + * + * @return the option + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent an option. + */ public static WalProcessingOption fromValue(final byte value) { for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) { if (walProcessingOption.value == value) { diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 49e1f7f204a2..1ac3c472da5e 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -282,6 +282,9 @@ private native void iterate(final long handle, final long handlerHandle) * Handler callback for iterating over the contents of a batch. */ public abstract static class Handler extends RocksCallbackObject { + /** + * Constructs a Handler. + */ public Handler() { super(0L); } @@ -291,39 +294,182 @@ protected long initializeNative(final long... nativeParameterHandles) { return createNewHandler0(); } + /** + * Put operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put operation. + * @param value the value from the put operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void put(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Put operation callback. + * + * @param key the key from the put operation. + * @param value the value from the put operation. + */ public abstract void put(final byte[] key, final byte[] value); + + /** + * Merge operation callback. 
+ * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the merge operation. + * @param value the value from the merge operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void merge(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Merge operation callback. + * + * @param key the key from the merge operation. + * @param value the value from the merge operation. + */ public abstract void merge(final byte[] key, final byte[] value); + + /** + * Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void delete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Delete operation callback. + * + * @param key the key from the delete operation. + */ public abstract void delete(final byte[] key); + + /** + * Single Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the single delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void singleDelete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Single Delete operation callback. + * + * @param key the key from the single delete operation. + */ public abstract void singleDelete(final byte[] key); + + /** + * Delete Range operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void deleteRange(final int columnFamilyId, final byte[] beginKey, final byte[] endKey) throws RocksDBException; + + /** + * Delete Range operation callback. + * + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + */ public abstract void deleteRange(final byte[] beginKey, final byte[] endKey); + + /** + * Log Data operation callback. + * + * @param blob the blob from the log data operation. + */ public abstract void logData(final byte[] blob); + + /** + * Put Blob Index operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put blob index operation. + * @param value the value from the put blob index operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void putBlobIndex(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Mark Begin Prepare operation callback. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markBeginPrepare() throws RocksDBException; + + /** + * Mark End Prepare operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markEndPrepare(final byte[] xid) throws RocksDBException; + + /** + * Mark Noop operation callback. + * + * @param emptyBatch true if the batch was empty, false otherwise. 
+ * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markNoop(final boolean emptyBatch) throws RocksDBException; + + /** + * Mark Rollback operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markRollback(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markCommit(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit With Timestamp operation callback. + * + * @param xid the transaction id. + * @param ts the timestamp. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws RocksDBException; /** - * shouldContinue is called by the underlying iterator + * Called by the underlying iterator * {@link WriteBatch#iterate(Handler)}. If it returns false, * iteration is halted. Otherwise, it continues * iterating. The default implementation always @@ -347,6 +493,13 @@ public static class SavePoint { private long count; private long contentFlags; + /** + * Constructs a SavePoint. + * + * @param size the size + * @param count the count + * @param contentFlags the content flags + */ public SavePoint(final long size, final long count, final long contentFlags) { this.size = size; @@ -354,6 +507,9 @@ public SavePoint(final long size, final long count, this.contentFlags = contentFlags; } + /** + * Clear the save point data. + */ public void clear() { this.size = 0; this.count = 0; @@ -387,6 +543,12 @@ public long getContentFlags() { return contentFlags; } + /** + * Determines if {@link #clear()} was + * called. + * + * @return true if {@link #clear()} was called and the save point remains empty, false otherwise. + */ public boolean isCleared() { return (size | count | contentFlags) == 0; } diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index 40176aba42fb..08970b158eb8 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -28,10 +28,21 @@ public WriteBufferManager( this.allowStall_ = allowStall; } - public WriteBufferManager(final long bufferSizeBytes, final Cache cache){ + /** + * Construct a new instance of WriteBufferManager. + * + * @param bufferSizeBytes the buffer size in bytes. + * @param cache the cache to use. + */ + public WriteBufferManager(final long bufferSizeBytes, final Cache cache) { this(bufferSizeBytes, cache, false); } + /** + * Determine if the Write Buffer Manager is allowed to stall. + * + * @return true if it is allowed to stall, false otherwise. + */ public boolean allowStall() { return allowStall_; } diff --git a/java/src/main/java/org/rocksdb/WriteStallCondition.java b/java/src/main/java/org/rocksdb/WriteStallCondition.java index 98d9e2ce4adf..c91310374b1d 100644 --- a/java/src/main/java/org/rocksdb/WriteStallCondition.java +++ b/java/src/main/java/org/rocksdb/WriteStallCondition.java @@ -5,9 +5,24 @@ package org.rocksdb; +/** + * Conditions that caused Write Stalls. + */ public enum WriteStallCondition { + + /** + * Delayed. + */ DELAYED((byte) 0x0), + + /** + * Stopped. + */ STOPPED((byte) 0x1), + + /** + * Normal. 
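To show the Handler callbacks documented above in use, a hedged sketch that replays a WriteBatch through iterate(); Handler is fully abstract, so callbacks irrelevant to the example are stubbed with empty bodies:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import org.rocksdb.*;

public class HandlerExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final WriteBatch wb = new WriteBatch()) {
      wb.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
      wb.delete("k2".getBytes(UTF_8));
      wb.iterate(new WriteBatch.Handler() {
        @Override public void put(final int columnFamilyId, final byte[] key,
            final byte[] value) {}
        @Override public void put(final byte[] key, final byte[] value) {
          System.out.println("put " + new String(key, UTF_8));
        }
        @Override public void merge(final int columnFamilyId, final byte[] key,
            final byte[] value) {}
        @Override public void merge(final byte[] key, final byte[] value) {}
        @Override public void delete(final int columnFamilyId, final byte[] key) {}
        @Override public void delete(final byte[] key) {
          System.out.println("delete " + new String(key, UTF_8));
        }
        @Override public void singleDelete(final int columnFamilyId, final byte[] key) {}
        @Override public void singleDelete(final byte[] key) {}
        @Override public void deleteRange(final int columnFamilyId,
            final byte[] beginKey, final byte[] endKey) {}
        @Override public void deleteRange(final byte[] beginKey, final byte[] endKey) {}
        @Override public void logData(final byte[] blob) {}
        @Override public void putBlobIndex(final int columnFamilyId, final byte[] key,
            final byte[] value) {}
        @Override public void markBeginPrepare() {}
        @Override public void markEndPrepare(final byte[] xid) {}
        @Override public void markNoop(final boolean emptyBatch) {}
        @Override public void markRollback(final byte[] xid) {}
        @Override public void markCommit(final byte[] xid) {}
        @Override public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) {}
      });
    }
  }
}
```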
+ */ NORMAL((byte) 0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java index 1cade0acb8ed..9cccc0a95971 100644 --- a/java/src/main/java/org/rocksdb/WriteStallInfo.java +++ b/java/src/main/java/org/rocksdb/WriteStallInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on a Write Stall. + */ public class WriteStallInfo { private final String columnFamilyName; private final WriteStallCondition currentCondition; diff --git a/java/src/main/java/org/rocksdb/util/BufferUtil.java b/java/src/main/java/org/rocksdb/util/BufferUtil.java index 54be3e6937d6..4c23cfba86e5 100644 --- a/java/src/main/java/org/rocksdb/util/BufferUtil.java +++ b/java/src/main/java/org/rocksdb/util/BufferUtil.java @@ -6,7 +6,20 @@ package org.rocksdb.util; +/** + * Utility functions for working with buffers. + */ public class BufferUtil { + + /** + * Check the bounds for an operation on a buffer. + * + * @param offset the offset + * @param len the length + * @param size the size + * + * @throws IndexOutOfBoundsException if the values are out of bounds + */ public static void CheckBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException( diff --git a/java/src/main/java/org/rocksdb/util/ByteUtil.java b/java/src/main/java/org/rocksdb/util/ByteUtil.java index 5d64d5dcf29a..8c32668ca8b9 100644 --- a/java/src/main/java/org/rocksdb/util/ByteUtil.java +++ b/java/src/main/java/org/rocksdb/util/ByteUtil.java @@ -10,6 +10,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; +/** + * Simple utility functions for working with bytes. + */ public class ByteUtil { /** @@ -29,7 +32,7 @@ public static byte[] bytes(final String str) { * lexically less than {@code y}, or a value greater than zero if {@code x} * is lexically greater than {@code y}. Note that lexical order is determined * as if comparing unsigned char arrays. - * + *

* Similar to memcmp.c. * * @param x the first value to compare with diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java index fd55fdf8c57c..fb32db3be0da 100644 --- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -23,6 +23,11 @@ */ public final class BytewiseComparator extends AbstractComparator { + /** + * Constructs a new BytewiseComparator. + * + * @param copt the configuration options for the comparator. + */ public BytewiseComparator(final ComparatorOptions copt) { super(copt); } diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java index 78b73dc5d432..f6b2fa505657 100644 --- a/java/src/main/java/org/rocksdb/util/Environment.java +++ b/java/src/main/java/org/rocksdb/util/Environment.java @@ -5,6 +5,9 @@ import java.io.IOException; import java.util.Locale; +/** + * Provides information about the environment in which RocksJava is executing. + */ public class Environment { @SuppressWarnings("FieldMayBeFinal") private static String OS = System.getProperty("os.name").toLowerCase(Locale.getDefault()); @@ -24,38 +27,83 @@ public class Environment { */ private static Boolean MUSL_LIBC = null; + /** + * Returns true if the CPU architecture is aarch64. + * + * @return true if the CPU architecture is aarch64, false otherwise. + */ public static boolean isAarch64() { return ARCH.contains("aarch64"); } + /** + * Returns true if the CPU architecture is ppc. + * + * @return true if the CPU architecture is ppc, false otherwise. + */ public static boolean isPowerPC() { return ARCH.contains("ppc"); } + /** + * Returns true if the CPU architecture is s390x. + * + * @return true if the CPU architecture is s390x, false otherwise. + */ public static boolean isS390x() { return ARCH.contains("s390x"); } + /** + * Returns true if the CPU architecture is riscv64. + * + * @return true if the CPU architecture is riscv64, false otherwise. + */ public static boolean isRiscv64() { return ARCH.contains("riscv64"); } + /** + * Returns true if the OS is Windows. + * + * @return true if the OS is Windows, false otherwise. + */ public static boolean isWindows() { return (OS.contains("win")); } + /** + * Returns true if the OS is FreeBSD. + * + * @return true if the OS is FreeBSD, false otherwise. + */ public static boolean isFreeBSD() { return (OS.contains("freebsd")); } + /** + * Returns true if the OS is Mac. + * + * @return true if the OS is Mac, false otherwise. + */ public static boolean isMac() { return (OS.contains("mac")); } + /** + * Returns true if the OS is AIX. + * + * @return true if the OS is AIX, false otherwise. + */ public static boolean isAix() { return OS.contains("aix"); } - + + /** + * Returns true if the OS is Unix. + * + * @return true if the OS is Unix, false otherwise. + */ public static boolean isUnix() { return OS.contains("nix") || OS.contains("nux"); @@ -75,9 +123,9 @@ public static boolean isMuslLibc() { /** * Determine if the environment has a musl libc. - * + *

* The initialisation counterpart of {@link #isMuslLibc()}. - * + *

* Intentionally package-private for testing. * * @return true if the environment has a musl libc, false otherwise. @@ -136,14 +184,29 @@ static boolean initIsMuslLibc() { return false; } + /** + * Returns true if the OS is Solaris. + * + * @return true if the OS is Solaris, false otherwise. + */ public static boolean isSolaris() { return OS.contains("sunos"); } + /** + * Returns true if the OS is OpenBSD. + * + * @return true if the OS is OpenBSD, false otherwise. + */ public static boolean isOpenBSD() { return (OS.contains("openbsd")); } + /** + * Returns true if the system architecture is 64 bit. + * + * @return true if the system architecture is 64 bit, false otherwise. + */ public static boolean is64Bit() { if (ARCH.contains(SPARCV9)) { return true; @@ -151,10 +214,24 @@ public static boolean is64Bit() { return (ARCH.indexOf("64") > 0); } + /** + * Get the name as that of a shared JNI library. + * + * @param name the name. + * + * @return the name of the shared JNI library. + */ public static String getSharedLibraryName(final String name) { return name + "jni"; } + /** + * Get the filename as that of a shared JNI library. + * + * @param name the name. + * + * @return the filename of the shared JNI library. + */ public static String getSharedLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getSharedLibraryName(name), true); } @@ -181,6 +258,16 @@ private static String getLibcPostfix() { return "-" + libcName; } + + /** + * Get the name as that of a JNI library. + *
+ * <p>
+ * Deals with platform- and architecture-specific naming. + * + * @param name the base name of the library. + * + * @return the name of the JNI library. + */ public static String getJniLibraryName(final String name) { if (isUnix()) { final String arch = is64Bit() ? "64" : "32"; @@ -219,6 +306,15 @@ public static String getJniLibraryName(final String name) { throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name)); } + /** + * Get a fallback name for a JNI library.
+ * <p>
+ * Deals with platform- and architecture-specific naming. + * + * @param name the base name of the library. + * + * @return the fallback name of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) { if (isMac() && is64Bit()) { return String.format("%sjni-osx", name); @@ -226,10 +322,28 @@ public static String getJniLibraryName(final String name) { return null; } + /** + * Get the filename of a JNI library.
+ * <p>
+ * Deals with platform- and architecture-specific naming. + * + * @param name the base name of the library. + * + * @return the filename of the JNI library. + */ public static String getJniLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getJniLibraryName(name), false); } + /** + * Get the fallback filename of a JNI library.
+ * <p>
+ * Deals with platform- and architecture-specific naming. + * + * @param name the base name of the library. + * + * @return the fallback filename of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) { final String fallbackJniLibraryName = getFallbackJniLibraryName(name); if (fallbackJniLibraryName == null) { @@ -249,6 +363,13 @@ private static String appendLibOsSuffix(final String libraryFileName, final bool throw new UnsupportedOperationException(); } + /** + * Get the filename extension used for a JNI library.
+ * <p>
+ * Deals with platform- and architecture-specific naming. + * + * @return the filename extension. + */ public static String getJniLibraryExtension() { if (isWindows()) { return ".dll"; diff --git a/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java index 2caf0c601572..cf3c6423f08c 100644 --- a/java/src/main/java/org/rocksdb/util/IntComparator.java +++ b/java/src/main/java/org/rocksdb/util/IntComparator.java @@ -13,11 +13,11 @@ /** * This is a Java implementation of a Comparator for Java int * keys. - *
+ * <p>
* This comparator assumes keys are (at least) four bytes, so * the caller must guarantee that in accessing other APIs in * combination with this comparator. - * + *
+ * <p>
* The performance of Comparators implemented in Java is always * less than their C++ counterparts due to the bridging overhead, * as such you likely don't want to use this apart from benchmarking @@ -25,8 +25,13 @@ */ public final class IntComparator extends AbstractComparator { - public IntComparator(final ComparatorOptions copt) { - super(copt); + /** + * Constructs an IntComparator. + * + * @param comparatorOptions the options for the comparator. + */ + public IntComparator(final ComparatorOptions comparatorOptions) { + super(comparatorOptions); } @Override diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java index 3d3c429416b0..e145184eac6c 100644 --- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java @@ -24,6 +24,11 @@ */ public final class ReverseBytewiseComparator extends AbstractComparator { + /** + * Constructs a ReverseBytewiseComparator. + * + * @param copt the comparator options. + */ public ReverseBytewiseComparator(final ComparatorOptions copt) { super(copt); } diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java index 0f717e8d4540..8582bb15436b 100644 --- a/java/src/main/java/org/rocksdb/util/SizeUnit.java +++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -5,12 +5,33 @@ package org.rocksdb.util; -public class SizeUnit { - public static final long KB = 1024L; - public static final long MB = KB * KB; - public static final long GB = KB * MB; - public static final long TB = KB * GB; - public static final long PB = KB * TB; +/** + * Simple factors of byte sizes. + */ +public interface SizeUnit { - private SizeUnit() {} + /** + * 1 Kilobyte. + */ + long KB = 1024L; + + /** + * 1 Megabyte. + */ + long MB = KB * KB; + + /** + * 1 Gigabyte. + */ + long GB = KB * MB; + + /** + * 1 Terabyte. + */ + long TB = KB * GB; + + /** + * 1 Petabyte. 
+ */ + long PB = KB * TB; } diff --git a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java index 549b74beb1cc..9c6689ea8906 100644 --- a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java @@ -125,14 +125,14 @@ public void fullHistoryTSLowDefault() { @Test public void canceled() { CompactRangeOptions opt = new CompactRangeOptions(); - assertThat(opt.canceled()).isEqualTo(false); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); - opt.setCanceled(false); - assertThat(opt.canceled()).isEqualTo(false); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); + assertThat(opt.cancelled()).isEqualTo(false); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); + opt.setCancelled(false); + assertThat(opt.cancelled()).isEqualTo(false); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); } } diff --git a/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/java/src/test/java/org/rocksdb/SstFileManagerTest.java index 2e136e820035..96c5627096d8 100644 --- a/java/src/test/java/org/rocksdb/SstFileManagerTest.java +++ b/java/src/test/java/org/rocksdb/SstFileManagerTest.java @@ -47,7 +47,7 @@ public void trackedFiles() throws RocksDBException { @Test public void deleteRateBytesPerSecond() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.RATE_BYTES_PER_SEC_DEFAULT); + assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.DEFAULT_RATE_BYTES_PER_SEC); final long ratePerSecond = 1024 * 1024 * 52; sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond); assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond); @@ -57,7 +57,7 @@ public void deleteRateBytesPerSecond() throws RocksDBException { @Test public void maxTrashDBRatio() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.MAX_TRASH_DB_RATION_DEFAULT); + assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.DEFAULT_MAX_TRASH_DB_RATIO); final double trashRatio = 0.2; sstFileManager.setMaxTrashDBRatio(trashRatio); assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio); diff --git a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java index 8d7956cf27f6..c3309d787878 100644 --- a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java +++ b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java @@ -8,7 +8,18 @@ import java.nio.ByteBuffer; +/** + * Allocator for creating new ByteBuffers. + */ public interface ByteBufferAllocator { + + /** + * Allocate a new ByteBuffer. + * + * @param capacity the capacity of the buffer. + * + * @return the new ByteBuffer.
+ */ ByteBuffer allocate(int capacity); ByteBufferAllocator DIRECT = new DirectByteBufferAllocator(); diff --git a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java index 8ea104332cb1..8b06fbfabca0 100644 --- a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java +++ b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java @@ -124,16 +124,34 @@ public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws Ro events.add(new Event(Action.MARK_COMMIT_WITH_TIMESTAMP, (byte[]) null, (byte[]) null)); } + /** + * Event received by the handler. + */ public static class Event { public final Action action; public final int columnFamilyId; public final byte[] key; public final byte[] value; + /** + * Construct an event. + * + * @param action the action of the event + * @param key the key of the event + * @param value the value of the event + */ public Event(final Action action, final byte[] key, final byte[] value) { this(action, 0, key, value); } + /** + * Construct an event. + * + * @param action the action of the event + * @param columnFamilyId the id of the column family of the event + * @param key the key of the event + * @param value the value of the event + */ public Event(final Action action, final int columnFamilyId, final byte[] key, final byte[] value) { this.action = action;
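The utility classes whose Javadoc this patch touches are easiest to see working together in a short usage sketch. The following is a minimal, hypothetical example, not part of the patch: the class name and database path are placeholders, and it assumes a RocksJava build that ships org.rocksdb.util.SizeUnit and org.rocksdb.util.BytewiseComparator as documented above.

import org.rocksdb.ComparatorOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.util.BytewiseComparator;
import org.rocksdb.util.SizeUnit;

public class RocksJavaUtilSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    // ComparatorOptions is the constructor parameter documented by this patch;
    // SizeUnit constants keep the size-based options readable.
    try (final ComparatorOptions copt = new ComparatorOptions();
         final BytewiseComparator comparator = new BytewiseComparator(copt);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(comparator)                 // Java comparator, see caveat below
             .setWriteBufferSize(64 * SizeUnit.MB)      // memtable budget: 64 MiB
             .setTargetFileSizeBase(256 * SizeUnit.MB); // SST target size: 256 MiB
         final RocksDB db = RocksDB.open(options, "/tmp/rocksjava-sketch")) { // hypothetical path
      db.put("key".getBytes(), "value".getBytes());
    }
  }
}

As the IntComparator Javadoc above warns, comparators implemented in Java pay a JNI bridging cost on every key comparison, so outside of benchmarks the native default bytewise comparator is normally preferable; the sketch wires one in only to illustrate the ComparatorOptions parameter.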