
Commit

HADOOP-17596. ABFS: Change default Readahead Queue Depth from num(processors) to const (#3106)

* HADOOP-17596. ABFS: Change default Readahead Queue Depth from num(processors) to const (#2795). Contributed by Sumangala Patki.

(cherry picked from commit 76d92eb)
sumangala-patki committed Jul 10, 2021
1 parent 7cb91db commit aa6a9ca
Showing 3 changed files with 18 additions and 2 deletions.
@@ -89,7 +89,7 @@ public final class FileSystemConfigurations {
   public static final int MIN_LEASE_DURATION = 15;
   public static final int MAX_LEASE_DURATION = 60;
 
-  public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1;
+  public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = 2;
 
   public static final boolean DEFAULT_ENABLE_FLUSH = true;
   public static final boolean DEFAULT_DISABLE_OUTPUTSTREAM_FLUSH = true;
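As documented in abfs.md (next hunk), a negative configured depth falls back to Runtime.getRuntime().availableProcessors(); this commit changes the shipped default from -1 to the constant 2. An illustrative helper sketching that resolution rule; the helper name is hypothetical and this is not the actual AbfsInputStream source:

  // Illustrative sketch only (not the ABFS implementation): a non-negative
  // configured value is used as-is, a negative value falls back to the
  // number of available processors. The shipped default is now 2.
  static int resolveReadAheadQueueDepth(int configuredDepth) {
    return configuredDepth >= 0
        ? configuredDepth
        : Runtime.getRuntime().availableProcessors();
  }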
hadoop-tools/hadoop-azure/src/site/markdown/abfs.md (2 changes: 1 addition & 1 deletion)
@@ -803,7 +803,7 @@ pattern is detected.
 `fs.azure.readaheadqueue.depth`: Sets the readahead queue depth in
 AbfsInputStream. In case the set value is negative the read ahead queue depth
 will be set as Runtime.getRuntime().availableProcessors(). By default the value
-will be -1. To disable readaheads, set this value to 0. If your workload is
+will be 2. To disable readaheads, set this value to 0. If your workload is
 doing only random reads (non-sequential) or you are seeing throttling, you
 may try setting this value to 0.

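To override the new default of 2, the property above can be set on the Hadoop Configuration used to create the filesystem; 0 disables readahead, which the documentation suggests for purely random-read or throttled workloads. A minimal sketch of doing that from client code; the class name, account, container, and file path below are placeholders, not part of this commit:

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ReadaheadDepthExample {
    public static void main(String[] args) throws Exception {
      // Disable ABFS readahead for this client by setting the documented key to 0.
      Configuration conf = new Configuration();
      conf.setInt("fs.azure.readaheadqueue.depth", 0);

      // Placeholder ABFS endpoint; replace with a real container/account.
      FileSystem fs = FileSystem.get(
          new URI("abfs://mycontainer@myaccount.dfs.core.windows.net/"), conf);
      try (FSDataInputStream in = fs.open(new Path("/data/sample.txt"))) {
        in.read(); // reads on this stream are served without readahead
      }
    }
  }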
@@ -28,6 +28,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -48,6 +49,7 @@
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.FORWARD_SLASH;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_READ_AHEAD_QUEUE_DEPTH;
 
 /**
  * Unit test AbfsInputStream.
@@ -569,6 +571,20 @@ public void testDiffReadRequestSizeAndRAHBlockSize() throws Exception {
     testReadAheads(inputStream, FORTY_EIGHT_KB, SIXTEEN_KB);
   }
 
+  @Test
+  public void testDefaultReadaheadQueueDepth() throws Exception {
+    Configuration config = getRawConfiguration();
+    config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH);
+    AzureBlobFileSystem fs = getFileSystem(config);
+    Path testFile = new Path("/testFile");
+    fs.create(testFile);
+    FSDataInputStream in = fs.open(testFile);
+    Assertions.assertThat(
+        ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth())
+        .describedAs("readahead queue depth should be set to default value 2")
+        .isEqualTo(2);
+  }
+
 
   private void testReadAheads(AbfsInputStream inputStream,
       int readRequestSize,
