jrwest commented on code in PR #3606:
URL: https://github.com/apache/cassandra/pull/3606#discussion_r1938379147


##########
src/java/org/apache/cassandra/io/util/ThreadLocalReadAheadBuffer.java:
##########
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import io.netty.util.concurrent.FastThreadLocal;
+import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.compress.CorruptBlockException;
+import org.apache.cassandra.io.sstable.CorruptSSTableException;
+
+public final class ThreadLocalReadAheadBuffer
+{
+    private static class Block {
+        ByteBuffer buffer = null;
+        int index = -1;
+    }
+
+    private final ChannelProxy channel;
+
+    private final BufferType bufferType;
+
+    private static final Map<String, FastThreadLocal<Block>> allBlocks = new ConcurrentHashMap<>();
+
+    private final FastThreadLocal<Block> blockHolder;
+
+    private final int bufferSize;
+
+    public ThreadLocalReadAheadBuffer(ChannelProxy channel, int bufferSize, BufferType bufferType)
+    {
+        this.channel = channel;
+        this.bufferSize = bufferSize;
+        this.bufferType = bufferType;
+        allBlocks.putIfAbsent(channel.filePath(), new FastThreadLocal<>() {
+            @Override
+            public Block initialValue()
+            {
+                return new Block();
+            }
+        });
+        blockHolder = allBlocks.get(channel.filePath());
+    }
+
+    public boolean hasBuffer()
+    {
+        return blockHolder.get().buffer != null;
+    }
+
+    /**
+     * Safe to call only if {@link #hasBuffer()} is true
+     */
+    public int remaining()
+    {
+        return getBlock().buffer.remaining();
+    }
+
+    public void allocateBuffer()
+    {
+        getBlock();
+    }
+
+    private Block getBlock()
+    {
+        Block block = blockHolder.get();
+        if (block.buffer == null)
+        {
+            block.buffer = bufferType.allocate(bufferSize);
+            block.buffer.clear();
+        }
+        return block;
+    }
+
+    public void fill(long position) throws CorruptBlockException
+    {
+        Block block = getBlock();
+        ByteBuffer blockBuffer = block.buffer;
+        long channelSize = channel.size();

Review Comment:
   see https://github.com/apache/cassandra/pull/3606/commits/38d47aae5d6a2ee36fdf93ba3c467e9902043964


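[Editorial background note, not part of the PR or the review: the constructor quoted above keeps one FastThreadLocal<Block> per file path in a shared ConcurrentHashMap, so each thread lazily gets its own read-ahead buffer per file. Below is a minimal standalone sketch of that registry pattern, using plain java.lang.ThreadLocal in place of Netty's FastThreadLocal and purely illustrative names.]

import java.nio.ByteBuffer;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch only; not Cassandra code. ThreadLocal stands in for
// FastThreadLocal, and the String key stands in for ChannelProxy.filePath().
public final class PerFileThreadLocalBuffers
{
    private static final Map<String, ThreadLocal<ByteBuffer>> buffersByFile = new ConcurrentHashMap<>();

    public static ByteBuffer get(String filePath, int bufferSize)
    {
        // One ThreadLocal per file; each thread allocates its buffer lazily on first use.
        return buffersByFile
               .computeIfAbsent(filePath, p -> ThreadLocal.withInitial(() -> ByteBuffer.allocateDirect(bufferSize)))
               .get();
    }

    public static void main(String[] args)
    {
        ByteBuffer a = get("/tmp/data.db", 256 * 1024);
        ByteBuffer b = get("/tmp/data.db", 256 * 1024);
        System.out.println(a == b); // true: same thread and same file share one buffer
    }
}

[computeIfAbsent is used here only for brevity; the PR's putIfAbsent/get pair is equivalent for this purpose.]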

##########
test/unit/org/apache/cassandra/io/util/ThreadLocalReadAheadBufferTest.java:
##########
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.Random;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.compress.CorruptBlockException;
+import org.apache.cassandra.utils.Pair;
+import org.quicktheories.WithQuickTheories;
+import org.quicktheories.core.Gen;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR;
+
+public class ThreadLocalReadAheadBufferTest implements WithQuickTheories
+{
+    private static final int numFiles = 5;
+    private static final File[] files = new File[numFiles];
+
+    @BeforeClass
+    public static void setup()
+    {
+        int seed = new Random().nextInt();
+        for (int i = 0; i < numFiles; i++)
+        {
+            int size = new Random().nextInt((Integer.MAX_VALUE - 1) / 8);
+            files[i] = writeFile(seed, size);
+        }
+    }
+
+    @AfterClass
+    public static void cleanup()
+    {
+        for (File f : files)
+        {
+            f.delete();
+        }
+    }
+
+    @Test
+    public void testLastBlockReads()
+    {
+        qt().forAll(lastBlockReads())
+            .checkAssert(this::testReads);
+    }
+
+    @Test
+    public void testReadsLikeChannelProxy()
+    {
+
+        qt().forAll(randomReads())
+            .checkAssert(this::testReads);
+    }
+
+    private void testReads(InputData propertyInputs)
+    {
+        try (ChannelProxy channel = new ChannelProxy(propertyInputs.file))
+        {
+            ThreadLocalReadAheadBuffer trlab = new ThreadLocalReadAheadBuffer(channel, new DataStorageSpec.IntKibibytesBound("256KiB").toBytes(), BufferType.OFF_HEAP);
+            for (Pair<Long, Integer> read : propertyInputs.positionsAndLengths)
+            {
+                int readSize = Math.min(read.right, (int) (channel.size() - read.left));
+                ByteBuffer buf1 = ByteBuffer.allocate(readSize);
+                channel.read(buf1, read.left);
+
+                ByteBuffer buf2 = ByteBuffer.allocate(readSize);
+                try
+                {
+                    int copied = 0;
+                    while (copied < readSize) {
+                        trlab.fill(read.left + copied);
+                        int leftToRead = readSize - copied;
+                        if (trlab.remaining() >= leftToRead)
+                            copied += trlab.read(buf2, leftToRead);
+                        else
+                            copied += trlab.read(buf2, trlab.remaining());
+                    }
+                }
+                catch (CorruptBlockException e)
+                {
+                    throw new RuntimeException(e);
+                }
+
+                Assert.assertEquals(buf1, buf2);
+            }
+        }
+    }
+
+    private Gen<InputData> lastBlockReads()
+    {
+        return arbitrary().pick(List.of(files))
+                          .flatMap((file) ->
+                                   lists().of(longs().between(0, fileSize(file)).zip(integers().between(1, 100), Pair::create))
+                                          .ofSizeBetween(5, 10)
+                                          .map(positionsAndLengths -> new InputData(file, positionsAndLengths)));
+
+    }
+
+    private Gen<InputData> randomReads()
+    {
+        int blockSize = new DataStorageSpec.IntKibibytesBound("256KiB").toBytes();
+        return arbitrary().pick(List.of(files))
+                         .flatMap((file) ->
+                                  lists().of(longs().between(fileSize(file) - blockSize, fileSize(file)).zip(integers().between(1, 100), Pair::create))
+                                         .ofSizeBetween(5, 10)
+                                         .map(positionsAndLengths -> new InputData(file, positionsAndLengths)));
+
+    }
+
+    // need this because generators don't handle the IOException
+    private long fileSize(File file)
+    {
+        try
+        {
+            return Files.size(file.toPath());
+        } catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static class InputData
+    {
+
+        private final File file;
+        private final List<Pair<Long, Integer>> positionsAndLengths;
+
+        public InputData(File file, List<Pair<Long, Integer>> positionsAndLengths)
+        {
+            this.file = file;
+            this.positionsAndLengths = positionsAndLengths;
+        }
+    }
+
+    private static File writeFile(int seed, int length)
+    {
+        String fileName = JAVA_IO_TMPDIR.getString() + "data+" + length + ".bin";
+
+        byte[] data = new byte[(int) length];

Review Comment:
   See https://github.com/apache/cassandra/pull/3606/commits/38d47aae5d6a2ee36fdf93ba3c467e9902043964



##########
test/unit/org/apache/cassandra/io/util/ThreadLocalReadAheadBufferTest.java:
##########
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.Random;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.cassandra.config.DataStorageSpec;
+import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.compress.CorruptBlockException;
+import org.apache.cassandra.utils.Pair;
+import org.quicktheories.WithQuickTheories;
+import org.quicktheories.core.Gen;
+
+import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR;
+
+public class ThreadLocalReadAheadBufferTest implements WithQuickTheories
+{
+    private static final int numFiles = 5;
+    private static final File[] files = new File[numFiles];
+
+    @BeforeClass
+    public static void setup()
+    {
+        int seed = new Random().nextInt();
+        for (int i = 0; i < numFiles; i++)
+        {
+            int size = new Random().nextInt((Integer.MAX_VALUE - 1) / 8);
+            files[i] = writeFile(seed, size);
+        }
+    }
+
+    @AfterClass
+    public static void cleanup()
+    {
+        for (File f : files)
+        {
+            f.delete();
+        }
+    }
+
+    @Test
+    public void testLastBlockReads()
+    {
+        qt().forAll(lastBlockReads())
+            .checkAssert(this::testReads);
+    }
+
+    @Test
+    public void testReadsLikeChannelProxy()
+    {
+
+        qt().forAll(randomReads())
+            .checkAssert(this::testReads);
+    }
+
+    private void testReads(InputData propertyInputs)
+    {
+        try (ChannelProxy channel = new ChannelProxy(propertyInputs.file))
+        {
+            ThreadLocalReadAheadBuffer trlab = new ThreadLocalReadAheadBuffer(channel, new DataStorageSpec.IntKibibytesBound("256KiB").toBytes(), BufferType.OFF_HEAP);
+            for (Pair<Long, Integer> read : propertyInputs.positionsAndLengths)
+            {
+                int readSize = Math.min(read.right, (int) (channel.size() - read.left));
+                ByteBuffer buf1 = ByteBuffer.allocate(readSize);
+                channel.read(buf1, read.left);
+
+                ByteBuffer buf2 = ByteBuffer.allocate(readSize);
+                try
+                {
+                    int copied = 0;
+                    while (copied < readSize) {
+                        trlab.fill(read.left + copied);
+                        int leftToRead = readSize - copied;
+                        if (trlab.remaining() >= leftToRead)
+                            copied += trlab.read(buf2, leftToRead);
+                        else
+                            copied += trlab.read(buf2, trlab.remaining());
+                    }
+                }
+                catch (CorruptBlockException e)
+                {
+                    throw new RuntimeException(e);
+                }
+
+                Assert.assertEquals(buf1, buf2);
+            }
+        }
+    }
+
+    private Gen<InputData> lastBlockReads()
+    {
+        return arbitrary().pick(List.of(files))
+                          .flatMap((file) ->
+                                   lists().of(longs().between(0, fileSize(file)).zip(integers().between(1, 100), Pair::create))
+                                          .ofSizeBetween(5, 10)
+                                          .map(positionsAndLengths -> new InputData(file, positionsAndLengths)));
+
+    }
+
+    private Gen<InputData> randomReads()
+    {
+        int blockSize = new DataStorageSpec.IntKibibytesBound("256KiB").toBytes();
+        return arbitrary().pick(List.of(files))
+                         .flatMap((file) ->
+                                  lists().of(longs().between(fileSize(file) - blockSize, fileSize(file)).zip(integers().between(1, 100), Pair::create))
+                                         .ofSizeBetween(5, 10)
+                                         .map(positionsAndLengths -> new InputData(file, positionsAndLengths)));
+
+    }
+
+    // need this because generators don't handle the IOException
+    private long fileSize(File file)
+    {
+        try
+        {
+            return Files.size(file.toPath());
+        } catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static class InputData
+    {
+
+        private final File file;
+        private final List<Pair<Long, Integer>> positionsAndLengths;
+
+        public InputData(File file, List<Pair<Long, Integer>> positionsAndLengths)
+        {
+            this.file = file;
+            this.positionsAndLengths = positionsAndLengths;
+        }
+    }
+
+    private static File writeFile(int seed, int length)
+    {
+        String fileName = JAVA_IO_TMPDIR.getString() + "data+" + length + ".bin";

Review Comment:
   See https://github.com/apache/cassandra/pull/3606/commits/38d47aae5d6a2ee36fdf93ba3c467e9902043964



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: pr-unsubscr...@cassandra.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: pr-unsubscr...@cassandra.apache.org
For additional commands, e-mail: pr-h...@cassandra.apache.org
