jpountz commented on a change in pull request #1234: Add compression for Binary doc value fields
URL: https://github.com/apache/lucene-solr/pull/1234#discussion_r379299380
 
 

 ##########
 File path: lucene/core/src/java/org/apache/lucene/codecs/lucene80/Lucene80DocValuesConsumer.java
 ##########
 @@ -353,67 +360,168 @@ private void writeBlock(long[] values, int length, long gcd, ByteBuffersDataOutp
     }
   }
 
-  @Override
-  public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
-    meta.writeInt(field.number);
-    meta.writeByte(Lucene80DocValuesFormat.BINARY);
-
-    BinaryDocValues values = valuesProducer.getBinary(field);
-    long start = data.getFilePointer();
-    meta.writeLong(start); // dataOffset
-    int numDocsWithField = 0;
-    int minLength = Integer.MAX_VALUE;
-    int maxLength = 0;
-    for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
-      numDocsWithField++;
-      BytesRef v = values.binaryValue();
-      int length = v.length;
-      data.writeBytes(v.bytes, v.offset, v.length);
-      minLength = Math.min(length, minLength);
-      maxLength = Math.max(length, maxLength);
+  class CompressedBinaryBlockWriter implements Closeable {
+    FastCompressionHashTable ht = new LZ4.FastCompressionHashTable();    
+    int uncompressedBlockLength = 0;
+    int maxUncompressedBlockLength = 0;
+    int numDocsInCurrentBlock = 0;
+    int [] docLengths = new int[Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK];
+    byte [] block = new byte [1024 * 16];
+    int totalChunks = 0;
+    long maxPointer = 0;
+    long blockAddressesStart = -1; 
+
+    private IndexOutput tempBinaryOffsets;
+    
+    
+    public CompressedBinaryBlockWriter() throws IOException {
+      tempBinaryOffsets = state.directory.createTempOutput(state.segmentInfo.name, "binary_pointers", state.context);
+      try {
+        CodecUtil.writeHeader(tempBinaryOffsets, Lucene80DocValuesFormat.META_CODEC + "FilePointers", Lucene80DocValuesFormat.VERSION_CURRENT);
+      } catch (Throwable exception) {
+        IOUtils.closeWhileHandlingException(this); //self-close because constructor caller can't
+        throw exception;
+      }
     }
-    assert numDocsWithField <= maxDoc;
-    meta.writeLong(data.getFilePointer() - start); // dataLength
 
-    if (numDocsWithField == 0) {
-      meta.writeLong(-2); // docsWithFieldOffset
-      meta.writeLong(0L); // docsWithFieldLength
-      meta.writeShort((short) -1); // jumpTableEntryCount
-      meta.writeByte((byte) -1);   // denseRankPower
-    } else if (numDocsWithField == maxDoc) {
-      meta.writeLong(-1); // docsWithFieldOffset
-      meta.writeLong(0L); // docsWithFieldLength
-      meta.writeShort((short) -1); // jumpTableEntryCount
-      meta.writeByte((byte) -1);   // denseRankPower
-    } else {
-      long offset = data.getFilePointer();
-      meta.writeLong(offset); // docsWithFieldOffset
-      values = valuesProducer.getBinary(field);
-      final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER);
-      meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength
-      meta.writeShort(jumpTableEntryCount);
-      meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER);
+    void addDoc(int doc, BytesRef v) throws IOException {
+      if (blockAddressesStart < 0) {
+        blockAddressesStart = data.getFilePointer();
+      }
+      docLengths[numDocsInCurrentBlock] = v.length;
+      block = ArrayUtil.grow(block, uncompressedBlockLength + v.length);
+      System.arraycopy(v.bytes, v.offset, block, uncompressedBlockLength, v.length);
+      uncompressedBlockLength += v.length;
+      numDocsInCurrentBlock++;
+      if (numDocsInCurrentBlock == Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK) {
+        flushData();
+      }      
     }
 
-    meta.writeInt(numDocsWithField);
-    meta.writeInt(minLength);
-    meta.writeInt(maxLength);
-    if (maxLength > minLength) {
-      start = data.getFilePointer();
-      meta.writeLong(start);
+    private void flushData() throws IOException {
+      if (numDocsInCurrentBlock > 0) {
+        // Write offset to this block to temporary offsets file
+        totalChunks++;
+        long thisBlockStartPointer = data.getFilePointer();
+        data.writeVInt(numDocsInCurrentBlock);
+        for (int i = 0; i < numDocsInCurrentBlock; i++) {
+          data.writeVInt(docLengths[i]);
+        }
+        maxUncompressedBlockLength = Math.max(maxUncompressedBlockLength, uncompressedBlockLength);
+        LZ4.compress(block,  0, uncompressedBlockLength, data, ht);
+        numDocsInCurrentBlock = 0;
+        uncompressedBlockLength = 0;
+        maxPointer = data.getFilePointer();
+        tempBinaryOffsets.writeVLong(maxPointer - thisBlockStartPointer);
+      }
+    }
+    
+    void writeMetaData() throws IOException {
+      if (blockAddressesStart < 0 ) {
 
 Review comment:
   This makes sense; can you leave a comment about it?
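
   (Editor's note, for context: blockAddressesStart is initialized to -1 and
   is only assigned in addDoc(), so a negative value at this point means no
   document in the segment had a value for this field. A minimal sketch of
   the kind of clarifying comment being requested; the method body below is
   hypothetical, since the diff is truncated at this line:

     void writeMetaData() throws IOException {
       if (blockAddressesStart < 0) {
         // blockAddressesStart is only assigned in addDoc(), so a negative
         // value means no document had a value for this field: no block was
         // ever started and there are no block addresses to write.
         return; // assumed handling, for illustration only
       }
       // remainder of the method is not visible in this diff
     }
   )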

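More broadly, the new write path above buffers values into the block array and
flushes whenever BINARY_DOCS_PER_COMPRESSED_BLOCK documents have accumulated.
Below is a simplified, self-contained sketch of that per-block layout, not the
PR's code: BlockLayoutSketch, DOCS_PER_BLOCK, addValue and flush are
illustrative stand-ins, while the LZ4.FastCompressionHashTable and
LZ4.compress(bytes, off, len, out, ht) calls are the same
org.apache.lucene.util.compress.LZ4 entry points the diff itself uses.

  import java.io.IOException;
  import org.apache.lucene.store.ByteBuffersDataOutput;
  import org.apache.lucene.store.DataOutput;
  import org.apache.lucene.util.ArrayUtil;
  import org.apache.lucene.util.compress.LZ4;

  // Illustrative sketch of the block layout written by flushData() above:
  // a vInt doc count, one vInt length per doc, then the LZ4-compressed
  // concatenation of the raw values.
  class BlockLayoutSketch {
    static final int DOCS_PER_BLOCK = 32; // stand-in for BINARY_DOCS_PER_COMPRESSED_BLOCK

    private final LZ4.FastCompressionHashTable ht = new LZ4.FastCompressionHashTable();
    private final int[] docLengths = new int[DOCS_PER_BLOCK];
    private byte[] block = new byte[16 * 1024];
    private int numDocs;
    private int blockLength;

    void addValue(byte[] value, DataOutput data) throws IOException {
      docLengths[numDocs] = value.length;
      block = ArrayUtil.grow(block, blockLength + value.length);
      System.arraycopy(value, 0, block, blockLength, value.length);
      blockLength += value.length;
      if (++numDocs == DOCS_PER_BLOCK) {
        flush(data); // block is full: write it out and reset the buffer
      }
    }

    void flush(DataOutput data) throws IOException {
      if (numDocs > 0) {
        data.writeVInt(numDocs);           // block header: doc count
        for (int i = 0; i < numDocs; i++) {
          data.writeVInt(docLengths[i]);   // block header: per-doc lengths
        }
        LZ4.compress(block, 0, blockLength, data, ht); // compressed payload
        numDocs = 0;
        blockLength = 0;
      }
    }

    public static void main(String[] args) throws IOException {
      ByteBuffersDataOutput out = new ByteBuffersDataOutput();
      BlockLayoutSketch writer = new BlockLayoutSketch();
      for (int i = 0; i < 100; i++) {
        writer.addValue(("value-" + i).getBytes(java.nio.charset.StandardCharsets.UTF_8), out);
      }
      writer.flush(out); // flush the final partial block, as closing the writer would
      System.out.println("bytes written: " + out.size());
    }
  }

Keeping the doc count and per-document lengths uncompressed ahead of the LZ4
payload means a reader only has to decompress a single block and can then
slice one document's bytes out of the decompressed buffer.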