This is an automated email from the ASF dual-hosted git repository.

gabor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/parquet-mr.git


The following commit(s) were added to refs/heads/master by this push:
     new 7bb1663  PARQUET-2063: Remove Compile Warnings from MemoryManager (#917)
7bb1663 is described below

commit 7bb1663b434e069f2b5f2832fccc15e14d41b034
Author: belugabehr <[email protected]>
AuthorDate: Tue Aug 10 03:36:50 2021 -0400

    PARQUET-2063: Remove Compile Warnings from MemoryManager (#917)
---
 .../apache/parquet/ParquetRuntimeException.java    |  3 ---
 .../org/apache/parquet/hadoop/MemoryManager.java   | 26 +++++++++++----------
 .../ParquetMemoryManagerRuntimeException.java      | 27 ++++++++--------------
 .../apache/parquet/hadoop/TestMemoryManager.java   |  4 ++--
 4 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java b/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
index a1d3a8e..3544e2f 100644
--- a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
+++ b/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
@@ -18,9 +18,6 @@
  */
 package org.apache.parquet;
 
-import java.io.Closeable;
-import java.io.IOException;
-
 /**
  * The parent class for all runtime exceptions
  */
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
index 41afd72..87b8e9b 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.hadoop;
 
-import org.apache.parquet.ParquetRuntimeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,8 +47,8 @@ public class MemoryManager {
 
   private final long totalMemoryPool;
   private final long minMemoryAllocation;
-  private final Map<InternalParquetRecordWriter, Long> writerList = new
-      HashMap<InternalParquetRecordWriter, Long>();
+  private final Map<InternalParquetRecordWriter<?>, Long> writerList =
+      new HashMap<>();
   private final Map<String, Runnable> callBacks = new HashMap<String, Runnable>();
   private double scale = 1.0;
 
@@ -75,7 +74,7 @@ public class MemoryManager {
    * @param writer the new created writer
    * @param allocation the requested buffer size
    */
-  synchronized void addWriter(InternalParquetRecordWriter writer, Long allocation) {
+  synchronized void addWriter(InternalParquetRecordWriter<?> writer, Long allocation) {
     Long oldValue = writerList.get(writer);
     if (oldValue == null) {
       writerList.put(writer, allocation);
@@ -91,7 +90,7 @@ public class MemoryManager {
    * Remove the given writer from the memory manager.
    * @param writer the writer that has been closed
    */
-  synchronized void removeWriter(InternalParquetRecordWriter writer) {
+  synchronized void removeWriter(InternalParquetRecordWriter<?> writer) {
     writerList.remove(writer);
     if (!writerList.isEmpty()) {
       updateAllocation();
@@ -121,16 +120,19 @@ public class MemoryManager {
     }
 
     int maxColCount = 0;
-    for (InternalParquetRecordWriter w : writerList.keySet()) {
+    for (InternalParquetRecordWriter<?> w : writerList.keySet()) {
       maxColCount = Math.max(w.getSchema().getColumns().size(), maxColCount);
     }
 
-    for (Map.Entry<InternalParquetRecordWriter, Long> entry : writerList.entrySet()) {
+    for (Map.Entry<InternalParquetRecordWriter<?>, Long> entry : writerList
+        .entrySet()) {
       long newSize = (long) Math.floor(entry.getValue() * scale);
-      if(scale < 1.0 && minMemoryAllocation > 0 && newSize < minMemoryAllocation) {
-          throw new ParquetRuntimeException(String.format("New Memory allocation %d bytes" +
-          " is smaller than the minimum allocation size of %d bytes.",
-              newSize, minMemoryAllocation)){};
+      if (scale < 1.0 && minMemoryAllocation > 0
+          && newSize < minMemoryAllocation) {
+        throw new ParquetMemoryManagerRuntimeException(String.format(
+            "New Memory allocation %d bytes"
+                + " is smaller than the minimum allocation size of %d bytes.",
+            newSize, minMemoryAllocation));
       }
       entry.getKey().setRowGroupSizeThreshold(newSize);
       LOG.debug(String.format("Adjust block size from %,d to %,d for writer: %s",
@@ -150,7 +152,7 @@ public class MemoryManager {
    * Get the writers list
    * @return the writers in this memory manager
    */
-  Map<InternalParquetRecordWriter, Long> getWriterList() {
+  Map<InternalParquetRecordWriter<?>, Long> getWriterList() {
     return writerList;
   }
 
diff --git a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetMemoryManagerRuntimeException.java
similarity index 65%
copy from parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
copy to parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetMemoryManagerRuntimeException.java
index a1d3a8e..a21be24 100644
--- a/parquet-common/src/main/java/org/apache/parquet/ParquetRuntimeException.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetMemoryManagerRuntimeException.java
@@ -16,30 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.parquet;
+package org.apache.parquet.hadoop;
 
-import java.io.Closeable;
-import java.io.IOException;
+import org.apache.parquet.ParquetRuntimeException;
 
 /**
- * The parent class for all runtime exceptions
+ * Signals that an exception of some sort has occurred with the Parquet Memory
+ * Manager.
+ *
+ * @see MemoryManager
  */
-abstract public class ParquetRuntimeException extends RuntimeException {
-  private static final long serialVersionUID = 1L;
-
-  public ParquetRuntimeException() {
-    super();
-  }
+public class ParquetMemoryManagerRuntimeException
+    extends ParquetRuntimeException {
 
-  public ParquetRuntimeException(String message, Throwable cause) {
-    super(message, cause);
-  }
+  private static final long serialVersionUID = 1L;
 
-  public ParquetRuntimeException(String message) {
+  public ParquetMemoryManagerRuntimeException(String message) {
     super(message);
   }
 
-  public ParquetRuntimeException(Throwable cause) {
-    super(cause);
-  }
 }
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestMemoryManager.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestMemoryManager.java
index 21f279b..9d680f3 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestMemoryManager.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestMemoryManager.java
@@ -178,10 +178,10 @@ public class TestMemoryManager {
   }
 
   private long getTotalAllocation() {
-    Set<InternalParquetRecordWriter> writers = ParquetOutputFormat
+    Set<InternalParquetRecordWriter<?>> writers = ParquetOutputFormat
         .getMemoryManager().getWriterList().keySet();
     long total = 0;
-    for (InternalParquetRecordWriter writer : writers) {
+    for (InternalParquetRecordWriter<?> writer : writers) {
       total += writer.getRowGroupSizeThreshold();
     }
     return total;

Reply via email to