This is an automated email from the ASF dual-hosted git repository.

gangwu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/parquet-java.git


The following commit(s) were added to refs/heads/master by this push:
     new 44cb94b83 GH-3310: Clean up JIRA references and move to GH issues 
(#3309)
44cb94b83 is described below

commit 44cb94b8380d9b712719ef749c628ccf594fd8e8
Author: Arnav Balyan <[email protected]>
AuthorDate: Tue Sep 9 08:43:33 2025 +0530

    GH-3310: Clean up JIRA references and move to GH issues (#3309)
---
 .../src/main/java/org/apache/parquet/CorruptStatistics.java         | 4 ++--
 .../main/java/org/apache/parquet/filter2/predicate/FilterApi.java   | 6 +++---
 .../parquet/filter2/predicate/SchemaCompatibilityValidator.java     | 2 +-
 .../java/org/apache/parquet/filter2/predicate/ValidTypeMap.java     | 2 +-
 .../parquet/filter2/recordlevel/FilteringPrimitiveConverter.java    | 2 +-
 .../recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java   | 2 +-
 .../apache/parquet/filter2/statisticslevel/StatisticsFilter.java    | 2 +-
 .../apache/parquet/format/converter/ParquetMetadataConverter.java   | 2 +-
 .../src/main/java/org/apache/parquet/hadoop/CodecFactory.java       | 2 +-
 .../src/main/java/org/apache/parquet/hadoop/ParquetReader.java      | 2 +-
 10 files changed, 13 insertions(+), 13 deletions(-)

diff --git 
a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java 
b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
index 28a99caec..c5846f9ef 100644
--- a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
+++ b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
@@ -27,7 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * There was a bug (PARQUET-251) that caused the statistics metadata
+ * There was a bug (https://github.com/apache/parquet-java/issues/1433) that 
caused the statistics metadata
  * for binary columns to be corrupted in the write path.
  * <p>
  * This class is used to detect whether a file was written with this bug,
@@ -38,7 +38,7 @@ public class CorruptStatistics {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(CorruptStatistics.class);
 
-  // the version in which the bug described by jira: PARQUET-251 was fixed
+  // the version in which the bug described by GitHub issue 
https://github.com/apache/parquet-java/issues/1433 was fixed
   // the bug involved writing invalid binary statistics, so stats written 
prior to this
   // fix must be ignored / assumed invalid
   private static final SemanticVersion PARQUET_251_FIXED_VERSION = new 
SemanticVersion(1, 8, 0);
diff --git 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
index 3c5168066..62c52f017 100644
--- 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
+++ 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
@@ -59,13 +59,13 @@ import org.apache.parquet.hadoop.metadata.ColumnPath;
  *   FilterPredicate pred = or(eq(foo, 10), ltEq(bar, 17.0));
  * </pre>
  */
-// TODO: Support repeated columns 
(https://issues.apache.org/jira/browse/PARQUET-34)
+// TODO: Support repeated columns 
(https://github.com/apache/parquet-java/issues/1452)
 //
 // TODO: Support filtering on groups (eg, filter where this group is / isn't 
null)
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-43)
+// TODO: (https://github.com/apache/parquet-format/issues/261)
 
 // TODO: Consider adding support for more column types that aren't coupled 
with parquet types, eg Column<String>
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-35)
+// TODO: (https://github.com/apache/parquet-java/issues/1453)
 public final class FilterApi {
   private FilterApi() {}
 
diff --git 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
index b5708a4a0..650fcb310 100644
--- 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
+++ 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
@@ -54,7 +54,7 @@ import org.apache.parquet.schema.MessageType;
  * <p>
  * TODO: detect if a column is optional or required and validate that eq(null)
  * TODO: is not called on required fields (is that too strict?)
- * TODO: (https://issues.apache.org/jira/browse/PARQUET-44)
+ * TODO: (https://github.com/apache/parquet-java/issues/1472)
  */
 public class SchemaCompatibilityValidator implements 
FilterPredicate.Visitor<Void> {
 
diff --git 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
index 838583ec5..c8ef61e03 100644
--- 
a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
+++ 
b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
@@ -34,7 +34,7 @@ import 
org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
  * when there are type mismatches.
  * <p>
  * TODO: this has some overlap with {@link PrimitiveTypeName#javaType}
- * TODO: (https://issues.apache.org/jira/browse/PARQUET-30)
+ * TODO: (https://github.com/apache/parquet-java/issues/1447)
  */
 public class ValidTypeMap {
   private ValidTypeMap() {}
diff --git 
a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
 
b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
index c1eee2fa3..ab1306c8d 100644
--- 
a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
+++ 
b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
@@ -42,7 +42,7 @@ public class FilteringPrimitiveConverter extends 
PrimitiveConverter {
   // TODO: this works, but
   // TODO: essentially turns off the benefits of dictionary support
   // TODO: even if the underlying delegate supports it.
-  // TODO: we should support it here. 
(https://issues.apache.org/jira/browse/PARQUET-36)
+  // TODO: we should support it here. 
(https://github.com/apache/parquet-java/issues/1392)
   @Override
   public boolean hasDictionarySupport() {
     return false;
diff --git 
a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
 
b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
index d78f93132..51bb36954 100644
--- 
a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
+++ 
b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
@@ -30,7 +30,7 @@ import 
org.apache.parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicat
  * represent columns with a null value, and updates them accordingly.
  * <p>
  * TODO: We could also build an evaluator that detects if enough values are 
known to determine the outcome
- * TODO: of the predicate and quit the record assembly early. 
(https://issues.apache.org/jira/browse/PARQUET-37)
+ * TODO: of the predicate and quit the record assembly early. 
(https://github.com/apache/parquet-java/issues/1455)
  */
 public class IncrementallyUpdatedFilterPredicateEvaluator implements Visitor {
   private static final IncrementallyUpdatedFilterPredicateEvaluator INSTANCE =
diff --git 
a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
 
b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
index 4d7918c4f..fb7e0bade 100644
--- 
a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
+++ 
b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
@@ -63,7 +63,7 @@ import org.apache.parquet.hadoop.metadata.ColumnPath;
  * false otherwise (including when it is not known, which is often the case).
  */
 // TODO: this belongs in the parquet-column project, but some of the classes 
here need to be moved too
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-38)
+// TODO: (https://github.com/apache/parquet-java/issues/1458)
 public class StatisticsFilter implements FilterPredicate.Visitor<Boolean> {
 
   private static final boolean BLOCK_MIGHT_MATCH = false;
diff --git 
a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
 
b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
index d20ac7fae..10728dfae 100644
--- 
a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
+++ 
b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
@@ -139,7 +139,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 // TODO: This file has become too long!
-// TODO: Lets split it up: https://issues.apache.org/jira/browse/PARQUET-310
+// TODO: Lets split it up: https://github.com/apache/parquet-java/issues/1835
 public class ParquetMetadataConverter {
 
   private static final TypeDefinedOrder TYPE_DEFINED_ORDER = new 
TypeDefinedOrder();
diff --git 
a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java 
b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
index f1041a83b..eee5fa608 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
@@ -171,7 +171,7 @@ public class CodecFactory implements 
CompressionCodecFactory {
       InputStream is = codec.createInputStream(bytes.toInputStream(), 
decompressor);
 
       // We need to explicitly close the ZstdDecompressorStream here to 
release the resources it holds to
-      // avoid off-heap memory fragmentation issue, see 
https://issues.apache.org/jira/browse/PARQUET-2160.
+      // avoid off-heap memory fragmentation issue, see 
https://github.com/apache/parquet-format/issues/398.
       // This change will load the decompressor stream into heap a little 
earlier, since the problem it solves
       // only happens in the ZSTD codec, so this modification is only made for 
ZSTD streams.
       if (codec instanceof ZstandardCodec) {
diff --git 
a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java 
b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
index be599ba56..f08e45ced 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
@@ -49,7 +49,7 @@ import org.apache.parquet.io.InputFile;
 
 /**
  * Read records from a Parquet file.
- * TODO: too many constructors 
(https://issues.apache.org/jira/browse/PARQUET-39)
+ * TODO: too many constructors 
(https://github.com/apache/parquet-java/issues/1466)
  */
 public class ParquetReader<T> implements Closeable {
 

Reply via email to