[ https://issues.apache.org/jira/browse/HDFS-17630?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17897237#comment-17897237 ]
ASF GitHub Bot commented on HDFS-17630:
---------------------------------------

dongjoon-hyun commented on code in PR #7063:
URL: https://github.com/apache/hadoop/pull/7063#discussion_r1836973959


##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java:
##########

@@ -162,8 +162,10 @@ private void doRead(ReadableByteChannel ch, InputStream in)
     // Sanity check the buffer size so we don't allocate too much memory
     // and OOME.
+    int maxPacketSize = MAX_PACKET_SIZE == 0 ?
+        HdfsClientConfigKeys.DFS_DATA_TRANSFER_MAX_PACKET_SIZE_DEFAULT : MAX_PACKET_SIZE;
     int totalLen = payloadLen + headerLen;
-    if (totalLen < 0 || totalLen > MAX_PACKET_SIZE) {
+    if (totalLen < 0 || totalLen > maxPacketSize) {

Review Comment:
   Why do we check this static final variable, `MAX_PACKET_SIZE`, at this runtime layer instead of at initialization?
   https://github.com/apache/hadoop/blob/9a743bd17f55245841db3de5d1c785282306d408/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java#L80-L85


> Avoid PacketReceiver#MAX_PACKET_SIZE Initialized to 0
> -----------------------------------------------------
>
>                 Key: HDFS-17630
>                 URL: https://issues.apache.org/jira/browse/HDFS-17630
>             Project: Hadoop HDFS
>          Issue Type: Bug
>    Affects Versions: 3.4.0
>            Reporter: dzcxzl
>            Priority: Major
>              Labels: pull-request-available
>
> Nested calls cause PacketReceiver#MAX_PACKET_SIZE to be 0: PacketReceiver's static initializer reads the packet-size setting from Configuration, and that lookup re-enters the HDFS client read path (see the stack trace below) while MAX_PACKET_SIZE still holds its default value of 0.
>
> {code:java}
> java.io.IOException: Incorrect value for packet payload size: 1014776
>         at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:167)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:112)
>         at org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.readNextPacket(BlockReaderRemote.java:187)
>         at org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.read(BlockReaderRemote.java:146)
>         at org.apache.hadoop.hdfs.ByteArrayStrategy.readFromBlock(ReaderStrategy.java:118)
>         at org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:789)
>         at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:855)
>         at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:919)
>         at java.base/java.io.DataInputStream.read(DataInputStream.java:158)
>         at java.base/java.io.InputStream.transferTo(InputStream.java:796)
>         at java.base/java.nio.file.Files.copy(Files.java:3151)
>         at java.base/sun.net.www.protocol.jar.URLJarFile$1.run(URLJarFile.java:216)
>         at java.base/sun.net.www.protocol.jar.URLJarFile$1.run(URLJarFile.java:212)
>         at java.base/java.security.AccessController.doPrivileged(AccessController.java:571)
>         at org.apache.hadoop.conf.Configuration.getTrimmed(Configuration.java:1319)
>         at org.apache.hadoop.conf.Configuration.getInt(Configuration.java:1545)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.<clinit>(PacketReceiver.java:82)
>         at org.apache.hadoop.hdfs.client.impl.BlockReaderRemote.<init>(BlockReaderRemote.java:101)
> {code}
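For illustration, a minimal, self-contained sketch of the re-entrancy pattern in the stack trace above, using hypothetical `Config` and `Receiver` classes rather than Hadoop code: while a class's static initializer is still running, a nested call back into that class observes the JVM default value 0 for its `static final int` field, which is how the sanity check can see `MAX_PACKET_SIZE` as 0.

{code:java}
// Hypothetical stand-ins for Configuration and PacketReceiver; not Hadoop code.
public class StaticInitReentrancy {

  static class Config {
    // Stands in for Configuration.getInt(): resolving the value calls back
    // into Receiver before Receiver.<clinit> has finished (in the real stack
    // trace this detour goes through URLJarFile and DFSInputStream).
    static int getInt(int defaultValue) {
      Receiver.doRead(1014776); // nested call re-enters Receiver
      return defaultValue;
    }
  }

  static class Receiver {
    // While <clinit> is still running, this field holds the JVM default 0,
    // so the nested call above sees MAX_PACKET_SIZE == 0.
    // (16 MiB is just an arbitrary non-zero default for the sketch.)
    static final int MAX_PACKET_SIZE = Config.getInt(16 * 1024 * 1024);

    static void doRead(int totalLen) {
      if (totalLen < 0 || totalLen > MAX_PACKET_SIZE) {
        System.out.println("Incorrect value for packet payload size: " + totalLen
            + " (MAX_PACKET_SIZE seen as " + MAX_PACKET_SIZE + ")");
      }
    }
  }

  public static void main(String[] args) {
    // Touching Receiver triggers its static initializer; the nested call runs
    // with MAX_PACKET_SIZE still at 0 and reports the bogus packet size.
    System.out.println("After init, MAX_PACKET_SIZE = " + Receiver.MAX_PACKET_SIZE);
  }
}
{code}

In the real trace the nested call goes through Configuration, URLJarFile, and DFSInputStream rather than calling back directly, but the class-initialization behaviour is the same: per JLS 12.4.2, a recursive initialization request by the same thread returns immediately, so the re-entered code runs against the uninitialized field.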