[ https://issues.apache.org/jira/browse/TRAFODION-3065?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16469529#comment-16469529 ]

ASF GitHub Bot commented on TRAFODION-3065:
-------------------------------------------

Github user DaveBirdsall commented on a diff in the pull request:

    https://github.com/apache/trafodion/pull/1557#discussion_r187178015
  
    --- Diff: core/sql/executor/HdfsClient_JNI.cpp ---
    @@ -574,41 +595,50 @@ Int32 HdfsClient::hdfsWrite(const char* data, Int64 len, HDFS_Client_RetCode &hd
          hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
          return 0;
       }
    -
    -  //Write the requisite bytes into the file
    -  jbyteArray jbArray = jenv_->NewByteArray( len);
    -  if (!jbArray) {
    -    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM));
    -    jenv_->PopLocalFrame(NULL);
    -    hdfsClientRetcode =  HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM;
    -    return 0;
    -  }
    -  jenv_->SetByteArrayRegion(jbArray, 0, len, (const jbyte*)data);
    -
    -  if (hdfsStats_ != NULL)
    -     hdfsStats_->getHdfsTimer().start();
    -
    -  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_WRITE].jm_full_name;
    -  // Java method returns the cumulative bytes written
    -  jint totalBytesWritten = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_WRITE].methodID, jbArray);
    -
    -  if (hdfsStats_ != NULL) {
    -      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
    -      hdfsStats_->incHdfsCalls();
    -  }
    -  if (jenv_->ExceptionCheck())
    +  Int64 lenRemain = len;
    +  Int64 writeLen;
    +  Int64 chunkLen = (ioByteArraySize_ > 0 ? ioByteArraySize_ * 1024 : 0);
    +  Int64 offset = 0;
    +  do 
       {
    -    getExceptionDetails(__FILE__, __LINE__, "HdfsClient::hdfsWrite()");
    -    jenv_->PopLocalFrame(NULL);
    -    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
    -    return 0;
    -  }
    -
    +     if ((chunkLen > 0) && (lenRemain > chunkLen))
    +        writeLen = chunkLen; 
    +     else
    +        writeLen = lenRemain;
    +     //Write the requisite bytes into the file
    +     jbyteArray jbArray = jenv_->NewByteArray(writeLen);
    +     if (!jbArray) {
    +        GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM));
    +        jenv_->PopLocalFrame(NULL);
    +        hdfsClientRetcode =  HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM;
    +        return 0;
    +     }
    +     jenv_->SetByteArrayRegion(jbArray, 0, writeLen, (const jbyte*)(data+offset));
    +
    +     if (hdfsStats_ != NULL)
    +         hdfsStats_->getHdfsTimer().start();
    +
    +     tsRecentJMFromJNI = JavaMethods_[JM_HDFS_WRITE].jm_full_name;
    +     // Java method returns the cumulative bytes written
    +     jint totalBytesWritten = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_WRITE].methodID, jbArray);
    +
    +     if (hdfsStats_ != NULL) {
    +         hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
    +         hdfsStats_->incHdfsCalls();
    +     }
    +     if (jenv_->ExceptionCheck())
    +     {
    +        getExceptionDetails(__FILE__, __LINE__, "HdfsClient::hdfsWrite()");
    +        jenv_->PopLocalFrame(NULL);
    +        hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
    +        return 0;
    +     }
    +     lenRemain -= writeLen;
    +     offset += writeLen;
    +  } while (lenRemain > 0);
    --- End diff --
    
    What happens if len is initially zero or negative? (Usually one codes a 
"while" instead of "do while" so that zero cases are harmless).


> Trafodion to support compressed Hive Text formatted tables
> ----------------------------------------------------------
>
>                 Key: TRAFODION-3065
>                 URL: https://issues.apache.org/jira/browse/TRAFODION-3065
>             Project: Apache Trafodion
>          Issue Type: New Feature
>          Components: sql-cmp, sql-exe
>            Reporter: Selvaganesan Govindarajan
>            Assignee: Selvaganesan Govindarajan
>            Priority: Major
>             Fix For: 2.3
>
>
> Currently, Trafodion doesn't support compressed Hive text formatted files.
> Compressed file support will be provided only with the refactored HDFS scan
> implementation.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
