This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 5e51ee0  [MINOR] Fix string interpolation in CommandUtils.scala and 
KafkaDataConsumer.scala
5e51ee0 is described below

commit 5e51ee006b1610b0cf3ced69c11a54ba1fb9ccd8
Author: Terry Kim <[email protected]>
AuthorDate: Sun Dec 6 12:03:14 2020 +0900

    [MINOR] Fix string interpolation in CommandUtils.scala and 
KafkaDataConsumer.scala
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to fix string interpolation bugs in `CommandUtils.scala` and 
`KafkaDataConsumer.scala` (string literals using `$variable` placeholders were 
missing the `s` interpolator prefix, so the placeholders were emitted verbatim).
    
    ### Why are the changes needed?
    
    To fix a string interpolation bug.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, the string will be correctly constructed.
    
    ### How was this patch tested?
    
    Existing tests, since the affected strings were used in exception/log messages.
    
    Closes #30609 from imback82/fix_cache_str_interporlation.
    
    Authored-by: Terry Kim <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
    (cherry picked from commit 154f6044033d1a3b4c19c64b206b168bf919cb3b)
    Signed-off-by: HyukjinKwon <[email protected]>
---
 .../org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala      | 2 +-
 .../scala/org/apache/spark/sql/execution/command/CommandUtils.scala     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
index f2bf7cd..649430d 100644
--- 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
+++ 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
@@ -276,7 +276,7 @@ private[kafka010] class KafkaDataConsumer(
     val fetchedData = getOrRetrieveFetchedData(offset)
 
     logDebug(s"Get $groupId $topicPartition nextOffset 
${fetchedData.nextOffsetInFetchedData} " +
-      "requested $offset")
+      s"requested $offset")
 
     // The following loop is basically for `failOnDataLoss = false`. When 
`failOnDataLoss` is
     // `false`, first, we will try to fetch the record at `offset`. If no such 
record exists, then
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
index f86f62b..15a735b 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
@@ -391,7 +391,7 @@ object CommandUtils extends Logging {
     try {
       sparkSession.catalog.uncacheTable(name)
     } catch {
-      case NonFatal(e) => logWarning("Exception when attempting to uncache 
$name", e)
+      case NonFatal(e) => logWarning(s"Exception when attempting to uncache 
$name", e)
     }
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to