This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 66b1bdb  [MINOR] Fix string interpolation in CommandUtils.scala and 
KafkaDataConsumer.scala
66b1bdb is described below

commit 66b1bdb24119c345db4d0fb0a5d80f7beadff582
Author: Terry Kim <[email protected]>
AuthorDate: Sun Dec 6 12:03:14 2020 +0900

    [MINOR] Fix string interpolation in CommandUtils.scala and 
KafkaDataConsumer.scala
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to fix string interpolation bugs in `CommandUtils.scala` and 
`KafkaDataConsumer.scala`.
    
    ### Why are the changes needed?
    
    To fix a string interpolation bug.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, the strings will now be correctly constructed.
    
    ### How was this patch tested?
    
    Existing tests, since the affected strings are used in exception/log messages.
    
    Closes #30609 from imback82/fix_cache_str_interporlation.
    
    Authored-by: Terry Kim <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
    (cherry picked from commit 154f6044033d1a3b4c19c64b206b168bf919cb3b)
    Signed-off-by: HyukjinKwon <[email protected]>
---
 .../org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala      | 2 +-
 .../scala/org/apache/spark/sql/execution/command/CommandUtils.scala     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
index 5f23029..df51c4f 100644
--- 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
+++ 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/consumer/KafkaDataConsumer.scala
@@ -275,7 +275,7 @@ private[kafka010] class KafkaDataConsumer(
     val fetchedData = getOrRetrieveFetchedData(offset)
 
     logDebug(s"Get $groupId $topicPartition nextOffset 
${fetchedData.nextOffsetInFetchedData} " +
-      "requested $offset")
+      s"requested $offset")
 
     // The following loop is basically for `failOnDataLoss = false`. When 
`failOnDataLoss` is
     // `false`, first, we will try to fetch the record at `offset`. If no such 
record exists, then
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
index 7e456a6..34e0ac9 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
@@ -390,7 +390,7 @@ object CommandUtils extends Logging {
     try {
       sparkSession.catalog.uncacheTable(name)
     } catch {
-      case NonFatal(e) => logWarning("Exception when attempting to uncache 
$name", e)
+      case NonFatal(e) => logWarning(s"Exception when attempting to uncache 
$name", e)
     }
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to