This is an automated email from the ASF dual-hosted git repository.
mdedetrich pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-pekko-connectors.git
The following commit(s) were added to refs/heads/main by this push:
new 32facdd80 Change materializer type for chunkUploadSink in S3 DSLs
32facdd80 is described below
commit 32facdd804b61e3c0839aef06b1091f0290538ca
Author: Matthew de Detrich <[email protected]>
AuthorDate: Sun Nov 12 20:34:36 2023 +0100
Change materializer type for chunkUploadSink in S3 DSLs
---
.../chunkUploadSink-type-param-change.backwards.excludes | 12 ++++++++++++
.../org/apache/pekko/stream/connectors/s3/javadsl/S3.scala | 12 ++++++------
.../org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala | 8 ++++----
3 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes b/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes
new file mode 100644
index 000000000..1d9a11221
--- /dev/null
+++ b/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes
@@ -0,0 +1,12 @@
+# These filters are here because a type argument in the chunkUploadSink parameter was changed from
+# NotUsed to _ which has zero effect on runtime
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.resumeMultipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.multipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.resumeMultipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithHeadersAndContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithHeadersAndContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithHeadersAndContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithContext")
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithHeadersAndContext")
diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala
index 41078d8f0..5873d978d 100644
--- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala
+++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala
@@ -961,7 +961,7 @@ object S3 {
def multipartUploadWithContext[C](
bucket: String,
key: String,
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed],
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _],
contentType: ContentType,
s3Headers: S3Headers): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] =
S3Stream
@@ -1003,7 +1003,7 @@ object S3 {
def multipartUploadWithContext[C](
bucket: String,
key: String,
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed],
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _],
contentType: ContentType): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] =
multipartUploadWithContext[C](bucket,
key,
@@ -1034,7 +1034,7 @@ object S3 {
def multipartUploadWithContext[C](
bucket: String,
key: String,
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed])
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _])
: Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] =
multipartUploadWithContext[C](bucket, key, chunkUploadSink,
ContentTypes.APPLICATION_OCTET_STREAM)
@@ -1133,7 +1133,7 @@ object S3 {
key: String,
uploadId: String,
previousParts: java.lang.Iterable[Part],
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed],
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _],
contentType: ContentType,
s3Headers: S3Headers): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = {
S3Stream
@@ -1183,7 +1183,7 @@ object S3 {
key: String,
uploadId: String,
previousParts: java.lang.Iterable[Part],
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed],
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _],
contentType: ContentType): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] =
resumeMultipartUploadWithContext[C](bucket,
key,
@@ -1221,7 +1221,7 @@ object S3 {
key: String,
uploadId: String,
previousParts: java.lang.Iterable[Part],
- chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed])
+ chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _])
: Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] =
resumeMultipartUploadWithContext[C](bucket,
key,
diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala
index 56cd9bc86..9a09fef56 100644
--- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala
+++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala
@@ -581,7 +581,7 @@ object S3 {
def multipartUploadWithContext[C](
bucket: String,
key: String,
- chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed],
+ chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _],
contentType: ContentType = ContentTypes.`application/octet-stream`,
metaHeaders: MetaHeaders = MetaHeaders(Map()),
cannedAcl: CannedAcl = CannedAcl.Private,
@@ -626,7 +626,7 @@ object S3 {
def multipartUploadWithHeadersAndContext[C](
bucket: String,
key: String,
- chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed],
+ chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _],
contentType: ContentType = ContentTypes.`application/octet-stream`,
chunkSize: Int = MinChunkSize,
chunkingParallelism: Int = 4,
@@ -710,7 +710,7 @@ object S3 {
key: String,
uploadId: String,
previousParts: immutable.Iterable[Part],
- chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed],
+ chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _],
contentType: ContentType = ContentTypes.`application/octet-stream`,
metaHeaders: MetaHeaders = MetaHeaders(Map()),
cannedAcl: CannedAcl = CannedAcl.Private,
@@ -797,7 +797,7 @@ object S3 {
key: String,
uploadId: String,
previousParts: immutable.Iterable[Part],
- chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed],
+ chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _],
contentType: ContentType = ContentTypes.`application/octet-stream`,
chunkSize: Int = MinChunkSize,
chunkingParallelism: Int = 4,
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]