jon-wei commented on a change in pull request #7490: Add reload by interval API
URL: https://github.com/apache/incubator-druid/pull/7490#discussion_r277936651
 
 

 ##########
 File path: 
server/src/main/java/org/apache/druid/metadata/SQLMetadataSegmentManager.java
 ##########
 @@ -219,83 +218,144 @@ public void stop()
     }
   }
 
-  @Override
-  public boolean enableDataSource(final String dataSource)
+  private VersionedIntervalTimeline<String, DataSegment> 
getVersionedIntervalTimeline(final String dataSource, final Interval interval)
   {
-    try {
-      final IDBI dbi = connector.getDBI();
-      VersionedIntervalTimeline<String, DataSegment> segmentTimeline = 
connector.inReadOnlyTransaction(
-          (handle, status) -> VersionedIntervalTimeline.forSegments(
-              Iterators.transform(
-                  handle
-                      .createQuery(
-                          StringUtils.format(
-                              "SELECT payload FROM %s WHERE dataSource = 
:dataSource",
-                              getSegmentsTable()
-                          )
-                      )
-                      .setFetchSize(connector.getStreamingFetchSize())
-                      .bind("dataSource", dataSource)
-                      .map(ByteArrayMapper.FIRST)
-                      .iterator(),
-                  payload -> {
-                    try {
-                      return jsonMapper.readValue(payload, DataSegment.class);
-                    }
-                    catch (IOException e) {
-                      throw new RuntimeException(e);
-                    }
+    return connector.inReadOnlyTransaction(
+        (handle, status) -> VersionedIntervalTimeline.forSegments(
+            Iterators.transform(
+                handle
+                    .createQuery(
+                        StringUtils.format(
+                            "SELECT payload FROM %1$s WHERE dataSource = 
:dataSource AND start >= :start AND %2$send%2$s <= :end",
+                            getSegmentsTable(), connector.getQuoteString()
+                        )
+                    )
+                    .setFetchSize(connector.getStreamingFetchSize())
+                    .bind("dataSource", dataSource)
+                    .bind("start", interval.getStart().toString())
+                    .bind("end", interval.getEnd().toString())
+                    .map(ByteArrayMapper.FIRST)
+                    .iterator(),
+                payload -> {
+                  try {
+                    return jsonMapper.readValue(payload, DataSegment.class);
                   }
-              )
-
-          )
-      );
-
-      final List<DataSegment> segments = new ArrayList<>();
-      List<TimelineObjectHolder<String, DataSegment>> timelineObjectHolders = 
segmentTimeline.lookup(
-          Intervals.of("0000-01-01/3000-01-01")
-      );
-      for (TimelineObjectHolder<String, DataSegment> objectHolder : 
timelineObjectHolders) {
-        for (PartitionChunk<DataSegment> partitionChunk : 
objectHolder.getObject()) {
-          segments.add(partitionChunk.getObject());
-        }
-      }
-
-      if (segments.isEmpty()) {
-        log.warn("No segments found in the database!");
-        return false;
-      }
-
-      dbi.withHandle(
-          new HandleCallback<Void>()
-          {
-            @Override
-            public Void withHandle(Handle handle)
-            {
-              Batch batch = handle.createBatch();
+                  catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                }
+            )
+        )
+    );
+  }
 
-              for (DataSegment segment : segments) {
-                batch.add(
+  private VersionedIntervalTimeline<String, DataSegment> 
getVersionedIntervalTimeline(final String dataSource, final Collection<String> 
segmentIds)
+  {
+    return connector.inReadOnlyTransaction(
+        (handle, status) -> 
VersionedIntervalTimeline.forSegments(segmentIds.stream().map(segmentId -> {
+          try {
+            return jsonMapper.readValue(StreamSupport.stream(
+                handle.createQuery(
                     StringUtils.format(
-                        "UPDATE %s SET used=true WHERE id = '%s'",
-                        getSegmentsTable(),
-                        segment.getId()
+                        "SELECT payload FROM %1$s WHERE dataSource = 
:dataSource AND id = :id",
+                        getSegmentsTable()
                     )
-                );
-              }
-              batch.execute();
-
-              return null;
-            }
+                )
+                .setFetchSize(connector.getStreamingFetchSize())
+                .bind("dataSource", dataSource)
+                .bind("id", segmentId)
+                .map(ByteArrayMapper.FIRST)
+                .spliterator(), false
+            ).findFirst().orElseThrow(
+                () -> new UnknownSegmentIdException(StringUtils.format("Cannot 
find segment id [%s]", segmentId))
+            ), DataSegment.class);
           }
-      );
+          catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }).collect(Collectors.toList()))
+    );
+  }
+
+  private Stream<SegmentId> segmentIdsForInterval(
+      final VersionedIntervalTimeline<String, DataSegment> 
versionedIntervalTimeline,
+      final Interval interval
+  )
+  {
+    return versionedIntervalTimeline.lookup(interval).stream().flatMap(
+        objectHolder -> 
StreamSupport.stream(objectHolder.getObject().spliterator(), false).map(
+            dataSegmentPartitionChunk -> 
dataSegmentPartitionChunk.getObject().getId()
+        )
+    );
+  }
+
+  @Override
+  public boolean enableDataSource(final String dataSource)
+  {
+    try {
+      return enableSegments(dataSource, Intervals.ETERNITY) != 0;
     }
     catch (Exception e) {
       log.error(e, "Exception enabling datasource %s", dataSource);
       return false;
     }
+  }
 
-    return true;
+  @Override
+  public int enableSegments(final String dataSource, final Interval interval)
 
 Review comment:
  `addOverlappingEnabledSegmentsToVersionIntervalTimeline` uses a potentially 
stale, cached view of the currently enabled segments that is refreshed by polling 
(default period: 1 minute), while `buildVersionedIntervalTimeline(dataSource, 
interval)` reads fresh data from the metadata store. I think it would be better 
to perform the enable based on a single, self-consistent view of the segments, 
so I'll suggest the following:
   
   For the interval case:
   1. Create an empty timeline
   2. Query the metadata store for both the used status and payload of segments 
that overlap with the provided interval (SELECT used, payload...)
  3. Go through the used/segment pairs: if the segment is used, add it to the 
timeline. If the segment is not used, check whether the segment's interval is 
contained by the provided interval. If the unused segment is contained, add it 
to the timeline and also track it in a 
separate list.
   4. Go through the list of unused+contained segments in the list built in 
step 3, and enable the unused+contained segments for which isOvershadowed 
returns false. 
   
   For the segmentId case:
   1. Create an empty timeline
   2. Add the provided segments to the timeline
   3. Using JodaUtils.condenseIntervals(), build a condensed list of intervals 
from the intervals of the provided segment set
   4. In a single transaction, issue a used=true + interval overlaps query for 
each interval in the condensed intervals list constructed in 3, and add these 
used+overlapping segments to the timeline
   5. For each provided segment, enable it if isOvershadowed is false

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to