[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185403964 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { --- End diff -- Yes as discussed in which case there is race condition. Please see latest comment above. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185406434 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} -if (status_map[key] == null) { +if (typeof status_map == 'undefined') { +$("#row-"+i).find("#status").text(nAText); + $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); +$("#row-"+i).find("#queriesCount").text(""); +} else if (status_map[key] == null) { $("#row-"+i).find("#status").text("OFFLINE"); $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); $("#row-"+i).find("#queriesCount").text(""); -} -else { +} else { if (status_map[key] == "ONLINE") { $("#row-"+i).find("#status").text(status_map[key]); - $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0); -} -else { +<#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() ) > +if ( location.protocol != "https" || ($("#row-"+i).find("#current").html() == "Current") ) { + $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0).css('cursor','pointer'); +} + +} else { if ($("#row-&quo
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185406443 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} -if (status_map[key] == null) { +if (typeof status_map == 'undefined') { +$("#row-"+i).find("#status").text(nAText); + $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); +$("#row-"+i).find("#queriesCount").text(""); +} else if (status_map[key] == null) { $("#row-"+i).find("#status").text("OFFLINE"); $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); $("#row-"+i).find("#queriesCount").text(""); -} -else { +} else { if (status_map[key] == "ONLINE") { $("#row-"+i).find("#status").text(status_map[key]); - $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0); -} -else { +<#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() ) > +if ( location.protocol != "https" || ($("#row-"+i).find("#current").html() == "Current") ) { + $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0).css('cursor','pointer'); +} + +} else { if ($("#row-&quo
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185403838 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,37 +257,139 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + //Fill the Status table for all the listed drillbits + function fillStatus(dataResponse,size) { + let status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +let rxUpdateCount = 0; +let statusRespList = []; +//Query other nodes for state details +for (j = 1; j <= size; j++) { + let currentRow = $("#row-"+j); + if (currentRow.find("#current").html() == "Current") { +continue; //Skip LocalHost + } + let address = currentRow.find("#address").contents().get(0).nodeValue.trim(); + let restPort = currentRow.find("#httpPort").contents().get(0).nodeValue.trim(); + let altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + let altResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (rxUpdateCount == 0) { --- End diff -- I am not an expert on javascript but little surprised to see how it's allowing reference of `rxUpdateCount` and `statusRespList` inside the callback function `done`, since these variables are defined local to `fillStatus` function. However there is still a race condition here, you cannot avoid it without a lock or a sync call. Please see [.when in jQuery](http://api.jquery.com/jQuery.when/) and this [stackoverflow](https://stackoverflow.com/questions/3709597/wait-until-all-jquery-ajax-requests-are-done) post for example. 
Also, when the number of Drillbits in the cluster is in the hundreds, you will end up making that many requests, which is overkill — why not try, say, 3 Drillbits at most? If you still get undefined after that, then treat the status as not available. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185406413 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; --- End diff -- Ok but status_map is used in listNewDrillbits and point was to use only one, in this case bit_map is fine. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185392204 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -103,14 +105,14 @@ ${drillbit.getState()} Not Available - -<#if ( model.shouldShowAdminInfo() && ( drillbit.isCurrent() || ( !model.isAuthEnabled() && location.protocol != "https" ))) > - -<#else> + + <#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() || drillbit.isCurrent() ) > --- End diff -- I think what is required here is to show shutdown button both for local and remote Drillbits only to admin users. But the button will be hidden and not enabled to click. Then later logic in [line 317](https://github.com/kkhatua/drill/blob/ab3e8619c6259803eb362be290a3a3605839a194/exec/java-exec/src/main/resources/rest/index.ftl#L317) will enable the button. But with current check even if user is non-Admin it will show the button. Check will be as below: ``` <#if (!model.isAuthEnabled() || model.shouldShowAdminInfo())> <#if drillbit.isCurrent()> <#else> ``` Please test with auth enabled and non-admin user too. Sharing example snapshot below. https://user-images.githubusercontent.com/22159459/39505653-a37c1326-4d88-11e8-99c7-d8ab5495c7cb.png;> ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185406458 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -328,9 +432,11 @@ //Iterates through all the nodes for update function reloadMetrics() { for (i = 1; i <= size; i++) { - var address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); - var httpPort = $("#row-"+i).find("#httpPort").contents().get(0).nodeValue.trim(); - updateMetricsHtml(address, httpPort, i); + if ( $("#row-"+i).find("#stateKey").length == 0 ) { +var address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); +var httpPort = $("#row-"+i).find("#httpPort").contents().get(0).nodeValue.trim(); +updateMetricsHtml(address, httpPort, i); --- End diff -- Not true please see the braces. They evaluate to different outcome. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185113634 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -328,9 +432,11 @@ //Iterates through all the nodes for update function reloadMetrics() { for (i = 1; i <= size; i++) { - var address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); - var httpPort = $("#row-"+i).find("#httpPort").contents().get(0).nodeValue.trim(); - updateMetricsHtml(address, httpPort, i); + if ( $("#row-"+i).find("#stateKey").length == 0 ) { +var address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); +var httpPort = $("#row-"+i).find("#httpPort").contents().get(0).nodeValue.trim(); +updateMetricsHtml(address, httpPort, i); --- End diff -- Please update the check inside `updateMetricsHtml`, it will not refresh for local bit in case when Auth is enabled. ``` Current: if ( !updateRemoteInfo || (location.protocol == "https" && remoteHost != location.host) ) { return; } Should Be: if ( (!updateRemoteInfo || (location.protocol == "https") && remoteHost != location.host ) { return; } ``` ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185094337 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} -if (status_map[key] == null) { +if (typeof status_map == 'undefined') { +$("#row-"+i).find("#status").text(nAText); + $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); +$("#row-"+i).find("#queriesCount").text(""); +} else if (status_map[key] == null) { --- End diff -- There is a different mode called `Quiescent` where Drillbit will be OFFLINE but will not deregister itself from Zookeeper. In that case showing Drillbit as `OFFLINE` is fine as compared to when Drillbit has deregistered itself from Zookeeper. `status_map[key] `will be `null` for second scenario where I guess we should not show any info as it's not available. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185066896 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey --- End diff -- Please store the reference to the row object and use it here and below rather than searching it every time. 
`let currentRow = $("#row-"+i)` Also, the comment looks wrong, since the `if` condition is actually checking whether there is any row with a stateKey, not the opposite. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185108515 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} -if (status_map[key] == null) { +if (typeof status_map == 'undefined') { +$("#row-"+i).find("#status").text(nAText); + $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); +$("#row-"+i).find("#queriesCount").text(""); +} else if (status_map[key] == null) { $("#row-"+i).find("#status").text("OFFLINE"); $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); $("#row-"+i).find("#queriesCount").text(""); -} -else { +} else { if (status_map[key] == "ONLINE") { $("#row-"+i).find("#status").text(status_map[key]); - $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0); -} -else { +<#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() ) > +if ( location.protocol != "https" || ($("#row-"+i).find("#current").html() == "Current") ) { + $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0).css('cursor','pointer'); +} + +} else { if ($("#row-&quo
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185072034 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} --- End diff -- the if-else condition can be removed. I don't think stateKey is needed at all. ``` let address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); let port = $("#row-"+i).find("#httpPort").html(); key = address+"-"+port; ``` ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185063186 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) --- End diff -- go**A**tResponse ? ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185114420 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -328,9 +432,11 @@ //Iterates through all the nodes for update function reloadMetrics() { for (i = 1; i <= size; i++) { - var address = $("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); - var httpPort = $("#row-"+i).find("#httpPort").contents().get(0).nodeValue.trim(); - updateMetricsHtml(address, httpPort, i); + if ( $("#row-"+i).find("#stateKey").length == 0 ) { --- End diff -- This check means we are not loading the metrics for newly available Drillbits, why is that ? I think instead we should check if the `status is ONLINE` then only `updateMetricsHtml`. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185095442 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; +if (typeof status_map != 'undefined') { +for (var k in status_map) { + bitMap[k] = status_map[k]; +} +} +for (i = 1; i <= size; i++) { +let key = ""; +if ($("#row-"+i).find("#stateKey").length > 0) { //Check if newBit that has no stateKey + key = $("#row-"+i).find("#stateKey").textContent; +} else { + let address = 
$("#row-"+i).find("#address").contents().get(0).nodeValue.trim(); + let port = $("#row-"+i).find("#httpPort").html(); + key = address+"-"+port; +} -if (status_map[key] == null) { +if (typeof status_map == 'undefined') { +$("#row-"+i).find("#status").text(nAText); + $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); +$("#row-"+i).find("#queriesCount").text(""); +} else if (status_map[key] == null) { $("#row-"+i).find("#status").text("OFFLINE"); $("#row-"+i).find("#shutdown").prop('disabled',true).css('opacity',0.5); $("#row-"+i).find("#queriesCount").text(""); -} -else { +} else { if (status_map[key] == "ONLINE") { $("#row-"+i).find("#status").text(status_map[key]); - $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0); -} -else { +<#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() ) > +if ( location.protocol != "https" || ($("#row-"+i).find("#current").html() == "Current") ) { + $("#row-"+i).find("#shutdown").prop('disabled',false).css('opacity',1.0).css('cursor','pointer'); +} + +} else { if ($("#row-&quo
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185071475 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { +updateStatusAndShutdown(status_map); + } + } + + function updateStatusAndShutdown(status_map) { +let bitMap = {}; --- End diff -- Doesn't look like `bitMap` is needed. Why not just use `status_map` here and in `listNewDrillbits` ? ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r184873670 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { --- End diff -- Please put a comment on `fillStatus` about what the function does, e.g., that it gets the state from remote bits even though the local bit is down. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185064331 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -252,33 +255,129 @@ timeout = setTimeout(reloadStatus, refreshTime); } - function fillStatus(data,size) { - var status_map = (data.responseJSON); - for (i = 1; i <= size; i++) { -var address = $("#row-"+i).find("#address").contents().get(0).nodeValue; -address = address.trim(); -var port = $("#row-"+i).find("#port").html(); -var key = address+"-"+port; + function fillStatus(dataResponse,size) { + var status_map = (dataResponse.responseJSON); + //In case localhost has gone down (i.e. we don't know status from ZK) + if (typeof status_map == 'undefined') { +//Query other nodes for state details +for (j = 1; j <= size; j++) { + if ($("#row-"+j).find("#current").html() == "Current") { +continue; //Skip LocalHost + } + var address = $("#row-"+j).find("#address").contents().get(0).nodeValue.trim(); + var restPort = $("#row-"+j).find("#httpPort").contents().get(0).nodeValue.trim(); + var altStateUrl = location.protocol + "//" + address+":"+restPort + "/state"; + var goatResponse = $.getJSON(altStateUrl) +.done(function(stateDataJson) { +//Update Status & Buttons for alternate stateData +if (typeof status_map == 'undefined') { + status_map = (stateDataJson); //Update + updateStatusAndShutdown(stateDataJson); +} + }); + //Don't loop any more + if (typeof status_map != 'undefined') { +break; + } +} + } else { --- End diff -- You can get rid of this else and just call `updateStatusAndShutdown` instead. That way, inside the `.done` callback of `.getJSON`, you also don't have to call `updateStatusAndShutdown` explicitly. ---
[GitHub] drill pull request #1241: DRILL-6364: Handle Cluster Info in WebUI when exis...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1241#discussion_r185115283 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -103,14 +105,14 @@ ${drillbit.getState()} Not Available - -<#if ( model.shouldShowAdminInfo() && ( drillbit.isCurrent() || ( !model.isAuthEnabled() && location.protocol != "https" ))) > - -<#else> + + <#if ( model.shouldShowAdminInfo() || !model.isAuthEnabled() || drillbit.isCurrent() ) > --- End diff -- This check doesn't look correct to me. `shouldShowAdminInfo` checks If the logged in user is admin or not. So for admin user it will show the shutdown button in if block even for remote Drillbit which is not the intention. ---
[GitHub] drill issue #1240: DRILL-6327: Update unary operators to handle IterOutcome....
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1240 @parthchandra - please help to review this PR. ---
[GitHub] drill pull request #1240: DRILL-6327: Update unary operators to handle IterO...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1240 DRILL-6327: Update unary operators to handle IterOutcome.EMIT Note: Handles for Non-Blocking Unary operators (like Filter/Project/etc) with EMIT Iter.Outcome You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6327 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1240.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1240 commit 74b9ead99cf1f92713ab9b81f23f680bd1a9cfff Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-04-05T00:54:58Z DRILL-6327: Update unary operators to handle IterOutcome.EMIT Note: Handles for Non-Blocking Unary operators (like Filter/Project/etc) with EMIT Iter.Outcome ---
[GitHub] drill issue #1217: DRILL-6302: Fixed NPE in Drillbit close method
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1217 +1 LGTM. ---
[GitHub] drill pull request #1227: Drill-6236: batch sizing for hash join
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1227#discussion_r183181073 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java --- @@ -560,6 +554,40 @@ public void close() { super.close(); } + @Override + protected void updateBatchMemoryManagerStats() { +stats.setLongStat(Metric.LEFT_INPUT_BATCH_COUNT, batchMemoryManager.getNumIncomingBatches(LEFT_INDEX)); --- End diff -- Should be fine. ---
[GitHub] drill pull request #1227: Drill 6236: batch sizing for hash join
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1227#discussion_r183169484 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java --- @@ -560,6 +554,40 @@ public void close() { super.close(); } + @Override + protected void updateBatchMemoryManagerStats() { +stats.setLongStat(Metric.LEFT_INPUT_BATCH_COUNT, batchMemoryManager.getNumIncomingBatches(LEFT_INDEX)); --- End diff -- @ppadma - The main motive of moving the metrics inside JoinBatchMemoryManager was to avoid duplicate definition in all the BinaryRecordBatches. I think we should improve on OperatorMetricsRegistry rather than just overriding this method. Or if you want you can create a new JIRA to track metrics related improvement. ---
[GitHub] drill pull request #1227: Drill 6236: batch sizing for hash join
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1227#discussion_r183145171 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java --- @@ -147,7 +150,19 @@ NUM_BUCKETS, NUM_ENTRIES, NUM_RESIZING, -RESIZING_TIME_MS; +RESIZING_TIME_MS, +LEFT_INPUT_BATCH_COUNT, +LEFT_AVG_INPUT_BATCH_BYTES, +LEFT_AVG_INPUT_ROW_BYTES, +LEFT_INPUT_RECORD_COUNT, +RIGHT_INPUT_BATCH_COUNT, +RIGHT_AVG_INPUT_BATCH_BYTES, +RIGHT_AVG_INPUT_ROW_BYTES, +RIGHT_INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AVG_OUTPUT_BATCH_BYTES, +AVG_OUTPUT_ROW_BYTES, +OUTPUT_RECORD_COUNT; --- End diff -- Putting these metrics inside operator Metric class will not work. For joins these metrics were moved inside `JoinBatchMemoryManager.Metric` class since they are memory manager metrics. So when you call `updateBatchMemoryManagerStats()` it updates the operator stats but using ordinals from `JoinBatchMemoryManager.Metric` class. So the ordinal for LEFT_INPUT_BATCH_COUNT will be 0 not 4 (which is required). I think we should improve our OperatorsMetricRegistry to register multiple Metric classes for an operator. ---
[GitHub] drill pull request #1227: Drill 6236: batch sizing for hash join
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1227#discussion_r183145138 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java --- @@ -157,14 +172,20 @@ public int metricId() { } } + public class HashJoinMemoryManager extends JoinBatchMemoryManager { --- End diff -- Not required you can directly use `JoinBatchMemoryManager`. ---
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183107099 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183110851 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183106122 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183107693 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182873443 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183128175 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182873668 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183114381 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183116678 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182640359 --- Diff: exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/unnest/TestingLateralJoinBatch.java --- @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.config.LateralJoinPOP; +import org.apache.drill.exec.physical.impl.join.LateralJoinBatch; +import org.apache.drill.exec.record.RecordBatch; + +/** + * Create a derived class so we can access the protected ctor + */ +public class TestingLateralJoinBatch extends LateralJoinBatch{ --- End diff -- Not needed anymore; the constructor of LateralJoinBatch was made public. ---
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182628778 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestImpl.java --- @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.collect.ImmutableList; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Contains the actual unnest operation. Unnest is a simple transfer operation in this impelementation. 
+ * For use as a table function, we will need to change the logic of the unnest method to operate on + * more than one row at a time and remove any dependence on Lateral + * {@link org.apache.drill.exec.physical.impl.flatten.FlattenTemplate}. + * This class follows the pattern of other operators that generate code at runtime. Normally this class + * would be abstract and have placeholders for doSetup and doEval. Unnest however, doesn't require code + * generation so we can simply implement the code in a simple class that looks similar to the code gen + * templates used by other operators but does not implement the doSetup and doEval methods. + */ +public class UnnestImpl implements Unnest { + private static final Logger logger = LoggerFactory.getLogger(UnnestImpl.class); + + private static final int OUTPUT_ROW_COUNT = ValueVector.MAX_ROW_COUNT; + + private ImmutableList transfers; + private LateralContract lateral; // corresponding lateral Join (or other operator implementing the Lateral Contract) + private SelectionVectorMode svMode; + private RepeatedValueVector fieldToUnnest; + private RepeatedValueVector.RepeatedAccessor accessor; + + /** + * The output batch limit starts at OUTPUT_ROW_COUNT, but may be decreased + * if records are found to be large. 
+ */ + private int outputLimit = OUTPUT_ROW_COUNT; + + + // The index in the unnest column that is being processed.We start at zero and continue until + // InnerValueCount is reached or if the batch limit is reached + // this allows for groups to be written between batches if we run out of space, for cases where we have finished + // a batch on the boundary it will be set to 0 + private int innerValueIndex = 0; + + @Override + public void setUnnestField(RepeatedValueVector unnestField) { +this.fieldToUnnest = unnestField; +this.accessor = RepeatedValueVector.RepeatedAccessor.class.cast(unnestField.getAccessor()); + } + + @Override + public RepeatedValueVector getUnnestField() { +return fieldToUnnest; + } + + @Override + public void setOutputCount(int outputCount) { +outputLimit = outputCount; + } + + @Override + public final int unnestRecords(final int recordCount) { +switch (svMode) { + case FOUR_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case TWO_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case NONE: +if (innerValueIndex == -1) { + innerValueIndex = 0; +} + +// Current record being processed in the incoming record batch. We could keep +// track of
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182628483 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestImpl.java --- @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.collect.ImmutableList; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Contains the actual unnest operation. Unnest is a simple transfer operation in this impelementation. 
+ * For use as a table function, we will need to change the logic of the unnest method to operate on + * more than one row at a time and remove any dependence on Lateral + * {@link org.apache.drill.exec.physical.impl.flatten.FlattenTemplate}. + * This class follows the pattern of other operators that generate code at runtime. Normally this class + * would be abstract and have placeholders for doSetup and doEval. Unnest however, doesn't require code + * generation so we can simply implement the code in a simple class that looks similar to the code gen + * templates used by other operators but does not implement the doSetup and doEval methods. + */ +public class UnnestImpl implements Unnest { + private static final Logger logger = LoggerFactory.getLogger(UnnestImpl.class); + + private static final int OUTPUT_ROW_COUNT = ValueVector.MAX_ROW_COUNT; + + private ImmutableList transfers; + private LateralContract lateral; // corresponding lateral Join (or other operator implementing the Lateral Contract) + private SelectionVectorMode svMode; + private RepeatedValueVector fieldToUnnest; + private RepeatedValueVector.RepeatedAccessor accessor; + + /** + * The output batch limit starts at OUTPUT_ROW_COUNT, but may be decreased + * if records are found to be large. + */ + private int outputLimit = OUTPUT_ROW_COUNT; --- End diff -- `OUTPUT_ROW_COUNT` is not required, you can directly assign it with `ValueVector.MAX_ROW_COUNT` ---
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182639747 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockLateralJoinPOP.java --- @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.store.mock; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import org.apache.drill.exec.physical.EndpointAffinity; +import org.apache.drill.exec.physical.base.AbstractStore; +import org.apache.drill.exec.physical.base.PhysicalOperator; +import org.apache.drill.exec.physical.base.Store; +import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint; + +import java.util.Collections; +import java.util.List; + +@JsonTypeName("mock-lj") +public class MockLateralJoinPOP extends AbstractStore { --- End diff -- Doesn't look like this class is used anywhere. ---
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183120789 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestImpl.java --- @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.collect.ImmutableList; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Contains the actual unnest operation. Unnest is a simple transfer operation in this impelementation. 
+ * For use as a table function, we will need to change the logic of the unnest method to operate on + * more than one row at a time and remove any dependence on Lateral + * {@link org.apache.drill.exec.physical.impl.flatten.FlattenTemplate}. + * This class follows the pattern of other operators that generate code at runtime. Normally this class + * would be abstract and have placeholders for doSetup and doEval. Unnest however, doesn't require code + * generation so we can simply implement the code in a simple class that looks similar to the code gen + * templates used by other operators but does not implement the doSetup and doEval methods. + */ +public class UnnestImpl implements Unnest { + private static final Logger logger = LoggerFactory.getLogger(UnnestImpl.class); + + private static final int OUTPUT_ROW_COUNT = ValueVector.MAX_ROW_COUNT; + + private ImmutableList transfers; + private LateralContract lateral; // corresponding lateral Join (or other operator implementing the Lateral Contract) + private SelectionVectorMode svMode; + private RepeatedValueVector fieldToUnnest; + private RepeatedValueVector.RepeatedAccessor accessor; + + /** + * The output batch limit starts at OUTPUT_ROW_COUNT, but may be decreased + * if records are found to be large. 
+ */ + private int outputLimit = OUTPUT_ROW_COUNT; + + + // The index in the unnest column that is being processed.We start at zero and continue until + // InnerValueCount is reached or if the batch limit is reached + // this allows for groups to be written between batches if we run out of space, for cases where we have finished + // a batch on the boundary it will be set to 0 + private int innerValueIndex = 0; + + @Override + public void setUnnestField(RepeatedValueVector unnestField) { +this.fieldToUnnest = unnestField; +this.accessor = RepeatedValueVector.RepeatedAccessor.class.cast(unnestField.getAccessor()); + } + + @Override + public RepeatedValueVector getUnnestField() { +return fieldToUnnest; + } + + @Override + public void setOutputCount(int outputCount) { +outputLimit = outputCount; + } + + @Override + public final int unnestRecords(final int recordCount) { +switch (svMode) { + case FOUR_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case TWO_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case NONE: +if (innerValueIndex == -1) { + innerValueIndex = 0; +} + +// Current record being processed in the incoming record batch. We could keep +// track of
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182628991 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestImpl.java --- @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.collect.ImmutableList; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Contains the actual unnest operation. Unnest is a simple transfer operation in this impelementation. 
+ * For use as a table function, we will need to change the logic of the unnest method to operate on + * more than one row at a time and remove any dependence on Lateral + * {@link org.apache.drill.exec.physical.impl.flatten.FlattenTemplate}. + * This class follows the pattern of other operators that generate code at runtime. Normally this class + * would be abstract and have placeholders for doSetup and doEval. Unnest however, doesn't require code + * generation so we can simply implement the code in a simple class that looks similar to the code gen + * templates used by other operators but does not implement the doSetup and doEval methods. + */ +public class UnnestImpl implements Unnest { + private static final Logger logger = LoggerFactory.getLogger(UnnestImpl.class); + + private static final int OUTPUT_ROW_COUNT = ValueVector.MAX_ROW_COUNT; + + private ImmutableList transfers; + private LateralContract lateral; // corresponding lateral Join (or other operator implementing the Lateral Contract) + private SelectionVectorMode svMode; + private RepeatedValueVector fieldToUnnest; + private RepeatedValueVector.RepeatedAccessor accessor; + + /** + * The output batch limit starts at OUTPUT_ROW_COUNT, but may be decreased + * if records are found to be large. 
+ */ + private int outputLimit = OUTPUT_ROW_COUNT; + + + // The index in the unnest column that is being processed.We start at zero and continue until + // InnerValueCount is reached or if the batch limit is reached + // this allows for groups to be written between batches if we run out of space, for cases where we have finished + // a batch on the boundary it will be set to 0 + private int innerValueIndex = 0; + + @Override + public void setUnnestField(RepeatedValueVector unnestField) { +this.fieldToUnnest = unnestField; +this.accessor = RepeatedValueVector.RepeatedAccessor.class.cast(unnestField.getAccessor()); + } + + @Override + public RepeatedValueVector getUnnestField() { +return fieldToUnnest; + } + + @Override + public void setOutputCount(int outputCount) { +outputLimit = outputCount; + } + + @Override + public final int unnestRecords(final int recordCount) { +switch (svMode) { + case FOUR_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case TWO_BYTE: +throw new UnsupportedOperationException("Unnest does not support selection vector inputs."); + + case NONE: +if (innerValueIndex == -1) { + innerValueIndex = 0; +} + +// Current record being processed in the incoming record batch. We could keep +// track of
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r183117233 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestRecordBatch.java --- @@ -0,0 +1,451 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.drill.common.exceptions.UserException; +import org.apache.drill.common.expression.FieldReference; +import org.apache.drill.common.expression.SchemaPath; +import org.apache.drill.common.logical.data.NamedExpression; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.ClassTransformationException; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.expr.ClassGenerator; +import org.apache.drill.exec.expr.CodeGenerator; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.ops.MetricDef; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.UnnestPOP; +import org.apache.drill.exec.record.AbstractSingleRecordBatch; +import org.apache.drill.exec.record.AbstractTableFunctionRecordBatch; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.CloseableRecordBatch; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchMemoryManager; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.record.TypedFieldId; +import org.apache.drill.exec.record.VectorContainer; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedMapVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; + +import java.io.IOException; +import java.util.List; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; + +// TODO - handle the 
case where a user tries to unnest a scalar, should just return the column as is +public class UnnestRecordBatch extends AbstractTableFunctionRecordBatch { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestRecordBatch.class); + + private Unnest unnest; + private boolean hasRemainder = false; // set to true if there is data left over for the current row AND if we want +// to keep processing it. Kill may be called by a limit in a subquery that +// requires us to stop processing thecurrent row, but not stop processing +// the data. + // In some cases we need to return a predetermined state from a call to next. These are: + // 1) Kill is called due to an error occurring in the processing of the query. IterOutcome should be NONE + // 2) Kill is called by LIMIT to stop processing of the current row (This occurs when the LIMIT is part of a subquery + //between UNNEST and LATERAL. Iteroutcome should be EMIT + // 3) Kill is called by LIMIT downstream from LATERAL. IterOutcome should be NONE + private IterOutcome nextState = OK; + private int remainderIndex = 0; + private int recordCount; + private MaterializedField unnestFieldMetadata; + private final UnnestMemoryManager memoryManager; + + public enum Metric implements MetricDef { +INPUT_BATCH_COUNT, +AVG_INPUT_BATCH_BYTES, +AVG_INPUT_ROW_BYTES, +INPUT_RECORD_COUNT, +OUTPUT_BATCH_COUNT, +AV
[GitHub] drill pull request #1223: DRILL-6324: Unnest initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1223#discussion_r182628559 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/unnest/UnnestImpl.java --- @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.physical.impl.unnest; + +import com.google.common.collect.ImmutableList; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.TransferPair; +import org.apache.drill.exec.vector.ValueVector; +import org.apache.drill.exec.vector.complex.RepeatedValueVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Contains the actual unnest operation. Unnest is a simple transfer operation in this impelementation. 
+ * For use as a table function, we will need to change the logic of the unnest method to operate on + * more than one row at a time and remove any dependence on Lateral + * {@link org.apache.drill.exec.physical.impl.flatten.FlattenTemplate}. --- End diff -- I'm a little confused why `FlattenTemplate` is referenced here. ---
[GitHub] drill issue #1203: DRILL-6289: Cluster view should show more relevant inform...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1203 Thanks for all the changes Kunal. LGTM +1 ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r182507182 --- Diff: protocol/src/main/protobuf/Coordination.proto --- @@ -18,6 +18,7 @@ message DrillbitEndpoint{ OFFLINE = 3; } optional State state = 7; + optional int32 http_port = 8; --- End diff -- Please compile the protobuf for the C++ client as well (instructions are [here](https://github.com/apache/drill/blob/master/contrib/native/client/readme.linux)). It would be really helpful if you could put a note in [readme.txt of this package](https://github.com/apache/drill/blob/master/protocol/readme.txt) for future devs to compile the C++ client protobufs whenever a change is made in any of the .proto files. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r182510967 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -79,11 +94,15 @@ ${drillbit.getState()} -<#if (model.shouldShowAdminInfo() || !model.isAuthEnabled()) && drillbit.isCurrent() > +Not Available - SHUTDOWN - +<#if (model.shouldShowAdminInfo() || !model.isAuthEnabled()) && (drillbit.isCurrent() || !model.isUserEncryptionEnabled()) > --- End diff -- Please re-visit this condition. `IsUserEncryptionEnabled()` only checks for encryption between JDBC/ODBC client to Drillbit path not for Https. You have to check it using protocol. You can put the check something like below: ``` if(model.shouldShowAdminInfo() && (drillbit.isCurrent() || (!model.isAuthEnabled() && location.protocol != https))) { showShutdownButton(); } ``` ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r182496734 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java --- @@ -180,20 +180,22 @@ public void run() throws Exception { if (profileStoreProvider != storeProvider) { profileStoreProvider.start(); } -final DrillbitEndpoint md = engine.start(); +DrillbitEndpoint md = engine.start(); manager.start(md, engine.getController(), engine.getDataConnectionCreator(), coord, storeProvider, profileStoreProvider); -@SuppressWarnings("resource") -final DrillbitContext drillbitContext = manager.getContext(); +DrillbitContext drillbitContext = manager.getContext(); --- End diff -- Please revert above change. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r182495572 --- Diff: common/src/main/java/org/apache/drill/exec/metrics/DrillMetrics.java --- @@ -18,6 +18,7 @@ package org.apache.drill.exec.metrics; import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; --- End diff -- import not required anymore. ---
[GitHub] drill pull request #1217: DRILL-6302: Fixed NPE in Drillbit close method
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1217#discussion_r182302719 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java --- @@ -225,14 +225,18 @@ public synchronized void close() { } final Stopwatch w = Stopwatch.createStarted(); logger.debug("Shutdown begun."); -registrationHandle = coord.update(registrationHandle, State.QUIESCENT); +if (registrationHandle != null) { + registrationHandle = coord.update(registrationHandle, State.QUIESCENT); +} --- End diff -- Maybe create a private method like `UpdateState(State newState)` and encapsulate the null check in that method. ---
[GitHub] drill pull request #1212: DRILL-6323: Lateral Join - Initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1212#discussion_r182259926 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/LateralJoinBatch.java --- @@ -0,0 +1,861 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.drill.exec.physical.impl.join; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.calcite.rel.core.JoinRelType; +import org.apache.drill.common.types.TypeProtos; +import org.apache.drill.common.types.Types; +import org.apache.drill.exec.ExecConstants; +import org.apache.drill.exec.exception.OutOfMemoryException; +import org.apache.drill.exec.exception.SchemaChangeException; +import org.apache.drill.exec.ops.FragmentContext; +import org.apache.drill.exec.physical.base.LateralContract; +import org.apache.drill.exec.physical.config.LateralJoinPOP; +import org.apache.drill.exec.record.AbstractBinaryRecordBatch; +import org.apache.drill.exec.record.BatchSchema; +import org.apache.drill.exec.record.JoinBatchMemoryManager; +import org.apache.drill.exec.record.MaterializedField; +import org.apache.drill.exec.record.RecordBatch; +import org.apache.drill.exec.record.RecordBatchSizer; +import org.apache.drill.exec.record.VectorAccessibleUtilities; +import org.apache.drill.exec.record.VectorWrapper; +import org.apache.drill.exec.vector.ValueVector; + +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.EMIT; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.NONE; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OK_NEW_SCHEMA; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.OUT_OF_MEMORY; +import static org.apache.drill.exec.record.RecordBatch.IterOutcome.STOP; + +/** + * RecordBatch implementation for the lateral join operator. Currently it's expected LATERAL to co-exists with UNNEST + * operator. 
Both LATERAL and UNNEST will share a contract with each other defined at {@link LateralContract} + */ +public class LateralJoinBatch extends AbstractBinaryRecordBatch implements LateralContract { + private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(LateralJoinBatch.class); + + // Input indexes to correctly update the stats + private static final int LEFT_INPUT = 0; + + private static final int RIGHT_INPUT = 1; + + // Maximum number records in the outgoing batch + private int maxOutputRowCount; + + // Schema on the left side + private BatchSchema leftSchema; + + // Schema on the right side + private BatchSchema rightSchema; + + // Index in output batch to populate next row + private int outputIndex; + + // Current index of record in left incoming which is being processed + private int leftJoinIndex = -1; + + // Current index of record in right incoming which is being processed + private int rightJoinIndex = -1; + + // flag to keep track if current left batch needs to be processed in future next call + private boolean processLeftBatchInFuture; + + // Keep track if any matching right record was found for current left index record + private boolean matchedRecordFound; + + private boolean useMemoryManager = true; + + /* + * Public Methods + * / + public LateralJoinBatch(LateralJoinPOP popConfig, FragmentContext context, + RecordBatch left, RecordBatch right) throws OutOfMemoryException { +super(popConfig,
[GitHub] drill pull request #1212: DRILL-6323: Lateral Join - Initial implementation
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1212#discussion_r182262104 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java --- @@ -186,6 +186,11 @@ private ExecConstants() { public static final String BIT_ENCRYPTION_SASL_ENABLED = "drill.exec.security.bit.encryption.sasl.enabled"; public static final String BIT_ENCRYPTION_SASL_MAX_WRAPPED_SIZE = "drill.exec.security.bit.encryption.sasl.max_wrapped_size"; + /** --- End diff -- Removed. Thanks for catching this. ---
[GitHub] drill pull request #1212: DRILL-6323: Lateral Join - Initial implementation
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1212 DRILL-6323: Lateral Join - Initial implementation @parthchandra - Please review. You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6323 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1212.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1212 commit 38b82ce8ebca3b91ba8847229a568daf88f654a3 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-06T05:09:16Z DRILL-6323: Lateral Join - Initial implementation Refactor Join PopConfigs commit 0931ac974f7be41cd5ca4516524d04c4e5c6245d Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-02-05T22:46:19Z DRILL-6323: Lateral Join - Initial implementation commit 08b7d38e6ffe4fed492a2ea67c3907f0f514717e Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-02-20T22:47:48Z DRILL-6323: Lateral Join - Initial implementation Note: Refactor, fix various issues with LATERAL: a)Override prefetch call in BuildSchema phase for LATERAL, b)EMIT handling in buildSchema, c)Issue when in multilevel Lateral case schema change is observed only on right side of UNNEST, d)Handle SelectionVector in incoming batch, e) Handling Schema change, f) Updating joinIndexes correctly when producing multiple output batches for current left inputs. 
Added tests for a)EMIT handling in buildSchema, b)Multiple UNNEST at same level, c)Multilevel Lateral, d)Multilevel Lateral with Schema change on left/right or both branches, e) Left LATERAL join f)Schema change for UNNEST and Non-UNNEST columns, g)Error outcomes from left, h) Producing multiple output batches for given incoming, i) Consuming multiple incoming into single output batch commit f686877198857a23227031329b2f48c163f85021 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-09T23:56:22Z DRILL-6323: Lateral Join - Initial implementation Note: Remove codegen and operator template class. Logic to copy data is moved to LateralJoinBatch itself commit cb47407a8f47e7fc10ac04c438f7e903de92a80c Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-13T23:41:03Z DRILL-6323: Lateral Join - Initial implementation Note: Add some debug logs for LateralJoinBatch commit 2ea968e3302ad8bb6d62a8032bd6b6329b900d3a Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-14T23:59:29Z DRILL-6323: Lateral Join - Initial implementation Note: Refactor BatchMemorySize to put outputBatchSize in abstract class. Created a new JoinBatchMemoryManager to be shared across join record batches. Changed merge join to use AbstractBinaryRecordBatch instead of AbstractRecordBatch, and use JoinBatchMemoryManager commit fb521c65f91b69543ae9b5dbb9a727c79f852573 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-19T19:00:22Z DRILL-6323: Lateral Join - Initial implementation Note: Lateral Join Batch Memory manager support using the record batch sizer ---
[GitHub] drill pull request #1211: DRILL-6322: Lateral Join: Common changes - Add new...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1211 DRILL-6322: Lateral Join: Common changes - Add new iterOutcome, Operator types, MockRecordBatch for testing @parthchandra - Please review. You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6322 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1211.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1211 commit 2dbc6768857a0b8cd3e6e8c19fc133af2abf20d3 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-02-05T21:12:15Z DRILL-6322: Lateral Join: Common changes - Add new iterOutcome, Operatortypes, MockRecordBatch for testing Note: Added new Iterator State EMIT, added operatos LATERA_JOIN & UNNEST in CoreOperatorType and added LateralContract interface commit 15655ab3543521ff56f4d426ebf7ed4884eb3006 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-02-07T21:29:28Z DRILL-6322: Lateral Join: Common changes - Add new iterOutcome, Operatortypes, MockRecordBatch for testing Note: Implementation of MockRecordBatch to test operator behavior for different IterOutcomes. a) Creates new output container for schema change cases. b) Doesn't create new container for each next() call without schema change, since the operator in test expects the ValueVector object in it's incoming batch to be same unless a OK_NEW_SCHEMA case is hit. Since setup() method of operator in test will store the reference to value vector received in first batch ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181830465 --- Diff: common/src/main/java/org/apache/drill/exec/metrics/CpuGaugeSet.java --- @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.metrics; + +import java.lang.management.OperatingSystemMXBean; +import java.util.HashMap; +import java.util.Map; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricSet; + +/** + * Creates a Cpu GaugeSet + */ +class CpuGaugeSet implements MetricSet { --- End diff -- I am not sure how useful `getProcessCpuLoad` alone will be, but in addition with total system load it can give the idea about how Drill and other services on node are using the CPU. If you plan to include another metric then in that case `CpuGaugeSet` class is fine. Thanks for clarification. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181254054 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -67,9 +73,15 @@ ${i} ${drillbit.getAddress()}<#if drillbit.isCurrent()> Current +<#else> + --- End diff -- This will only work if authentication is disabled. Shouldn't we have that check here to decide whether to display it? ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181252867 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java --- @@ -158,7 +158,13 @@ public void start() throws Exception { final int selectors = config.getInt(ExecConstants.HTTP_JETTY_SERVER_SELECTORS); final QueuedThreadPool threadPool = new QueuedThreadPool(2, 2, 6); embeddedJetty = new Server(threadPool); -embeddedJetty.setHandler(createServletContextHandler(authEnabled)); +ServletContextHandler webServerContext = createServletContextHandler(authEnabled); +//Allow for Other Drillbits to make REST calls +FilterHolder filterHolder = new FilterHolder(CrossOriginFilter.class); +filterHolder.setInitParameter("allowedOrigins", "*"); --- End diff -- I am not too familiar with CORS concept but would be good to see if we can be more restrictive instead of just *. Also when authentication is enabled then with this filter, CORS request still requires authentication right ? Can you please confirm this? ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181251700 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -79,9 +91,14 @@ ${drillbit.getState()} + <#if (model.shouldShowAdminInfo() || !model.isAuthEnabled()) && drillbit.isCurrent() > - SHUTDOWN + SHUTDOWN + +<#elseif (model.shouldShowAdminInfo() || !model.isAuthEnabled()) && !drillbit.isCurrent() > + --- End diff -- the elseif check should be `(!model.isAuthEnabled && !drillbit.isCurrent() && check to see if not https)` FYI - On a bigger note I think it will be cumbersome and error prone to implement remote shutdown when security is enabled using WebClient side changes. There are few options which where discussed on how to achieve it, please see: [DRILL-6244](https://issues.apache.org/jira/browse/DRILL-6244). I would prefer if we can be consistent to do remote shutdown in same way for both secure and unsecure case. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181161322 --- Diff: common/src/main/java/org/apache/drill/exec/metrics/CpuGaugeSet.java --- @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.metrics; + +import java.lang.management.OperatingSystemMXBean; +import java.util.HashMap; +import java.util.Map; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricSet; + +/** + * Creates a Cpu GaugeSet + */ +class CpuGaugeSet implements MetricSet { --- End diff -- I don't think this class is needed at all, instead just keeping the OperatingSystemLoad class is fine. Then in DrillMetrics we have to `register` that instead of calling registerAll on this class. Reason being we only have a single metric here not actually a set so we should use `register` instead of `registerAll`. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181161557 --- Diff: common/src/main/java/org/apache/drill/exec/metrics/DrillMetrics.java --- @@ -60,6 +61,7 @@ private static void registerSystemMetrics() { REGISTRY.registerAll(new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer())); REGISTRY.registerAll(new MemoryUsageGaugeSet()); REGISTRY.registerAll(new ThreadStatesGaugeSet()); + REGISTRY.registerAll(new CpuGaugeSet(ManagementFactory.getOperatingSystemMXBean())); --- End diff -- please replace `registerAll` with `register`. See comment above in `CpuGaugeSet` ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181252317 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -260,7 +285,7 @@ function fillQueryCount(row_id) { var requestPath = "/queriesCount"; var url = getRequestUrl(requestPath); - var result = $.ajax({ + var result = $.ajax({ --- End diff -- please revert the original indentation ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181258290 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -283,20 +308,114 @@ alert(errorThrown); }, success: function(data) { - alert(data["response"]); + alert(data["response"]); button.prop('disabled',true).css('opacity',0.5); } }); } } - + + function remoteShutdown(button,host) { + var url = location.protocol + "//" + host + "/gracefulShutdown"; + var result = $.ajax({ +type: 'POST', +url: url, +contentType : 'text/plain', +complete: function(data) { +alert(data.responseJSON["response"]); +button.prop('disabled',true).css('opacity',0.5); +} + }); + } + + + function popOutRemoteDbitUI(dbitHost, dbitPort) { --- End diff -- this and functions below should be under `<#if !model.isAuthEnabled()> <#/if>` block ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181179746 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java --- @@ -189,11 +189,16 @@ public void run() throws Exception { drillbitContext.getOptionManager().init(); javaPropertiesToSystemOptions(); manager.getContext().getRemoteFunctionRegistry().init(context.getConfig(), storeProvider, coord); -registrationHandle = coord.register(md); webServer.start(); - +//Discovering HTTP port (in case of port hunting) +if (webServer.isRunning()) { + int httpPort = getWebServerPort(); + registrationHandle = coord.register(md.toBuilder().setHttpPort(httpPort).build()); +} else { + //WebServer is not running + registrationHandle = coord.register(md); +} --- End diff -- how about changing with below: ``` if (webServer.isRunning()) { final int httpPort = getWebServerPort(); md = md.toBuilder().setHttpPort(httpPort).build(); } registrationHandle = coord.register(md); ``` ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181253785 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -283,20 +308,114 @@ alert(errorThrown); }, success: function(data) { - alert(data["response"]); + alert(data["response"]); button.prop('disabled',true).css('opacity',0.5); } }); } } - + + function remoteShutdown(button,host) { --- End diff -- This function is mostly the same as the existing `shutdown(button)`. Consider changing that to take a host parameter and using it instead. ---
[GitHub] drill pull request #1203: DRILL-6289: Cluster view should show more relevant...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1203#discussion_r181165145 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java --- @@ -180,7 +180,7 @@ public void run() throws Exception { if (profileStoreProvider != storeProvider) { profileStoreProvider.start(); } -final DrillbitEndpoint md = engine.start(); +DrillbitEndpoint md = engine.start(); --- End diff -- if you do below then you need to remove final otherwise not ---
[GitHub] drill issue #1202: DRILL-6311: No logging information in drillbit.log / dril...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1202 @vrozov - Updated the PR based on your suggestion. The command `mvn test -DexcludedGroups="org.apache.drill.categories.SlowTest,org.apache.drill.categories.UnlikelyTest,org.apache.drill.categories.SecurityTest"` was not working on the latest master either. Spoke with @ilooner and made sure at least the command used in Travis is working with this change `mvn install -DexcludedGroups="org.apache.drill.categories.SlowTest,org.apache.drill.categories.UnlikelyTest,org.apache.drill.categories.SecurityTest"` ---
[GitHub] drill issue #1202: DRILL-6311: No logging information in drillbit.log / dril...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1202 @vrozov - Please help to review. ---
[GitHub] drill pull request #1202: DRILL-6311: No logging information in drillbit.log...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1202 DRILL-6311: No logging information in drillbit.log / drillbit.out You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6311 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1202.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1202 commit c988dc83df36da6dbe4d9ab77595084e9cd084f1 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-04-05T22:49:22Z DRILL-6311: No logging information in drillbit.log / drillbit.out ---
[GitHub] drill issue #1180: DRILL-6283: WebServer stores SPNEGO client principal with...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1180 @arina-ielchiieva - Please help to review this PR ---
[GitHub] drill pull request #1180: DRILL-6283: WebServer stores SPNEGO client princip...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1180 DRILL-6283: WebServer stores SPNEGO client principal without taking a… …ny conversion rule You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6283 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1180.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1180 commit d80633021d8d81d786921051b92eda63476375de Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-21T22:53:25Z DRILL-6283: WebServer stores SPNEGO client principal without taking any conversion rule ---
[GitHub] drill issue #1169: DRILL-6243: Added alert box to confirm shutdown of drillb...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1169 +1 LGTM. Thanks for making the changes. ---
[GitHub] drill pull request #1175: DRILL-6262: IndexOutOfBoundException in RecordBatc...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1175#discussion_r175629518 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchSizer.java --- @@ -321,10 +321,8 @@ public ColumnSize(ValueVector v, String prefix) { // Calculate pure data size. if (isVariableWidth) { -UInt4Vector offsetVector = ((RepeatedValueVector) v).getOffsetVector(); -int innerValueCount = offsetVector.getAccessor().get(valueCount); VariableWidthVector dataVector = ((VariableWidthVector) ((RepeatedValueVector) v).getDataVector()); -totalDataSize = dataVector.getOffsetVector().getAccessor().get(innerValueCount); +totalDataSize = dataVector.getCurrentSizeInBytes(); --- End diff -- @paul-rogers - I don't think `totalDataSize` includes both offset vector side and bytes size. It was meant to only include **pure data size only** for all entries in that column and that's what comment also suggests. Instead `totalNetSize` includes the size for data and offset vector which is used for computing the rowWidth. ---
[GitHub] drill pull request #1169: DRILL-6243: Added alert box to confirm shutdown of...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1169#discussion_r175598697 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -272,17 +272,19 @@ } <#if model.shouldShowAdminInfo() || !model.isAuthEnabled()> function shutdown(button) { - var requestPath = "/gracefulShutdown"; - var url = getRequestUrl(requestPath); - var result = $.ajax({ -type: 'POST', -url: url, -contentType : 'text/plain', -complete: function(data) { -alert(data.responseJSON["response"]); -button.prop('disabled',true).css('opacity',0.5); -} - }); + if (confirm("Click ok to shutdown")) { --- End diff -- Message should be more of like` "Are you sure you want to shutdown Drillbit running on + location.host + node ?"` ---
[GitHub] drill pull request #1169: DRILL-6243: Added alert box to confirm shutdown of...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1169#discussion_r175598108 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -272,17 +272,19 @@ } <#if model.shouldShowAdminInfo() || !model.isAuthEnabled()> function shutdown(button) { - var requestPath = "/gracefulShutdown"; - var url = getRequestUrl(requestPath); - var result = $.ajax({ -type: 'POST', -url: url, -contentType : 'text/plain', -complete: function(data) { -alert(data.responseJSON["response"]); -button.prop('disabled',true).css('opacity',0.5); -} - }); + if (confirm("Click ok to shutdown")) { + var requestPath = "/gracefulShutdown"; + var url = getRequestUrl(requestPath); + var result = $.ajax({ + type: 'POST', + url: url, + contentType : 'text/plain', + complete: function(data) { +alert(data.responseJSON["response"]); + button.prop('disabled',true).css('opacity',0.5); +} --- End diff -- please fix indentation here and below. Also add the `error: `callback for Ajax request. Like alert with received error ? ---
[GitHub] drill issue #1175: DRILL-6262: IndexOutOfBoundException in RecordBatchSize f...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1175 @bitblender - Updated the test. Please review. ---
[GitHub] drill issue #1175: DRILL-6262: IndexOutOfBoundException in RecordBatchSize f...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1175 @ppadma - Please review ---
[GitHub] drill pull request #1175: DRILL-6262: IndexOutOfBoundException in RecordBatc...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1175 DRILL-6262: IndexOutOfBoundException in RecordBatchSize for empty var… …iableWidthVector You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6262 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1175.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1175 commit dbde22ea37c486b483601ab551e7fc7c23fb96b8 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-16T23:57:12Z DRILL-6262: IndexOutOfBoundException in RecordBatchSize for empty variableWidthVector ---
[GitHub] drill issue #1168: DRILL-6246: Reduced the size of the jdbc-all jar file
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1168 +1 LGTM. Thanks for making the changes. ---
[GitHub] drill pull request #1168: DRILL-6246: Reduced the size of the jdbc-all jar f...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1168#discussion_r174882554 --- Diff: exec/jdbc-all/pom.xml --- @@ -473,6 +473,8 @@ org/yaml/** hello/** webapps/** + **/org/apache/calcite/avatica/metrics/** + **/org/apache/calcite/avatica/org/** --- End diff -- Would be great to make that change. Other than that, the changes look good to me ---
[GitHub] drill pull request #1168: DRILL-6246: Reduced the size of the jdbc-all jar f...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1168#discussion_r174866472 --- Diff: exec/jdbc-all/pom.xml --- @@ -473,6 +473,8 @@ org/yaml/** hello/** webapps/** + **/org/apache/calcite/avatica/metrics/** + **/org/apache/calcite/avatica/org/** --- End diff -- After removing these exclusion for apache profile what is the size of jdbc-all jar ? based on final size we should reduce the allowed maxSize from ~35MB to that value. ---
[GitHub] drill issue #1153: DRILL-6044: Fixed shutdown button in Web UI when ssl,auth...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1153 +1 LGTM. Thanks for making the changes. ---
[GitHub] drill issue #1145: DRILL-6187: Exception in RPC communication between DataCl...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1145 Made all the changes and squashed changes into 2 commits ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173371305 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -193,7 +192,11 @@ var port = getPortNum(); var timeout; var size = $("#size").html(); + var host; + window.onload = function () { + host = location.host; + }; --- End diff -- Please remove this `function()` and `var host`. ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173371697 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -269,9 +277,12 @@ } }); } - <#if model.shouldShowAdminInfo() || !model.isAuthEnabled() > - function shutdown(address,button) { - url = "http://"+address+":"+portNum+"/gracefulShutdown"; + <#if model.shouldShowAdminInfo() || !model.isAuthEnabled()> + function shutdown(button) { + var protocol = location.protocol; + var host = location.host; + var requestPath = "/gracefulShutdown"; + var url = protocol+host+requestPath; --- End diff -- How about refactoring the above lines into a separate function `getRequestUrl(requestPath)` and calling that from both the methods (`shutdown` & `fillQueryCount`) ``` function getRequestUrl(requestPath) { var protocol = location.protocol; var host = location.host; var url = protocol + host + requestPath; return url; } ``` ---
[GitHub] drill issue #1145: DRILL-6187: Exception in RPC communication between DataCl...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1145 @vrozov - Thanks for the feedback. Updated the PR with the latest changes. Please help to review. ---
[GitHub] drill issue #1155: DRILL-6213: During restart drillbit should be killed forc...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1155 +1 LGTM. ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1145#discussion_r173043030 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java --- @@ -312,6 +312,11 @@ public synchronized void connect(String connect, Properties props) throws RpcExc if (connected) { return; } + +if (props == null) { --- End diff -- Not totally sure what you mean here. Since there are 2 other overloaded methods which call's this method internally. They do pass null props and are used across multiple tests. May be I can check for props in those method instead and create a instance of it if needed ? Then we can place NotNull tag on this connect method. That will not require to change any existing tests. ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1145#discussion_r173041288 --- Diff: exec/rpc/src/main/java/org/apache/drill/exec/rpc/BasicClient.java --- @@ -182,6 +196,66 @@ public boolean isActive() { protected abstract void validateHandshake(HR validateHandshake) throws RpcException; + /** + * Creates various instances needed to start the SASL handshake. This is called from + * {@link BasicClient#validateHandshake(MessageLite)} if authentication is required from server side. + * @param connectionListener + * @throws RpcException + */ + protected abstract void prepareSaslHandshake(final RpcConnectionHandler connectionListener) throws RpcException; --- End diff -- removed from here and `startSaslHandshake` ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1145#discussion_r173041420 --- Diff: exec/rpc/src/main/java/org/apache/drill/exec/rpc/BasicClient.java --- @@ -69,6 +74,11 @@ private final IdlePingHandler pingHandler; private ConnectionMultiListener.SSLHandshakeListener sslHandshakeListener = null; + // Authentication related parameters + protected volatile List serverAuthMechanisms = null; --- End diff -- On second thought volatile doesn't seem necessary here since it will only be accessed by Netty's thread which is also fixed for a connection. Made fields private. ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1145#discussion_r173041172 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/rpc/control/ControlClient.java --- @@ -103,6 +96,23 @@ protected void handle(ControlConnection connection, int rpcType, ByteBuf pBody, connection.getCurrentHandler().handle(connection, rpcType, pBody, dBody, sender); } + @Override + protected void prepareSaslHandshake(final RpcConnectionHandler connectionListener) --- End diff -- `RpcType.SASL_MESSAGE` message accessed within `prepareSaslHandshake` implementation of DataClient/ControlClient is defined separately. Also each of these client except UserClient has access to ConnectionConfig which is not part of BasicClient too and is used in prepareSaslHandshake implementation. Hence I kept the implementations separate for both DataClient and ControlClient. ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1145#discussion_r173041032 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java --- @@ -371,17 +376,20 @@ protected void afterExecute(final Runnable r, final Throwable t) { while (triedEndpointIndex < connectTriesVal) { endpoint = endpoints.get(triedEndpointIndex); + + // Set in both props and properties since props is passed to UserClient + if (!properties.containsKey(DrillProperties.SERVICE_HOST)) { --- End diff -- `putIfAbsent` is Java 8 specific api. Today we had a discussion that until next release we don't want to bring dependency on Java 8 only api's. ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173030603 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -272,6 +281,12 @@ <#if model.shouldShowAdminInfo()> function shutdown(address,button) { url = "http://"+address+":"+portNum+"/gracefulShutdown"; + var ssl = $('#ssl').val(); + url = "http://"; + if (ssl == "ssl_enabled") { +url = "https://"; + } + url = url+host+"/gracefulShutdown"; --- End diff -- I guess you can simplify the `shutdown` function as below. ``` function shutdown(button) { var protocol = location.protocol; var host = location.host; var requestPath = "/gracefulShutdown"; var url = protocol+host+requestPath; .. } ``` ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173031821 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -247,11 +253,14 @@ $("#row-"+i).find("#queriesCount").text(""); } else { -if( status_map[key] == "ONLINE") { +if (status_map[key] == "ONLINE") { $("#row-"+i).find("#status").text(status_map[key]); } else { -fillQueryCount(address,i); +var is_ssl_enabled = $('#ssl').val(); +if (is_ssl_enabled != "ssl_enabled") { +fillQueryCount(address,i); --- End diff -- `fillQueryCount` should also handle the case for Https and Http just like `shutdown`. Looks like currently with this change if SSL is enabled then we won't be able to get the queryCount of Drillbit shutting down. Why not handle it in same way as for `shutdown` method ? ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173029148 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -272,6 +281,12 @@ <#if model.shouldShowAdminInfo()> function shutdown(address,button) { url = "http://"+address+":"+portNum+"/gracefulShutdown"; + var ssl = $('#ssl').val(); + url = "http://"; + if (ssl == "ssl_enabled") { +url = "https://"; --- End diff -- Why not use [location.protocol](https://www.w3schools.com/jsref/prop_loc_protocol.asp) rather than getting the info from the server side? ---
[GitHub] drill pull request #1153: DRILL-6044: Fixed shutdown button in Web UI when s...
Github user sohami commented on a diff in the pull request: https://github.com/apache/drill/pull/1153#discussion_r173029876 --- Diff: exec/java-exec/src/main/resources/rest/index.ftl --- @@ -272,6 +281,12 @@ <#if model.shouldShowAdminInfo()> function shutdown(address,button) { url = "http://"+address+":"+portNum+"/gracefulShutdown"; --- End diff -- please delete this line and update the `shutdown` signature since `address` is not needed anymore. ---
[GitHub] drill pull request #1157: DRILL-6216: Metadata mismatch when connecting to a...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1157 DRILL-6216: Metadata mismatch when connecting to a Drill 1.12.0 with … …a Drill-1.13.0-SNAPSHOT driver You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6216 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1157.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1157 commit fddde494554e74ff4f3fba89f2916900e4e2f917 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-07T20:06:32Z DRILL-6216: Metadata mismatch when connecting to a Drill 1.12.0 with a Drill-1.13.0-SNAPSHOT driver ---
[GitHub] drill issue #1145: DRILL-6187: Exception in RPC communication between DataCl...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1145 @vrozov - Please help to review this PR. It addresses the concurrency issue during authentication of the control/data client to the server side. Rather than adding the connection into the connection holder right after the TCP connection is available, the listener for connection success is called after successful authentication (if needed). ---
[GitHub] drill pull request #1145: DRILL-6187: Exception in RPC communication between...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1145 DRILL-6187: Exception in RPC communication between DataClient/Control… …Client and respective servers when bit-to-bit security is on You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6187-2 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1145.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1145 commit 4a7602b428ef4ef9fe358976713a78174bb82f57 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-03-01T23:08:10Z DRILL-6187: Exception in RPC communication between DataClient/ControlClient and respective servers when bit-to-bit security is on ---
[GitHub] drill pull request #1124: DRILL-6172: setValueCount of VariableLengthVectors...
GitHub user sohami opened a pull request: https://github.com/apache/drill/pull/1124 DRILL-6172: setValueCount of VariableLengthVectors throws IOB excepti… …on when called with 0 value after clearing vectors You can merge this pull request into a Git repository by running: $ git pull https://github.com/sohami/drill DRILL-6172 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/drill/pull/1124.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #1124 commit 960374b5486fce17a19c7a5ed5200645b5c3ac63 Author: Sorabh Hamirwasia <shamirwasia@...> Date: 2018-02-21T01:14:29Z DRILL-6172: setValueCount of VariableLengthVectors throws IOB exception when called with 0 value after clearing vectors ---
[GitHub] drill issue #1124: DRILL-6172: setValueCount of VariableLengthVectors throws...
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1124 @parthchandra - Please help to review this PR ---
[GitHub] drill issue #1109: DRILL-6128: Wrong Result with Nested Loop Join
Github user sohami commented on the issue: https://github.com/apache/drill/pull/1109 Updated the comment and rebased on top of latest master ---