[8/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by 
default. Contributed by Elek, Marton.

Some files were missed while committing the original patch; this patch addresses
that issue.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ace05b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ace05b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ace05b3

Branch: refs/heads/HDFS-7240
Commit: 7ace05b3fe35f7774f66651db025bbbfb9498b8e
Parents: f76819c
Author: Anu Engineer 
Authored: Mon Mar 19 15:58:50 2018 -0700
Committer: Anu Engineer 
Committed: Mon Mar 19 15:58:50 2018 -0700

--
 .../src/main/resources/webapps/datanode/dn.js   |  92 +
 .../webapps/static/angular-1.6.4.min.js | 332 +
 .../webapps/static/angular-nvd3-1.0.9.min.js|   1 +
 .../webapps/static/angular-route-1.6.4.min.js   |  17 +
 .../resources/webapps/static/d3-3.5.17.min.js   |   5 +
 .../main/resources/webapps/static/dfs-dust.js   | 133 +++
 .../resources/webapps/static/nvd3-1.8.5.min.css |   2 +
 .../webapps/static/nvd3-1.8.5.min.css.map   |   1 +
 .../resources/webapps/static/nvd3-1.8.5.min.js  |  11 +
 .../webapps/static/nvd3-1.8.5.min.js.map|   1 +
 .../src/main/resources/webapps/static/ozone.css |  60 
 .../src/main/resources/webapps/static/ozone.js  | 355 +++
 .../webapps/static/templates/config.html|  91 +
 .../resources/webapps/static/templates/jvm.html |  26 ++
 .../webapps/static/templates/menu.html  |  60 
 .../webapps/static/templates/overview.html  |  39 ++
 .../webapps/static/templates/rpc-metrics.html   |  87 +
 17 files changed, 1313 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js
--
diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js
new file mode 100644
index 000..3b67167
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+  "use strict";
+
+  var data = {ozone: {enabled: false}};
+
+  dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));
+
+  function loadDatanodeInfo() {
+    $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) {
+      data.dn = workaround(resp.beans[0]);
+      data.dn.HostName = resp.beans[0]['DatanodeHostname'];
+      render();
+    }).fail(show_err_msg);
+  }
+
+  function loadOzoneScmInfo() {
+    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager', function (resp) {
+      if (resp.beans.length > 0) {
+        data.ozone.SCMServers = resp.beans[0].SCMServers;
+        data.ozone.enabled = true;
+        render();
+      }
+    }).fail(show_err_msg);
+  }
+
+  function loadOzoneStorageInfo() {
+    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager', function (resp) {
+      if (resp.beans.length > 0) {
+        data.ozone.LocationReport = resp.beans[0].LocationReport;
+        data.ozone.enabled = true;
+        render();
+      }
+    }).fail(show_err_msg);
+  }
+
+  function workaround(dn) {
+    function node_map_to_array(nodes) {
+      var res = [];
+      for (var n in nodes) {
+        var p = nodes[n];
+        p.name = n;
+        res.push(p);
+      }
+      return res;
+    }
+
+    dn.VolumeInfo = node_map_to_array(JSON.parse(dn.VolumeInfo));
+    dn.BPServiceActorInfo = JSON.parse(dn.BPServiceActorInfo);
+
+    return dn;
+  }
+
+  function render() {
+    var base = dust.makeBase({
+      'helper_relative_time' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write(momen
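
The dn.js page above drives itself entirely off the DataNode's /jmx JSON servlet. As a
hedged, self-contained illustration of hitting the same endpoint outside the browser (the
host and port below are hypothetical; only the qry= strings come from the code above):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.Scanner;

    public class JmxProbe {
      public static void main(String[] args) throws Exception {
        // Hypothetical datanode HTTP address; dn.js issues the same qry= request.
        URL url = new URL(
            "http://localhost:9864/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (Scanner s = new Scanner(conn.getInputStream(), "UTF-8").useDelimiter("\\A")) {
          // The servlet answers with {"beans":[{...}]}; dn.js reads resp.beans[0].
          System.out.println(s.hasNext() ? s.next() : "");
        }
      }
    }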

[4/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js
new file mode 100644
index 000..c7af6a1
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function ($, dust, exports) {
+  "use strict";
+
+  var filters = {
+'fmt_bytes': function (v) {
+  var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB'];
+  var prev = 0, i = 0;
+  while (Math.floor(v) > 0 && i < UNITS.length) {
+prev = v;
+v /= 1024;
+i += 1;
+  }
+
+  if (i > 0 && i < UNITS.length) {
+v = prev;
+i -= 1;
+  }
+  return Math.round(v * 100) / 100 + ' ' + UNITS[i];
+},
+
+'fmt_percentage': function (v) {
+  return Math.round(v * 100) / 100 + '%';
+},
+'elapsed': function(v) {
+  //elapsed sec from epoch sec
+  return Date.now() - v * 1000;
+},
+'fmt_time': function (v) {
+  var s = Math.floor(v / 1000), h = Math.floor(s / 3600);
+  s -= h * 3600;
+  var m = Math.floor(s / 60);
+  s -= m * 60;
+
+  var res = s + " sec";
+  if (m !== 0) {
+res = m + " mins, " + res;
+  }
+
+  if (h !== 0) {
+res = h + " hrs, " + res;
+  }
+
+  return res;
+},
+
+'date_tostring' : function (v) {
+  return moment(Number(v)).format('ddd MMM DD HH:mm:ss ZZ ');
+},
+
+'format_compile_info' : function (v) {
+  var info = v.split(" by ")
+  var date = moment(info[0]).format('ddd MMM DD HH:mm:ss ZZ ');
+  return date.concat(" by ").concat(info[1]);
+ },
+
+'helper_to_permission': function (v) {
+  var symbols = [ '---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx' ];
+  var vInt = parseInt(v, 8);
+  var sticky = (vInt & (1 << 9)) != 0;
+
+  var res = "";
+  for (var i = 0; i < 3; ++i) {
+res = symbols[(v % 10)] + res;
+v = Math.floor(v / 10);
+  }
+
+  if (sticky) {
+var otherExec = (vInt & 1) == 1;
+res = res.substr(0, res.length - 1) + (otherExec ? 't' : 'T');
+  }
+
+  return res;
+},
+
+'helper_to_directory' : function (v) {
+  return v === 'DIRECTORY' ? 'd' : '-';
+},
+
+'helper_to_acl_bit': function (v) {
+  return v ? '+' : "";
+},
+
+'fmt_number': function (v) {
+  return v.toLocaleString();
+}
+  };
+  $.extend(dust.filters, filters);
+
+  /**
+   * Load a sequence of JSON.
+   *
+   * beans is an array of tuples in the format of {url, name}.
+   */
+  function load_json(beans, success_cb, error_cb) {
+var data = {}, error = false, to_be_completed = beans.length;
+
+$.each(beans, function(idx, b) {
+  if (error) {
+return false;
+  }
+  $.get(b.url, function (resp) {
+data[b.name] = resp;
+to_be_completed -= 1;
+if (to_be_completed === 0) {
+  success_cb(data);
+}
+  }).error(function (jqxhr, text, err) {
+error = true;
+error_cb(b.url, jqxhr, text, err);
+  });
+});
+  }
+
+  exports.load_json = load_json;
+
+}($, dust, window));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
new file mode 100644
index 000..b8a5c0f
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
@@ -0,0 +1,2 @@
[minified nvd3 1.8.5 stylesheet elided (2 lines added in the commit; truncated in this archive)]

[5/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
new file mode 100644
index 000..1664873
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
@@ -0,0 +1,5 @@
[minified D3.js 3.5.17 source elided (5 lines added in the commit; garbled and truncated in this archive)]

[3/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
new file mode 100644
index 000..9cfd702
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
@@ -0,0 +1,11 @@
+/* nvd3 version 1.8.5 (https://github.com/novus/nvd3) 2016-12-01 */
+
[minified nvd3 1.8.5 JavaScript source elided (11 lines added in the commit; garbled and truncated in this archive)]

[1/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 f76819c7c -> 7ace05b3f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css
--
diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css
new file mode 100644
index 000..271ac74
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css
@@ -0,0 +1,60 @@
+/**
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+*/
+body {
+padding: 40px;
+padding-top: 60px;
+}
+.starter-template {
+padding: 40px 15px;
+text-align: center;
+}
+
+
+.btn {
+border: 0 none;
+font-weight: 700;
+letter-spacing: 1px;
+text-transform: uppercase;
+}
+
+.btn:focus, .btn:active:focus, .btn.active:focus {
+outline: 0 none;
+}
+
+.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td:hover {
+background-color: #3276b1;
+}
+.table-striped > tbody > tr:nth-child(2n+1).selectedtag > td {
+background-color: #3276b1;
+}
+.tagPanel tr.selectedtag td {
+background-color: #3276b1;
+}
+.top-buffer { margin-top:4px; }
+
+
+.sortorder:after {
+content: '\25b2';   // BLACK UP-POINTING TRIANGLE
+}
+.sortorder.reverse:after {
+content: '\25bc';   // BLACK DOWN-POINTING TRIANGLE
+}
+
+.wrap-table{
+word-wrap: break-word;
+table-layout: fixed;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js
--
diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js
new file mode 100644
index 000..37cafef
--- /dev/null
+++ b/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+"use strict";
+
+var isIgnoredJmxKeys = function (key) {
+return key == 'name' || key == 'modelerType' || key == "$$hashKey" ||
+key.match(/tag.*/);
+};
+angular.module('ozone', ['nvd3', 'ngRoute']);
+angular.module('ozone').config(function ($routeProvider) {
+$routeProvider
+.when("/", {
+templateUrl: "main.html"
+})
+.when("/metrics/rpc", {
+template: ""
+})
+.when("/config", {
+template: ""
+})
+});
+angular.module('ozone').component('overview', {
+templateUrl: 'static/templates/overview.html',
+transclude: true,
+controller: function ($http) {
+var ctrl = this;
+
$http.get("jmx?qry=Hadoop:service=*,name=*,component=ServerRuntime")
+.then(function (result) {
+ctrl.jmx = result.data.beans[0]
+})
+}
+});
+angular.module('ozone').component('jvmParameters', {
+templateUrl: 'static/templates/jvm.html',
+controller: function ($http) {
+var ctrl = this;
+$http.get("jmx?qry=java.lang:type=Runtime")
+.then(function (result) {
+ctrl.jmx = result.data.beans[0];
+
+//convert array to a map
+  

[7/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
new file mode 100644
index 000..c4bf158
--- /dev/null
+++ 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
@@ -0,0 +1,332 @@
+/*
+ AngularJS v1.6.4
+ (c) 2010-2017 Google, Inc. http://angularjs.org
+ License: MIT
+*/
[minified AngularJS 1.6.4 source elided (332 lines added in the commit; garbled and truncated in this archive)]

[6/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
new file mode 100644
index 000..4aced57
--- /dev/null
+++ 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
@@ -0,0 +1 @@
[minified angular-nvd3 1.0.9 source elided (1 line added in the commit; garbled and truncated in this archive)]

[2/8] hadoop git commit: HDFS-13298. Addendum: Ozone: Make ozone/hdsl/cblock modules turned off by default. Contributed by Elek, Marton.

2018-03-19 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ace05b3/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
--
diff --git 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
new file mode 100644
index 000..594da5a3
--- /dev/null
+++ 
b/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
@@ -0,0 +1 @@
[nvd3 1.8.5 source map (nvd3-1.8.5.min.js.map) elided (1 line added in the commit; truncated in this archive)]

hadoop git commit: HDDS-391. Simplify Audit Framework to make audit logging easier to use. Contributed by Dinesh Chitlangia.

2018-09-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 35c7351f4 -> 0da03f8b1


HDDS-391. Simplify Audit Framework to make audit logging easier to use.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da03f8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da03f8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da03f8b

Branch: refs/heads/trunk
Commit: 0da03f8b14ed90c22137260d7e4029057421f0d8
Parents: 35c7351
Author: Anu Engineer 
Authored: Fri Sep 28 14:00:24 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 28 14:00:24 2018 -0700

--
 .../apache/hadoop/ozone/audit/AuditLogger.java  | 44 ++
 .../apache/hadoop/ozone/audit/AuditMessage.java | 77 +++--
 .../org/apache/hadoop/ozone/audit/Auditor.java  | 33 +++
 .../apache/hadoop/ozone/audit/package-info.java | 16 ++--
 .../ozone/audit/TestOzoneAuditLogger.java   | 89 +--
 .../common/src/test/resources/log4j2.properties |  4 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 90 +++-
 7 files changed, 213 insertions(+), 140 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da03f8b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index ee20c66..9357774 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -56,53 +56,21 @@ public class AuditLogger {
   }
 
   public void logWriteSuccess(AuditMessage msg) {
-logWriteSuccess(Level.INFO, msg);
-  }
-
-  public void logWriteSuccess(Level level, AuditMessage msg) {
-this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, null);
+this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null);
   }
 
   public void logWriteFailure(AuditMessage msg) {
-logWriteFailure(Level.ERROR, msg);
-  }
-
-  public void logWriteFailure(Level level, AuditMessage msg) {
-logWriteFailure(level, msg, null);
-  }
-
-  public void logWriteFailure(AuditMessage msg, Throwable exception) {
-logWriteFailure(Level.ERROR, msg, exception);
-  }
-
-  public void logWriteFailure(Level level, AuditMessage msg,
-  Throwable exception) {
-this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, exception);
+this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg,
+msg.getThrowable());
   }
 
   public void logReadSuccess(AuditMessage msg) {
-logReadSuccess(Level.INFO, msg);
-  }
-
-  public void logReadSuccess(Level level, AuditMessage msg) {
-this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, null);
+this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null);
   }
 
   public void logReadFailure(AuditMessage msg) {
-logReadFailure(Level.ERROR, msg);
-  }
-
-  public void logReadFailure(Level level, AuditMessage msg) {
-logReadFailure(level, msg, null);
-  }
-
-  public void logReadFailure(AuditMessage msg, Throwable exception) {
-logReadFailure(Level.ERROR, msg, exception);
-  }
-
-  public void logReadFailure(Level level, AuditMessage msg,
-  Throwable exception) {
-this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, exception);
+this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg,
+msg.getThrowable());
   }
 
 }
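
For readers tracking the API change, a minimal usage sketch of the simplified logger.
Only logWriteSuccess/logWriteFailure(AuditMessage) and msg.getThrowable() are confirmed by
the diff above; the AuditLoggerType constant and the way the message gets populated are
assumptions made for illustration.

    import org.apache.hadoop.ozone.audit.AuditLogger;
    import org.apache.hadoop.ozone.audit.AuditLoggerType;  // assumed; not shown in this diff
    import org.apache.hadoop.ozone.audit.AuditMessage;

    public class AuditUsageSketch {
      private static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.OMLOGGER);

      void auditedWrite() {
        AuditMessage msg = new AuditMessage();  // populated elsewhere (user/ip/op/params/ret)
        try {
          // ... perform the write operation being audited ...
          AUDIT.logWriteSuccess(msg);           // always INFO + WRITE_MARKER after this patch
        } catch (Exception ex) {
          // the throwable now travels inside the message (msg.getThrowable())
          // instead of being passed as an explicit Level/Throwable argument
          AUDIT.logWriteFailure(msg);           // always ERROR + WRITE_MARKER
        }
      }
    }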

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da03f8b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
index 858695a..1569ffe 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
@@ -26,12 +26,13 @@ import java.util.Map;
 public class AuditMessage implements Message {
 
   private String message;
+  private Throwable throwable;
 
-  public AuditMessage(String user, String ip, String op,
-  Map params, String ret){
+  private static final String MSG_PATTERN =
+  "user=%s | ip=%s | op=%s %s | ret=%s";
+
+  public AuditMessage(){
 
-this.message = String.format("user=%s ip=%s op=%s %s ret=%s",
-  user, ip, op, params, ret);
   }
 
   @Override
@@ -51,7 +52,7 @@ public class AuditMessage implements Message {
 
   @O

hadoop git commit: HDDS-572. Support S3 buckets as first class objects in Ozone Manager - 1. Contributed by Anu Engineer.

2018-10-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7fb91b8a5 -> e6b77ad65


HDDS-572. Support S3 buckets as first class objects in Ozone Manager - 1.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6b77ad6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6b77ad6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6b77ad6

Branch: refs/heads/trunk
Commit: e6b77ad65f923858fb67f5c2165fefe52d6f8c17
Parents: 7fb91b8
Author: Anu Engineer 
Authored: Thu Oct 4 21:40:13 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 4 21:40:13 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../hadoop/ozone/om/OMMetadataManager.java  |   6 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  14 ++
 .../hadoop/ozone/om/OzoneManagerLock.java   |  58 +-
 .../apache/hadoop/ozone/om/S3BucketManager.java |  59 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java| 194 +++
 .../hadoop/ozone/om/exceptions/OMException.java |   5 +-
 .../hadoop/ozone/om/TestOzoneManagerLock.java   |   3 +-
 .../hadoop/ozone/om/TestS3BucketManager.java| 115 +++
 9 files changed, 443 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6b77ad6/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 0a15ec8..923271c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -139,6 +139,7 @@ public final class OzoneConsts {
 
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
+  public static final String OM_S3_PREFIX ="S3:";
 
   /**
* Max OM Quota size of 1024 PB.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6b77ad6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index c8fb39c..5f490ec 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -236,4 +236,10 @@ public interface OMMetadataManager {
*/
   Table getOpenKeyTable();
 
+  /**
+   * Gets the S3Bucket to Ozone Volume/bucket mapping table.
+   *
+   * @return Table.
+   */
+  Table getS3Table();
 }
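
To make the new mapping table concrete, a hedged sketch of how an S3 bucket manager might
write and read it through getS3Table(). Only the getS3Table() accessor comes from this diff;
the byte[] put/get signatures on Table and the exact value format are assumptions
(OmMetadataManagerImpl below documents the layout as s3BucketName -> /volumeName/bucketName).

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.ozone.om.OMMetadataManager;

    public class S3TableSketch {
      // Hedged sketch; assumes Table exposes put(byte[], byte[]) and get(byte[]).
      static void registerS3Bucket(OMMetadataManager om, String s3Bucket,
          String volume, String bucket) throws IOException {
        byte[] key = s3Bucket.getBytes(StandardCharsets.UTF_8);
        byte[] value = (volume + "/" + bucket).getBytes(StandardCharsets.UTF_8);
        om.getS3Table().put(key, value);           // s3BucketName -> volume/bucket mapping
        byte[] stored = om.getS3Table().get(key);  // later lookups resolve the Ozone location
        System.out.println(new String(stored, StandardCharsets.UTF_8));
      }
    }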

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6b77ad6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index a7e1bed..75bd712 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -88,6 +88,8 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager {
* |---|
* | openKey| /volumeName/bucketName/keyName/id->KeyInfo   |
* |---|
+   * | s3Table| s3BucketName -> /volumeName/bucketName   |
+   * |---|
*/
 
   private static final String USER_TABLE = "userTable";
@@ -96,6 +98,7 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager {
   private static final String KEY_TABLE = "keyTable";
   private static final String DELETED_TABLE = "deletedTable";
   private static final String OPEN_KEY_TABLE = "openKeyTable";
+  private static final String S3_TABLE = "s3Table";
 
   private final DBStore store;
 
@@ -108,6 +111,7 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager {
   private final Table keyTable;
   private final Table deletedTable;
   private final Table openKeyTable;
+  private final Table s3Table;
 
   public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
 File metaDir = getOzoneMe

hadoop git commit: HDDS-479. Add more ozone fs tests in the robot integration framework. Contributed by Nilotpal Nandi.

2018-10-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2a07617f8 -> 153941b23


HDDS-479. Add more ozone fs tests in the robot integration framework.
Contributed by  Nilotpal Nandi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/153941b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/153941b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/153941b2

Branch: refs/heads/trunk
Commit: 153941b2365a3f4a2fc1285f93eeaf12419aca3a
Parents: 2a07617
Author: Anu Engineer 
Authored: Thu Oct 4 22:32:18 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 4 22:53:51 2018 -0700

--
 .../src/main/smoketest/ozonefs/ozonefs.robot| 75 +++-
 1 file changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/153941b2/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot 
b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
index fb7b98c..236c5b2 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
@@ -24,12 +24,85 @@ Resource    ../commonlib.robot
 *** Test Cases ***
 Create volume and bucket
 Execute ozone sh volume create http://ozoneManager/fstest --user bilbo --quota 100TB --root
+Execute ozone sh volume create http://ozoneManager/fstest2 --user bilbo --quota 100TB --root
 Execute ozone sh bucket create http://ozoneManager/fstest/bucket1
+Execute ozone sh bucket create http://ozoneManager/fstest/bucket2
+Execute ozone sh bucket create http://ozoneManager/fstest2/bucket3
 
 Check volume from ozonefs
 ${result} = Execute   ozone fs -ls o3://bucket1.fstest/
 
-Create directory from ozonefs
+Run ozoneFS tests
 Execute   ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep
 ${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
 Should contain    ${result}    testdir/deep
+Execute   ozone fs -copyFromLocal NOTICE.txt o3://bucket1.fstest/testdir/deep/
+${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+Should contain    ${result}    NOTICE.txt
+
+Execute   ozone fs -put NOTICE.txt o3://bucket1.fstest/testdir/deep/PUTFILE.txt
+${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+Should contain    ${result}    PUTFILE.txt
+
+${result} = Execute   ozone fs -ls o3://bucket1.fstest/testdir/deep/
+Should contain    ${result}    NOTICE.txt
+Should contain    ${result}    PUTFILE.txt
+
+Execute   ozone fs -mv o3://bucket1.fstest/testdir/deep/NOTICE.txt o3://bucket1.fstest/testdir/deep/MOVED.TXT
+${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+Should contain    ${result}    MOVED.TXT
+Should not contain  ${result}   NOTICE.txt
+
+Execute   ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep/subdir1
+Execute   ozone fs -cp o3://bucket1.fstest/testdir/deep/MOVED.TXT o3://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
+${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+Should contain    ${result}    subdir1/NOTICE.txt
+
+${result} = Execute   ozone fs -ls o3://bucket1.fstest/testdir/deep/subdir1/
+Should contain    ${result}    NOTICE.txt
+
+Execute   ozone fs -cat o3://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
+Should not contain  ${result}   Failed
+
+Execute   ozone fs -rm o3://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
+${result} = Execute   ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+Should not contain  ${result}   NOTICE.txt
+
+${result} = Execute 

hadoop git commit: HDDS-577. Support S3 buckets as first class objects in Ozone Manager - 2. Contributed by Bharat Viswanadham.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf04f1945 -> 5b7ba48ce


HDDS-577. Support S3 buckets as first class objects in Ozone Manager - 2.
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b7ba48c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b7ba48c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b7ba48c

Branch: refs/heads/trunk
Commit: 5b7ba48cedb0d70ca154771fec48e5c4129cf29a
Parents: bf04f19
Author: Anu Engineer 
Authored: Tue Oct 9 13:37:42 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 13:37:42 2018 -0700

--
 .../apache/hadoop/ozone/client/ObjectStore.java |  48 +
 .../ozone/client/protocol/ClientProtocol.java   |  35 
 .../hadoop/ozone/client/rest/RestClient.java|  25 +++
 .../hadoop/ozone/client/rpc/RpcClient.java  |  32 
 .../ozone/om/protocol/OzoneManagerProtocol.java |  25 +++
 ...neManagerProtocolClientSideTranslatorPB.java |  51 +
 .../src/main/proto/OzoneManagerProtocol.proto   |  32 
 .../ozone/client/rpc/TestOzoneRpcClient.java|  34 
 .../apache/hadoop/ozone/om/OzoneManager.java|  23 +++
 .../hadoop/ozone/om/S3BucketManagerImpl.java|  11 ++
 ...neManagerProtocolServerSideTranslatorPB.java | 187 +++
 11 files changed, 429 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7ba48c/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 17d1938..4196556 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -83,6 +83,54 @@ public class ObjectStore {
   }
 
   /**
+   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
+   * to access via both S3 and Ozone.
+   * @param userName - S3 user name.
+   * @param s3BucketName - S3 bucket Name.
+   * @throws IOException - On failure, throws an exception like Bucket exists.
+   */
+  public void createS3Bucket(String userName, String s3BucketName) throws
+  IOException {
+proxy.createS3Bucket(userName, s3BucketName);
+  }
+
+  /**
+   * Returns the Ozone Namespace for the S3Bucket. It will return the
+   * OzoneVolume/OzoneBucketName.
+   * @param s3BucketName  - S3 Bucket Name.
+   * @return String - The Ozone canonical name for this s3 bucket. This
+   * string is useful for mounting an OzoneFS.
+   * @throws IOException - Error is throw if the s3bucket does not exist.
+   */
+  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+return proxy.getOzoneBucketMapping(s3BucketName);
+  }
+
+  /**
+   * Returns the corresponding Ozone volume given an S3 Bucket.
+   * @param s3BucketName - S3Bucket Name.
+   * @return String - Ozone Volume name.
+   * @throws IOException - Throws if the s3Bucket does not exist.
+   */
+  public String getOzoneVolumeName(String s3BucketName) throws IOException {
+String mapping = getOzoneBucketMapping(s3BucketName);
+return mapping.split("/")[0];
+
+  }
+
+  /**
+   * Returns the corresponding Ozone bucket name for the given S3 bucket.
+   * @param s3BucketName - S3Bucket Name.
+   * @return String - Ozone bucket Name.
+   * @throws IOException - Throws if the s3bucket does not exist.
+   */
+  public String getOzoneBucketName(String s3BucketName) throws IOException {
+String mapping = getOzoneBucketMapping(s3BucketName);
+return mapping.split("/")[1];
+  }
+
+
+  /**
* Returns the volume information.
* @param volumeName Name of the volume.
* @return OzoneVolume
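
To show how these new methods fit together, a hedged usage sketch; obtaining the ObjectStore
instance (normally via an OzoneClient) is outside this diff, and the user/bucket names are
made up.

    import java.io.IOException;
    import org.apache.hadoop.ozone.client.ObjectStore;

    public class S3MappingSketch {
      // Only the four ObjectStore methods used below come from this diff.
      static void resolve(ObjectStore store) throws IOException {
        store.createS3Bucket("bilbo", "photos");                 // hypothetical names
        String mapping = store.getOzoneBucketMapping("photos");  // "volume/bucket" style string
        String volume = store.getOzoneVolumeName("photos");      // mapping.split("/")[0]
        String bucket = store.getOzoneBucketName("photos");      // mapping.split("/")[1]
        System.out.println("s3://photos is backed by " + volume + "/" + bucket
            + " (mapping: " + mapping + ")");
      }
    }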

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7ba48c/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 008b69d..b750a5a 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -322,6 +322,41 @@ public interface ClientProtocol {
   throws IOException;
 
   /**
+   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
+   * to access via both S3 and Ozone.
+   * @param userName - 

hadoop git commit: HDDS-478. Log files related to each daemon don't have proper startup and shutdown logs. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a3973931 -> c1fe657a1


HDDS-478. Log files related to each daemon don't have proper startup and
shutdown logs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1fe657a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1fe657a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1fe657a

Branch: refs/heads/trunk
Commit: c1fe657a106aaae3bdf81fa4add70962aaee165b
Parents: 6a39739
Author: Anu Engineer 
Authored: Tue Oct 9 16:44:32 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 16:44:32 2018 -0700

--
 .../common/src/main/conf/om-audit-log4j2.properties   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1fe657a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
--
diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties 
b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
index 7d097a0..7be51ac 100644
--- a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
+++ b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
@@ -52,11 +52,15 @@ filter.write.onMismatch=NEUTRAL
 # TRACE (least specific, a lot of data)
 # ALL (least specific, all data)
 
-appenders=console, rolling
-appender.console.type=Console
-appender.console.name=STDOUT
-appender.console.layout.type=PatternLayout
-appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | 
%throwable{3} %n
+# Uncomment following section to enable logging to console appender also
+#appenders=console, rolling
+#appender.console.type=Console
+#appender.console.name=STDOUT
+#appender.console.layout.type=PatternLayout
+#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | 
%throwable{3} %n
+
+# Comment this line when using both console and rolling appenders
+appenders=rolling
 
 #Rolling File Appender with size & time thresholds.
 #Rolling is triggered when either threshold is breached.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-559. fs.default.name is deprecated. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk c1fe657a1 -> 6a06bc309


HDDS-559. fs.default.name is deprecated.
Contributed by  Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a06bc30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a06bc30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a06bc30

Branch: refs/heads/trunk
Commit: 6a06bc309d72c766694eb6296d5f3fb5c3c597c5
Parents: c1fe657
Author: Anu Engineer 
Authored: Tue Oct 9 16:57:39 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 16:57:39 2018 -0700

--
 hadoop-ozone/docs/content/OzoneFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a06bc30/hadoop-ozone/docs/content/OzoneFS.md
--
diff --git a/hadoop-ozone/docs/content/OzoneFS.md 
b/hadoop-ozone/docs/content/OzoneFS.md
index d0621be..6853992 100644
--- a/hadoop-ozone/docs/content/OzoneFS.md
+++ b/hadoop-ozone/docs/content/OzoneFS.md
@@ -46,7 +46,7 @@ Please add the following entry to the core-site.xml.
   org.apache.hadoop.fs.ozone.OzoneFileSystem
 
 
-  fs.default.name
+  fs.defaultFS
   o3://localhost:9864/volume/bucket
 
 {{< /highlight >}}
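
A hedged Java sketch of what the corrected property enables: with fs.defaultFS (rather
than the deprecated fs.default.name) pointing at an o3:// URI in core-site.xml,
FileSystem.get() resolves to the Ozone file system. The URI below only mirrors the doc
example, and the ozone filesystem jar is assumed to be on the classpath.

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class DefaultFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();        // picks up core-site.xml
    System.out.println(conf.get("fs.defaultFS"));    // e.g. o3://localhost:9864/volume/bucket
    FileSystem fs = FileSystem.get(conf);            // Ozone file system when o3:// is configured
    System.out.println(fs.exists(new Path("/")));
  }
}
```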


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-568. ozone sh volume info, update, delete operations fail when volume name is not prefixed by /. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 605622c87 -> 4de2dc269


HDDS-568. ozone sh volume info, update, delete operations fail when volume name 
is not prefixed by /.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4de2dc26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4de2dc26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4de2dc26

Branch: refs/heads/trunk
Commit: 4de2dc2699fc371b2de83ba55ecbcecef1f0423b
Parents: 605622c
Author: Anu Engineer 
Authored: Tue Oct 9 17:16:52 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 17:32:04 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 48 
 .../hadoop/ozone/web/ozShell/Handler.java   | 28 
 .../web/ozShell/volume/DeleteVolumeHandler.java | 12 +
 .../web/ozShell/volume/InfoVolumeHandler.java   | 22 +
 .../web/ozShell/volume/UpdateVolumeHandler.java | 12 +
 5 files changed, 79 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de2dc26/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 6e73b8c..d5f2554 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -283,6 +283,33 @@ public class TestOzoneShell {
   GenericTestUtils.assertExceptionContains(
   "Info Volume failed, error:VOLUME_NOT_FOUND", e);
 }
+
+
+volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+volumeArgs = VolumeArgs.newBuilder()
+.setOwner("bilbo")
+.setQuota("100TB")
+.build();
+client.createVolume(volumeName, volumeArgs);
+volume = client.getVolumeDetails(volumeName);
+assertNotNull(volume);
+
+//volumeName prefixed with /
+String volumeNameWithSlashPrefix = "/" + volumeName;
+args = new String[] {"volume", "delete",
+url + "/" + volumeNameWithSlashPrefix};
+execute(shell, args);
+output = out.toString();
+assertTrue(output.contains("Volume " + volumeName + " is deleted"));
+
+// verify if volume has been deleted
+try {
+  client.getVolumeDetails(volumeName);
+  fail("Get volume call should have thrown.");
+} catch (IOException e) {
+  GenericTestUtils.assertExceptionContains(
+  "Info Volume failed, error:VOLUME_NOT_FOUND", e);
+}
   }
 
   @Test
@@ -295,6 +322,7 @@ public class TestOzoneShell {
 .build();
 client.createVolume(volumeName, volumeArgs);
 
+//volumeName supplied as-is
 String[] args = new String[] {"volume", "info", url + "/" + volumeName};
 execute(shell, args);
 
@@ -303,6 +331,17 @@ public class TestOzoneShell {
 assertTrue(output.contains("createdOn")
 && output.contains(OzoneConsts.OZONE_TIME_ZONE));
 
+//volumeName prefixed with /
+String volumeNameWithSlashPrefix = "/" + volumeName;
+args = new String[] {"volume", "info",
+url + "/" + volumeNameWithSlashPrefix};
+execute(shell, args);
+
+output = out.toString();
+assertTrue(output.contains(volumeName));
+assertTrue(output.contains("createdOn")
+&& output.contains(OzoneConsts.OZONE_TIME_ZONE));
+
 // test infoVolume with invalid volume name
 args = new String[] {"volume", "info",
 url + "/" + volumeName + "/invalid-name"};
@@ -365,6 +404,15 @@ public class TestOzoneShell {
 vol = client.getVolumeDetails(volumeName);
 assertEquals(newUser, vol.getOwner());
 
+//volume with / prefix
+String volumeWithPrefix = "/" + volumeName;
+String newUser2 = "new-user2";
+args = new String[] {"volume", "update", url + "/" + volumeWithPrefix,
+"--user", newUser2};
+execute(shell, args);
+vol = client.getVolumeDetails(volumeName);
+assertEquals(newUser2, vol.getOwner());
+
 // test error conditions
 args = new String[] {"volume", "update", url + "/invalid-volume",
 "--user", newUser};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de2dc26/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/H
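
(The Handler.java diff is cut off above. As a rough illustration only, the fix amounts to
the kind of normalization sketched below; the class and method names here are hypothetical
and are not the actual code added by the commit.)

```
// Hypothetical sketch: accept volume names both with and without a leading '/',
// so that "ozone sh volume info /vol1" behaves the same as "... info vol1".
final class VolumeNameUtil {
  static String normalize(String volumeName) {
    if (volumeName != null && volumeName.startsWith("/")) {
      return volumeName.substring(1);
    }
    return volumeName;
  }
}
```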

hadoop git commit: HDDS-604. Correct Ozone getOzoneConf description. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4de2dc269 -> 794c0451c


HDDS-604. Correct Ozone getOzoneConf description.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/794c0451
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/794c0451
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/794c0451

Branch: refs/heads/trunk
Commit: 794c0451cffbe147234a2417943709c121d06620
Parents: 4de2dc2
Author: Anu Engineer 
Authored: Tue Oct 9 17:52:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 17:52:06 2018 -0700

--
 hadoop-ozone/common/src/main/bin/ozone  |  4 ++--
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 10 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  | 10 +-
 hadoop-ozone/docs/content/CommandShell.md   |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/794c0451/hadoop-ozone/common/src/main/bin/ozone
--
diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index 4b50771..2ba9ea7 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -39,7 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "fs" client "run a filesystem command on Ozone file 
system. Equivalent to 'hadoop fs'"
   hadoop_add_subcommand "genconf" client "generate minimally required ozone 
configs and output to ozone-site.xml in specified path"
   hadoop_add_subcommand "genesis" client "runs a collection of ozone 
benchmarks to help with tuning."
-  hadoop_add_subcommand "getozoneconf" client "get ozone config values from 
configuration"
+  hadoop_add_subcommand "getconf" client "get ozone config values from 
configuration"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode 
or DataNode."
   hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata 
into relational data"
   hadoop_add_subcommand "om" daemon "Ozone Manager"
@@ -94,7 +94,7 @@ function ozonecmd_case
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
-getozoneconf)
+getconf)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/794c0451/hadoop-ozone/common/src/main/bin/start-ozone.sh
--
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh 
b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index cfb54e0..4c022fb 100755
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -67,8 +67,8 @@ fi
 #Add other possible options
 nameStartOpt="$nameStartOpt $*"
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} 
== "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -77,7 +77,7 @@ fi
 
 #-
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled 
| tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -96,7 +96,7 @@ hadoop_uservar_su hdfs datanode 
"${HADOOP_HDFS_HOME}/bin/ozone" \
 
 #-
 # Ozone ozonemanager nodes
-OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 
2>/dev/null)
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null)
 echo "Starting Ozone Manager nodes [${OM_NODES}]"
 if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
   OM_NODES=$(hostname)
@@ -113,7 +113,7 @@ HADOOP_JUMBO_RETCOUNTER=$?
 
 #-
 # Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin

hadoop git commit: HDDS-443. Create reusable ProgressBar utility for freon tests. Contributed by Zsolt Horvath.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6fa3feb57 -> f068296f8


HDDS-443. Create reusable ProgressBar utility for freon tests.
Contributed by Zsolt Horvath.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f068296f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f068296f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f068296f

Branch: refs/heads/trunk
Commit: f068296f8a88fc2a4c7b1680bc190c5fa7fc2469
Parents: 6fa3feb
Author: Anu Engineer 
Authored: Tue Oct 9 18:18:19 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 18:19:00 2018 -0700

--
 hadoop-ozone/tools/pom.xml  |   6 +
 .../apache/hadoop/ozone/freon/ProgressBar.java  | 210 +++
 .../hadoop/ozone/freon/TestProgressBar.java | 112 ++
 3 files changed, 328 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f068296f/hadoop-ozone/tools/pom.xml
--
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index ac819b5..2d273d1 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -77,6 +77,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   test
   test-jar
 
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <version>2.15.0</version>
+      <scope>test</scope>
+    </dependency>
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f068296f/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
new file mode 100644
index 000..a8d7e73
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.PrintStream;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+/**
+ * Runs an arbitrary task and prints progress on the provided stream. The
+ * progress bar stops when the provided current value reaches the max value,
+ * or when an exception is thrown.
+ */
+public class ProgressBar {
+
+  private static final long REFRESH_INTERVAL = 1000L;
+
+  private PrintStream stream;
+  private AtomicLong currentValue;
+  private long maxValue;
+  private Thread progressBar;
+  private volatile boolean exception = false;
+  private long startTime;
+
+  /**
+   * @param stream Used to display the progress
+   * @param maxValue Maximum value of the progress
+   */
+  ProgressBar(PrintStream stream, long maxValue) {
+this.stream = stream;
+this.maxValue = maxValue;
+this.currentValue = new AtomicLong(0);
+this.progressBar = new Thread(new ProgressBarThread());
+  }
+
+  /**
+   * Starts a task with a progress bar, without any in/out parameters; the
+   * Runnable is used just as a task wrapper.
+   *
+   * @param task Runnable
+   */
+  public void start(Runnable task) {
+
+startTime = System.nanoTime();
+
+try {
+
+  progressBar.start();
+  task.run();
+
+} catch (Exception e) {
+  exception = true;
+} finally {
+
+  try {
+progressBar.join();
+  } catch (InterruptedException e) {
+e.printStackTrace();
+  }
+}
+  }
+
+  /**
+   * Start a task with only out parameters.
+   *
+   * @param task Supplier that represents the task
+   * @param <T> Generic return type
+   * @return Whatever the supplier produces
+   */
+  public <T> T start(Supplier<T> task) {
+
+startTime = System.nanoTime();
+T result = null;
+
+try {
+
+  progressBar.start();
+  result = task.get();
+
+} catch (Exception e) {
+  exception = true;
+} finally {
+
+  try {
+progressBar.join();
+  } catch (InterruptedException e) {
+
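
(The ProgressBar diff is cut off above. A hedged usage sketch follows: the constructor is
package-private, so a caller would sit in org.apache.hadoop.ozone.freon; the work size is
made up, and the hook a task uses to advance the counter is not visible in the truncated
diff, so it is only referenced in a comment.)

```
package org.apache.hadoop.ozone.freon;

public final class ProgressBarExample {
  public static void main(String[] args) {
    final long totalKeys = 1000;                          // hypothetical work size
    ProgressBar bar = new ProgressBar(System.out, totalKeys);
    bar.start(() -> {
      // A real freon task would do its work here and advance the bar's counter
      // through whatever increment hook the full class exposes.
      for (long i = 0; i < totalKeys; i++) {
        // ... write one key ...
      }
    });
  }
}
```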

hadoop git commit: HDDS-634. OzoneFS: support basic user group and permission for file/dir. Contributed by Xiaoyu Yao.

2018-10-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 50552479c -> 604c27c33


HDDS-634. OzoneFS: support basic user group and permission for file/dir.
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/604c27c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/604c27c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/604c27c3

Branch: refs/heads/trunk
Commit: 604c27c33a3bd30644debaf192cec3cf80dfa36d
Parents: 5055247
Author: Anu Engineer 
Authored: Thu Oct 11 17:19:38 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 11 17:25:25 2018 -0700

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java| 16 ++--
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   | 17 +++--
 2 files changed, 29 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/604c27c3/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 8fbd688..0b721fb 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -448,6 +448,14 @@ public class OzoneFileSystem extends FileSystem {
   }
 
   /**
+   * Get the username of the FS.
+   * @return the short name of the user who instantiated the FS
+   */
+  public String getUsername() {
+return userName;
+  }
+
+  /**
* Check whether the path is valid and then create directories.
* Directory is represented using a key with no value.
* All the non-existent parent directories are also created.
@@ -528,11 +536,15 @@ public class OzoneFileSystem extends FileSystem {
   throw new FileNotFoundException(f + ": No such file or directory!");
 } else if (isDirectory(meta)) {
   return new FileStatus(0, true, 1, 0,
-  meta.getModificationTime(), qualifiedPath);
+  meta.getModificationTime(), 0,
+  FsPermission.getDirDefault(), getUsername(), getUsername(),
+  qualifiedPath);
 } else {
   //TODO: Fetch replication count from ratis config
   return new FileStatus(meta.getDataSize(), false, 1,
-getDefaultBlockSize(f), meta.getModificationTime(), qualifiedPath);
+  getDefaultBlockSize(f), meta.getModificationTime(), 0,
+  FsPermission.getFileDefault(), getUsername(), getUsername(),
+  qualifiedPath);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/604c27c3/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index a225702..7cf6e3d 100644
--- 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -23,6 +23,7 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Before;
@@ -167,6 +168,10 @@ public class TestOzoneFileInterfaces {
 assertTrue("Modification time has not been recorded: " + status,
 status.getModificationTime() > currentTime);
 
+assertEquals(false, status.isDirectory());
+assertEquals(FsPermission.getFileDefault(), status.getPermission());
+verifyOwnerGroup(status);
+
 try (FSDataInputStream inputStream = fs.open(path)) {
   byte[] buffer = new byte[stringLen];
   inputStream.readFully(0, buffer);
@@ -175,6 +180,12 @@ public class TestOzoneFileInterfaces {
 }
   }
 
+  private void verifyOwnerGroup(FileStatus fileStatus) {
+String owner = getCurrentUser();
+assertEquals(owner, fileStatus.getOwner());
+assertEquals(owner, fileStatus.getGroup());
+  }
+
 
   @Test
   public void testDirectory() throws IOException {
@@ -186,6 +197,10 @@ public class TestOzoneFileInterfaces {
 FileStatus status = fs.getFileStatus(path);
 assertTrue("The created path is not directory.", status.isDirectory());
 
+assertEquals(true, status.isDirectory());
+assertEquals(FsPermission.getDirDefault(), status.getPermission());
+verifyOwnerGroup(status);
+
 assertEquals(0, status.get
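
(A short, hedged sketch of what the change above surfaces to clients: a FileStatus from
OzoneFileSystem now carries the FS user as owner and group plus the Hadoop default
permissions, instead of the size/time-only constructor used before. Paths and setup here
are illustrative.)

```
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class StatusExample {
  static void printStatus(FileSystem fs, Path path) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    FsPermission expected = status.isDirectory()
        ? FsPermission.getDirDefault()      // default directory permissions
        : FsPermission.getFileDefault();    // default file permissions
    System.out.println(status.getOwner() + ":" + status.getGroup() + " "
        + status.getPermission() + " matchesDefault="
        + status.getPermission().equals(expected));
  }
}
```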

hadoop git commit: HDDS-634. OzoneFS: support basic user group and permission for file/dir. Contributed by Xiaoyu Yao.

2018-10-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 ed09883f2 -> 587e29909


HDDS-634. OzoneFS: support basic user group and permission for file/dir.
Contributed by Xiaoyu Yao.

(cherry picked from commit 604c27c33a3bd30644debaf192cec3cf80dfa36d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/587e2990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/587e2990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/587e2990

Branch: refs/heads/ozone-0.3
Commit: 587e29909e19de21d7b670a3dd3d5c502c66502b
Parents: ed09883
Author: Anu Engineer 
Authored: Thu Oct 11 17:19:38 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 11 17:26:02 2018 -0700

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java| 16 ++--
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   | 17 +++--
 2 files changed, 29 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/587e2990/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 8fbd688..0b721fb 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -448,6 +448,14 @@ public class OzoneFileSystem extends FileSystem {
   }
 
   /**
+   * Get the username of the FS.
+   * @return the short name of the user who instantiated the FS
+   */
+  public String getUsername() {
+return userName;
+  }
+
+  /**
* Check whether the path is valid and then create directories.
* Directory is represented using a key with no value.
* All the non-existent parent directories are also created.
@@ -528,11 +536,15 @@ public class OzoneFileSystem extends FileSystem {
   throw new FileNotFoundException(f + ": No such file or directory!");
 } else if (isDirectory(meta)) {
   return new FileStatus(0, true, 1, 0,
-  meta.getModificationTime(), qualifiedPath);
+  meta.getModificationTime(), 0,
+  FsPermission.getDirDefault(), getUsername(), getUsername(),
+  qualifiedPath);
 } else {
   //TODO: Fetch replication count from ratis config
   return new FileStatus(meta.getDataSize(), false, 1,
-getDefaultBlockSize(f), meta.getModificationTime(), qualifiedPath);
+  getDefaultBlockSize(f), meta.getModificationTime(), 0,
+  FsPermission.getFileDefault(), getUsername(), getUsername(),
+  qualifiedPath);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587e2990/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index a225702..7cf6e3d 100644
--- 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -23,6 +23,7 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Before;
@@ -167,6 +168,10 @@ public class TestOzoneFileInterfaces {
 assertTrue("Modification time has not been recorded: " + status,
 status.getModificationTime() > currentTime);
 
+assertEquals(false, status.isDirectory());
+assertEquals(FsPermission.getFileDefault(), status.getPermission());
+verifyOwnerGroup(status);
+
 try (FSDataInputStream inputStream = fs.open(path)) {
   byte[] buffer = new byte[stringLen];
   inputStream.readFully(0, buffer);
@@ -175,6 +180,12 @@ public class TestOzoneFileInterfaces {
 }
   }
 
+  private void verifyOwnerGroup(FileStatus fileStatus) {
+String owner = getCurrentUser();
+assertEquals(owner, fileStatus.getOwner());
+assertEquals(owner, fileStatus.getGroup());
+  }
+
 
   @Test
   public void testDirectory() throws IOException {
@@ -186,6 +197,10 @@ public class TestOzoneFileInterfaces {
 FileStatus status = fs.getFileStatus(path);
 assertTrue("The created path is not directory.", status.isDirectory());
 
+assertEquals(true, status.isDirectory());
+assertEquals(FsPermission.getDirDefault(), status.getP

hadoop git commit: HDDS-624. PutBlock fails with Unexpected Storage Container Exception. Contributed by Arpit Agarwal.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8ae8a5004 -> 02e1ef5e0


HDDS-624. PutBlock fails with Unexpected Storage Container Exception.
Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02e1ef5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02e1ef5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02e1ef5e

Branch: refs/heads/trunk
Commit: 02e1ef5e0779369f1363df2bb0437945fe9a271c
Parents: 8ae8a50
Author: Anu Engineer 
Authored: Fri Oct 12 14:22:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 14:22:46 2018 -0700

--
 .../hadoop/utils/MetadataStoreBuilder.java  | 38 ++--
 .../hadoop/utils/TestRocksDBStoreMBean.java | 19 ++
 .../container/common/utils/ContainerCache.java  |  6 ++--
 .../container/keyvalue/helpers/BlockUtils.java  |  3 +-
 4 files changed, 38 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e1ef5e/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
index 85cebed..a1c9b85 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.utils;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
@@ -30,6 +31,8 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Optional;
+import java.util.function.Supplier;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys
 .OZONE_METADATA_STORE_IMPL_LEVELDB;
@@ -53,7 +56,7 @@ public class MetadataStoreBuilder {
   private File dbFile;
   private long cacheSize;
   private boolean createIfMissing = true;
-  private Configuration conf;
+  private Optional<Configuration> optionalConf = Optional.empty();
   private String dbType;
 
   public static MetadataStoreBuilder newBuilder() {
@@ -76,7 +79,7 @@ public class MetadataStoreBuilder {
   }
 
   public MetadataStoreBuilder setConf(Configuration configuration) {
-this.conf = configuration;
+this.optionalConf = Optional.of(configuration);
 return this;
   }
 
@@ -98,13 +101,12 @@ public class MetadataStoreBuilder {
 }
 
 // Build db store based on configuration
-MetadataStore store = null;
+final Configuration conf = optionalConf.orElseGet(
+() -> new OzoneConfiguration());
 
 if(dbType == null) {
   LOG.debug("dbType is null, using ");
-  dbType = conf == null ?
-  OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
-  conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+  dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
   OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
   LOG.debug("dbType is null, using dbType {} from ozone configuration",
   dbType);
@@ -117,7 +119,7 @@ public class MetadataStoreBuilder {
   if (cacheSize > 0) {
 options.cacheSize(cacheSize);
   }
-  store = new LevelDBStore(dbFile, options);
+  return new LevelDBStore(dbFile, options);
 } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
   org.rocksdb.Options opts = new org.rocksdb.Options();
   opts.setCreateIfMissing(createIfMissing);
@@ -128,10 +130,9 @@ public class MetadataStoreBuilder {
 opts.setTableFormatConfig(tableConfig);
   }
 
-  String rocksDbStat = conf == null ?
-  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT :
-  conf.getTrimmed(OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+  String rocksDbStat = conf.getTrimmed(
+  OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
+  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
 
   if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
 Statistics statistics = new Statistics();
@@ -139,14 +140,13 @@ public class MetadataStoreBuilder {
 opts = opts.setStatistics(statistics);
 
   }
-  store = new RocksDBStore(dbFile, opts);
-} else {
-  throw new IllegalArgumentException("Invalid argument for "
-  + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
-  + ". Expecting " + OZONE_METADAT

hadoop git commit: HDDS-624. PutBlock fails with Unexpected Storage Container Exception. Contributed by Arpit Agarwal.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 252a44f8c -> 7daeaa9a9


HDDS-624. PutBlock fails with Unexpected Storage Container Exception.
Contributed by Arpit Agarwal.

(cherry picked from commit 02e1ef5e0779369f1363df2bb0437945fe9a271c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7daeaa9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7daeaa9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7daeaa9a

Branch: refs/heads/ozone-0.3
Commit: 7daeaa9a983b3ded49a4e151e69210123bc9b273
Parents: 252a44f
Author: Anu Engineer 
Authored: Fri Oct 12 14:22:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 14:29:59 2018 -0700

--
 .../hadoop/utils/MetadataStoreBuilder.java  | 38 ++--
 .../hadoop/utils/TestRocksDBStoreMBean.java | 19 ++
 .../container/common/utils/ContainerCache.java  |  6 ++--
 .../container/keyvalue/helpers/BlockUtils.java  |  3 +-
 4 files changed, 38 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7daeaa9a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
index 85cebed..a1c9b85 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.utils;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
@@ -30,6 +31,8 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Optional;
+import java.util.function.Supplier;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys
 .OZONE_METADATA_STORE_IMPL_LEVELDB;
@@ -53,7 +56,7 @@ public class MetadataStoreBuilder {
   private File dbFile;
   private long cacheSize;
   private boolean createIfMissing = true;
-  private Configuration conf;
+  private Optional<Configuration> optionalConf = Optional.empty();
   private String dbType;
 
   public static MetadataStoreBuilder newBuilder() {
@@ -76,7 +79,7 @@ public class MetadataStoreBuilder {
   }
 
   public MetadataStoreBuilder setConf(Configuration configuration) {
-this.conf = configuration;
+this.optionalConf = Optional.of(configuration);
 return this;
   }
 
@@ -98,13 +101,12 @@ public class MetadataStoreBuilder {
 }
 
 // Build db store based on configuration
-MetadataStore store = null;
+final Configuration conf = optionalConf.orElseGet(
+() -> new OzoneConfiguration());
 
 if(dbType == null) {
   LOG.debug("dbType is null, using ");
-  dbType = conf == null ?
-  OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
-  conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+  dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
   OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
   LOG.debug("dbType is null, using dbType {} from ozone configuration",
   dbType);
@@ -117,7 +119,7 @@ public class MetadataStoreBuilder {
   if (cacheSize > 0) {
 options.cacheSize(cacheSize);
   }
-  store = new LevelDBStore(dbFile, options);
+  return new LevelDBStore(dbFile, options);
 } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
   org.rocksdb.Options opts = new org.rocksdb.Options();
   opts.setCreateIfMissing(createIfMissing);
@@ -128,10 +130,9 @@ public class MetadataStoreBuilder {
 opts.setTableFormatConfig(tableConfig);
   }
 
-  String rocksDbStat = conf == null ?
-  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT :
-  conf.getTrimmed(OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+  String rocksDbStat = conf.getTrimmed(
+  OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
+  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
 
   if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
 Statistics statistics = new Statistics();
@@ -139,14 +140,13 @@ public class MetadataStoreBuilder {
 opts = opts.setStatistics(statistics);
 
   }
-  store = new RocksDBStore(dbFile, opts);
-} else {
-  throw new IllegalArgumentException("Invalid argument for "
-  + OzoneCo

hadoop git commit: HDDS-645. Enable OzoneFS contract tests by default. Contributed by Arpit Agarwal.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3a684a2b2 -> c07b95bdf


HDDS-645. Enable OzoneFS contract tests by default.
Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c07b95bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c07b95bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c07b95bd

Branch: refs/heads/trunk
Commit: c07b95bdfcd410aa82acdb1fed28e84981ff06f9
Parents: 3a684a2
Author: Anu Engineer 
Authored: Fri Oct 12 14:45:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 14:46:04 2018 -0700

--
 hadoop-ozone/ozonefs/pom.xml | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07b95bd/hadoop-ozone/ozonefs/pom.xml
--
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 8f7a0a6..0bf1c3a 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -102,6 +102,15 @@
   
 
   
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <includes>
+            <include>ITestOzoneContract*.java</include>
+          </includes>
+        </configuration>
+      </plugin>
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-645. Enable OzoneFS contract tests by default. Contributed by Arpit Agarwal.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 86c9418c1 -> 9aa6c7b2a


HDDS-645. Enable OzoneFS contract tests by default.
Contributed by Arpit Agarwal.

(cherry picked from commit c07b95bdfcd410aa82acdb1fed28e84981ff06f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9aa6c7b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9aa6c7b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9aa6c7b2

Branch: refs/heads/ozone-0.3
Commit: 9aa6c7b2ad09a7f084cd70d9595d4ddc380657b2
Parents: 86c9418
Author: Anu Engineer 
Authored: Fri Oct 12 14:45:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 14:46:40 2018 -0700

--
 hadoop-ozone/ozonefs/pom.xml | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9aa6c7b2/hadoop-ozone/ozonefs/pom.xml
--
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 8174b74..eb7de6b 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -102,6 +102,15 @@
   
 
   
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <includes>
+            <include>ITestOzoneContract*.java</include>
+          </includes>
+        </configuration>
+      </plugin>
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-445. Create a logger to print out all of the incoming requests. Contributed by Bharat Viswanadham.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk ddc964932 -> 3c1fe073d


HDDS-445. Create a logger to print out all of the incoming requests.
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c1fe073
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c1fe073
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c1fe073

Branch: refs/heads/trunk
Commit: 3c1fe073d2fef76676660144e7dce2050761ae64
Parents: ddc9649
Author: Anu Engineer 
Authored: Fri Oct 12 16:27:54 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 16:27:54 2018 -0700

--
 .../hadoop-common/src/main/conf/log4j.properties   | 6 ++
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config   | 4 
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c1fe073/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index aeae2b8..0214da3 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -271,6 +271,12 @@ 
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+#Http Server request logs for Ozone S3Gateway
+log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log
+log4j.appender.s3gatewayrequestlog.RetainDays=3
+
 
 # WebHdfs request log on datanodes
 # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c1fe073/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
index 2b22874..dd53d9d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
@@ -31,6 +31,10 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-445. Create a logger to print out all of the incoming requests. Contributed by Bharat Viswanadham.

2018-10-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 1a9bb57b0 -> 9e219e370


HDDS-445. Create a logger to print out all of the incoming requests.
Contributed by Bharat Viswanadham.

(cherry picked from commit 3c1fe073d2fef76676660144e7dce2050761ae64)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e219e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e219e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e219e37

Branch: refs/heads/ozone-0.3
Commit: 9e219e370754734f64b96a372a5a05768360533c
Parents: 1a9bb57
Author: Anu Engineer 
Authored: Fri Oct 12 16:27:54 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 16:29:42 2018 -0700

--
 .../hadoop-common/src/main/conf/log4j.properties   | 6 ++
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config   | 4 
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e219e37/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index aeae2b8..0214da3 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -271,6 +271,12 @@ 
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+#Http Server request logs for Ozone S3Gateway
+log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log
+log4j.appender.s3gatewayrequestlog.RetainDays=3
+
 
 # WebHdfs request log on datanodes
 # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e219e37/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
index 2b22874..dd53d9d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
@@ -31,6 +31,10 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-653. TestMetadataStore#testIterator fails on Windows. Contributed by Yiqun Lin.

2018-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk addb84600 -> 9227f3d22


HDDS-653. TestMetadataStore#testIterator fails on Windows.
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9227f3d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9227f3d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9227f3d2

Branch: refs/heads/trunk
Commit: 9227f3d22412f94d22f4cae6a4fb3a52ead9b011
Parents: addb846
Author: Anu Engineer 
Authored: Sat Oct 13 10:41:06 2018 -0700
Committer: Anu Engineer 
Committed: Sat Oct 13 10:41:06 2018 -0700

--
 .../src/test/java/org/apache/hadoop/utils/TestMetadataStore.java   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9227f3d2/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
index 30fc7f3..a91bc80 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
@@ -163,6 +163,8 @@ public class TestMetadataStore {
   GenericTestUtils.assertExceptionContains("Store has no more elements",
   ex);
 }
+dbStore.close();
+dbStore.destroy();
 FileUtils.deleteDirectory(dbDir);
 
   }
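
(The two added lines matter because an open RocksDB/LevelDB handle keeps the store's files
locked on Windows, so FileUtils.deleteDirectory fails until the store is closed. A hedged
sketch of the same cleanup ordering, reusing the test's dbStore and dbDir and wrapped in
try/finally for robustness:)

```
// Sketch only: release the store's file handles before removing its directory.
try {
  // ... exercise the MetadataStore iterator as the test does ...
} finally {
  dbStore.close();                   // release DB file handles (required on Windows)
  dbStore.destroy();                 // drop the on-disk store
  FileUtils.deleteDirectory(dbDir);  // now the directory can actually be deleted
}
```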


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-653. TestMetadataStore#testIterator fails on Windows. Contributed by Yiqun Lin.

2018-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 690da72cd -> 39ffed2a1


HDDS-653. TestMetadataStore#testIterator fails on Windows.
Contributed by Yiqun Lin.

(cherry picked from commit 9227f3d22412f94d22f4cae6a4fb3a52ead9b011)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ffed2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ffed2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ffed2a

Branch: refs/heads/ozone-0.3
Commit: 39ffed2a1109f52ef118741a918267d6bca3a7b6
Parents: 690da72
Author: Anu Engineer 
Authored: Sat Oct 13 10:41:06 2018 -0700
Committer: Anu Engineer 
Committed: Sat Oct 13 10:46:58 2018 -0700

--
 .../src/test/java/org/apache/hadoop/utils/TestMetadataStore.java   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ffed2a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
index 30fc7f3..a91bc80 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
@@ -163,6 +163,8 @@ public class TestMetadataStore {
   GenericTestUtils.assertExceptionContains("Store has no more elements",
   ex);
 }
+dbStore.close();
+dbStore.destroy();
 FileUtils.deleteDirectory(dbDir);
 
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-355. Disable OpenKeyDeleteService and DeleteKeysService. Contributed by Anu Engineer.

2018-08-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 60ffec9f7 -> ab37423ad


HDDS-355. Disable OpenKeyDeleteService and DeleteKeysService.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab37423a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab37423a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab37423a

Branch: refs/heads/trunk
Commit: ab37423ad8debe2f050133ad97b686083531c2ea
Parents: 60ffec9
Author: Anu Engineer 
Authored: Fri Aug 17 11:50:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 17 11:50:46 2018 -0700

--
 .../commandhandler/TestBlockDeletion.java   |  2 ++
 .../hadoop/ozone/om/TestOzoneManager.java   | 11 +++---
 .../apache/hadoop/ozone/om/package-info.java| 22 
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  | 37 
 4 files changed, 36 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index badd435..45659bd 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -102,6 +103,7 @@ public class TestBlockDeletion {
   }
 
   @Test(timeout = 6)
+  @Ignore("Until delete background service is fixed.")
   public void testBlockDeletion()
   throws IOException, InterruptedException {
 String volumeName = UUID.randomUUID().toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
index 7c8595c..5109453 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -1188,10 +1188,11 @@ public class TestOzoneManager {
   }
 
 
-  @Test
+  //Disabling this test
+  @Ignore("Disabling this test until Open Key is fixed.")
   public void testExpiredOpenKey() throws Exception {
-BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-.getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+//BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
+//.getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
 
 String userName = "user" + RandomStringUtils.randomNumeric(5);
 String adminName = "admin" + RandomStringUtils.randomNumeric(5);
@@ -1252,7 +1253,7 @@ public class TestOzoneManager {
 KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
 storageHandler.newKeyWriter(keyArgs5);
 
-openKeyCleanUpService.triggerBackgroundTaskForTesting();
+//openKeyCleanUpService.triggerBackgroundTaskForTesting();
 Thread.sleep(2000);
 // now all k1-k4 should have been removed by the clean-up task, only k5
 // should be present in ExpiredOpenKeys.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
-

hadoop git commit: HDDS-342. Add example byteman script to print out hadoop rpc traffic. Contributed by Elek, Marton.

2018-08-22 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c25f37c6 -> af4b705b5


HDDS-342. Add example byteman script to print out hadoop rpc traffic.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af4b705b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af4b705b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af4b705b

Branch: refs/heads/trunk
Commit: af4b705b5f73b177be24292d8dda3a150aa12596
Parents: 4c25f37
Author: Anu Engineer 
Authored: Wed Aug 22 14:48:22 2018 -0700
Committer: Anu Engineer 
Committed: Wed Aug 22 14:48:22 2018 -0700

--
 dev-support/byteman/README.md   | 31 ++
 dev-support/byteman/hadooprpc.btm   | 44 
 .../src/main/compose/ozone/docker-config|  2 +
 3 files changed, 77 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af4b705b/dev-support/byteman/README.md
--
diff --git a/dev-support/byteman/README.md b/dev-support/byteman/README.md
new file mode 100644
index 000..9a17fc5
--- /dev/null
+++ b/dev-support/byteman/README.md
@@ -0,0 +1,31 @@
+
+
+This folder contains example byteman scripts (http://byteman.jboss.org/) to help
+with Hadoop debugging.
+
+As the startup script of the hadoop-runner docker image supports byteman
+instrumentation, it's enough to set the URL of a script in a specific environment
+variable to activate it when the container is run with docker:
+
+
+```
+BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+```
+
+For more information see HADOOP-15656 and HDDS-342.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af4b705b/dev-support/byteman/hadooprpc.btm
--
diff --git a/dev-support/byteman/hadooprpc.btm 
b/dev-support/byteman/hadooprpc.btm
new file mode 100644
index 000..13894fe
--- /dev/null
+++ b/dev-support/byteman/hadooprpc.btm
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script instruments the Hadoop RPC layer to print out all the request/response messages to the standard output.
+#
+
+RULE Hadoop RPC request
+INTERFACE ^com.google.protobuf.BlockingService
+METHOD callBlockingMethod
+IF true
+DO traceln("--> RPC message request: " + $3.getClass().getSimpleName() + " 
from " + linked(Thread.currentThread(), "source")); 
+   traceln($3.toString())
+ENDRULE
+
+
+RULE Hadoop RPC response
+INTERFACE ^com.google.protobuf.BlockingService
+METHOD callBlockingMethod
+AT EXIT
+IF true
+DO traceln("--> RPC message response: " + $3.getClass().getSimpleName() + " to 
" + unlink(Thread.currentThread(), "source")); 
+   traceln($!.toString())
+ENDRULE
+
+
+RULE Hadoop RPC source IP
+CLASS org.apache.hadoop.ipc.Server$RpcCall
+METHOD run
+IF true
+DO link(Thread.currentThread(), "source", $0.connection.toString())
+ENDRULE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af4b705b/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 1b75c01..a1828a3 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -29,3 +29,5 @@ LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm



hadoop git commit: HDDS-356. Support ColumnFamily based RocksDBStore and TableStore. Contributed by Anu Engineer.

2018-08-22 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk af4b705b5 -> b021249ac


HDDS-356. Support ColumnFamily based RocksDBStore and TableStore.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b021249a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b021249a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b021249a

Branch: refs/heads/trunk
Commit: b021249ac84abe31c9d30d73ed483bea2acdbaab
Parents: af4b705
Author: Anu Engineer 
Authored: Wed Aug 22 18:55:14 2018 -0700
Committer: Anu Engineer 
Committed: Wed Aug 22 18:55:14 2018 -0700

--
 .../org/apache/hadoop/utils/db/DBStore.java |  93 +++
 .../org/apache/hadoop/utils/db/RDBStore.java| 252 +++
 .../hadoop/utils/db/RDBStoreIterator.java   |  88 +++
 .../org/apache/hadoop/utils/db/RDBTable.java| 173 +
 .../java/org/apache/hadoop/utils/db/Table.java  | 150 +++
 .../apache/hadoop/utils/db/TableIterator.java   |  50 
 .../apache/hadoop/utils/db/package-info.java|  22 ++
 .../apache/hadoop/utils/db/TestRDBStore.java| 246 ++
 .../hadoop/utils/db/TestRDBTableStore.java  | 189 ++
 .../apache/hadoop/utils/db/package-info.java|  22 ++
 10 files changed, 1285 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b021249a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
new file mode 100644
index 000..a817f4f
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * The DBStore interface provides the ability to create Tables, which store
+ * a specific type of Key-Value pair. Some DB interfaces like LevelDB will not
+ * be able to do this. In that case a Table creation will map to a default
+ * store.
+ *
+ */
+@InterfaceStability.Evolving
+public interface DBStore extends AutoCloseable {
+
+  /**
+   * Gets an existing TableStore.
+   *
+   * @param name - Name of the TableStore to get
+   * @return - TableStore.
+   * @throws IOException on Failure
+   */
+  Table getTable(String name) throws IOException;
+
+  /**
+   * Lists the Known list of Tables in a DB.
+   *
+   * @return List of Tables; for RocksDB and LevelDB we will return at
+   * least one entry called DEFAULT.
+   * @throws IOException on Failure
+   */
+  ArrayList<Table> listTables() throws IOException;
+
+  /**
+   * Compact the entire database.
+   *
+   * @throws IOException on Failure
+   */
+  void compactDB() throws IOException;
+
+  /**
+   * Moves a key from the Source Table to the destination Table.
+   *
+   * @param key - Key to move.
+   * @param source - Source Table.
+   * @param dest - Destination Table.
+   * @throws IOException on Failure
+   */
+  void move(byte[] key, Table source, Table dest) throws IOException;
+
+  /**
+   * Moves a key from the Source Table to the destination Table and updates the
+   * destination to the new value.
+   *
+   * @param key - Key to move.
+   * @param value - new value to write to the destination table.
+   * @param source - Source Table.
+   * @param dest - Destination Table.
+   * @throws IOException on Failure
+   */
+  void move(byte[] key, byte[] value, Table source, Table dest)
+  throws IOException;
+
+  /**
+   * Returns an estimated count of keys in this DB.
+   *
+   * @return long, estimate of keys in the DB.
+   */
+  long getEstimatedKeyCount() throws IOException;
+
+
+}
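
To make the new interface concrete, here is a minimal usage sketch against the DBStore surface shown above. The store construction (an RDBStore or a builder) and the table names are assumptions made for illustration; only the getTable/move/listTables/compactDB/getEstimatedKeyCount calls come from the interface in this patch.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.Table;

/**
 * Illustrative only: exercises the DBStore surface introduced above.
 * How the store is built is not part of this diff, so it is passed in,
 * and the table names below are made up.
 */
public final class DBStoreUsageSketch {

  private DBStoreUsageSketch() {
  }

  public static void demo(DBStore store) throws IOException {
    // Each logical dataset lives in its own Table (a RocksDB column family;
    // LevelDB-style stores fall back to a single default store).
    Table users = store.getTable("userTable");
    Table volumes = store.getTable("volumeTable");

    byte[] key = "user1".getBytes(StandardCharsets.UTF_8);

    // Relocate a key between tables; the three-argument move() keeps the
    // existing value, the four-argument variant rewrites it on the way.
    store.move(key, users, volumes);

    // Maintenance and sizing hooks.
    System.out.println("tables=" + store.listTables().size()
        + " estimatedKeys=" + store.getEstimatedKeyCount());
    store.compactDB();
  }
}
```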

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b021249a/hadoop-hdds/common/src/main/java/

[1/2] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-08-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2172399c5 -> df21e1b1d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
--
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 2200cd8..f56cbe8 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -1,64 +1,58 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
-import static 
com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-
 import com.sun.jersey.api.container.ContainerFactory;
 import com.sun.jersey.api.core.ApplicationAdapter;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import 
org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import 
org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.ObjectStoreApplication;
 import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.hdds.scm.protocolPB
-.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocolPB
-.StorageContainerLocationProtocolClientSideTranslatorPB;
-import 
o

[2/2] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-08-28 Thread aengineer
HDDS-332. Remove the ability to configure ozone.handler.type
Contributed by Nandakumar and Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df21e1b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df21e1b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df21e1b1

Branch: refs/heads/trunk
Commit: df21e1b1ddcc8439b5fa1bb79388403f87742e65
Parents: 2172399
Author: Anu Engineer 
Authored: Tue Aug 28 09:56:02 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 09:56:02 2018 -0700

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|7 -
 .../org/apache/hadoop/ozone/OzoneConsts.java|1 -
 .../common/src/main/resources/ozone-default.xml |   21 -
 .../apache/hadoop/ozone/RatisTestHelper.java|8 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |7 +-
 .../rpc/TestCloseContainerHandlingByClient.java |2 -
 .../ozone/client/rpc/TestOzoneRpcClient.java|9 +-
 .../ozone/container/ContainerTestHelper.java|   10 -
 .../TestContainerDeletionChoosingPolicy.java|8 +-
 .../common/impl/TestContainerPersistence.java   |  116 +-
 .../commandhandler/TestBlockDeletion.java   |8 +-
 .../TestCloseContainerByPipeline.java   |   35 +-
 .../container/ozoneimpl/TestOzoneContainer.java |2 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |2 -
 .../container/ozoneimpl/TestRatisManager.java   |2 -
 .../hadoop/ozone/freon/TestDataValidate.java|7 +-
 .../apache/hadoop/ozone/freon/TestFreon.java|3 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   12 +-
 .../om/TestMultipleContainerReadWrite.java  |5 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |7 +-
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |7 +-
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|6 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |5 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   20 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |  188 ---
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |  187 ---
 .../hadoop/ozone/web/TestOzoneVolumes.java  |  183 +++
 .../hadoop/ozone/web/TestOzoneWebAccess.java|   10 +-
 .../hadoop/ozone/web/client/TestBuckets.java|9 +-
 .../hadoop/ozone/web/client/TestKeysRatis.java  |4 +-
 .../ozone/web/client/TestOzoneClient.java   |3 -
 .../hadoop/ozone/web/client/TestVolume.java |   11 +-
 .../ozone/web/client/TestVolumeRatis.java   |3 -
 .../server/datanode/ObjectStoreHandler.java |  182 ++-
 .../web/handlers/StorageHandlerBuilder.java |   18 +-
 .../web/localstorage/LocalStorageHandler.java   |  385 --
 .../web/localstorage/OzoneMetadataManager.java  | 1138 --
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |6 +-
 38 files changed, 363 insertions(+), 2274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 92f0c41..6ad9085 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -66,16 +66,9 @@ public final class OzoneConfigKeys {
   "dfs.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
   false;
-
-  public static final String OZONE_LOCALSTORAGE_ROOT =
-  "ozone.localstorage.root";
-  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
   public static final String OZONE_ENABLED =
   "ozone.enabled";
   public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_HANDLER_TYPE_KEY =
-  "ozone.handler.type";
-  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
   public static final String OZONE_TRACE_ENABLED_KEY =
   "ozone.trace.enabled";
   public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 320a3ed..ab6df92 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@

hadoop git commit: HDDS-376. Create custom message structure for use in AuditLogging Contributed by Dinesh Chitlangia.

2018-08-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb9d371ae -> ac515d22d


HDDS-376. Create custom message structure for use in AuditLogging
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac515d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac515d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac515d22

Branch: refs/heads/trunk
Commit: ac515d22d84478acbed92ef4024d9a3d3f329c8a
Parents: cb9d371
Author: Anu Engineer 
Authored: Tue Aug 28 12:59:08 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 12:59:08 2018 -0700

--
 .../apache/hadoop/ozone/audit/AuditLogger.java  |  66 --
 .../apache/hadoop/ozone/audit/AuditMessage.java |  64 ++
 .../apache/hadoop/ozone/audit/package-info.java |  19 ++-
 .../ozone/audit/TestOzoneAuditLogger.java   | 124 ---
 4 files changed, 177 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac515d22/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index 46ffaab..ee20c66 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -21,10 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.message.StructuredDataMessage;
 import org.apache.logging.log4j.spi.ExtendedLogger;
 
-import java.util.Map;
 
 /**
  * Class to define Audit Logger for Ozone.
@@ -32,16 +30,13 @@ import java.util.Map;
 public class AuditLogger {
 
   private ExtendedLogger logger;
-
-  private static final String SUCCESS = AuditEventStatus.SUCCESS.getStatus();
-  private static final String FAILURE = AuditEventStatus.FAILURE.getStatus();
   private static final String FQCN = AuditLogger.class.getName();
   private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
   private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
 
   /**
* Parametrized Constructor to initialize logger.
-   * @param type
+   * @param type Audit Logger Type
*/
   public AuditLogger(AuditLoggerType type){
 initializeLogger(type);
@@ -60,68 +55,53 @@ public class AuditLogger {
 return logger;
   }
 
-  public void logWriteSuccess(AuditAction type, Map data) {
-logWriteSuccess(type, data, Level.INFO);
+  public void logWriteSuccess(AuditMessage msg) {
+logWriteSuccess(Level.INFO, msg);
   }
 
-  public void logWriteSuccess(AuditAction type, Map data, Level
-  level) {
-StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-type.getAction(), data);
+  public void logWriteSuccess(Level level, AuditMessage msg) {
 this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, null);
   }
 
-
-  public void logWriteFailure(AuditAction type, Map data) {
-logWriteFailure(type, data, Level.INFO, null);
+  public void logWriteFailure(AuditMessage msg) {
+logWriteFailure(Level.ERROR, msg);
   }
 
-  public void logWriteFailure(AuditAction type, Map data, Level
-  level) {
-logWriteFailure(type, data, level, null);
+  public void logWriteFailure(Level level, AuditMessage msg) {
+logWriteFailure(level, msg, null);
   }
 
-  public void logWriteFailure(AuditAction type, Map data,
-  Throwable exception) {
-logWriteFailure(type, data, Level.INFO, exception);
+  public void logWriteFailure(AuditMessage msg, Throwable exception) {
+logWriteFailure(Level.ERROR, msg, exception);
   }
 
-  public void logWriteFailure(AuditAction type, Map data, Level
-  level, Throwable exception) {
-StructuredDataMessage msg = new StructuredDataMessage("", FAILURE,
-type.getAction(), data);
+  public void logWriteFailure(Level level, AuditMessage msg,
+  Throwable exception) {
 this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, exception);
   }
 
-  public void logReadSuccess(AuditAction type, Map data) {
-logReadSuccess(type, data, Level.INFO);
+  public void logReadSuccess(AuditMessage msg) {
+logReadSuccess(Level.INFO, msg);
   }
 
-  public void logReadSuccess(AuditAction type, Map data, Level
-  level) {
-StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-type.getAction(), data);
+  public void logReadSuccess(Level level, AuditMessage msg) {
 this.logger.logIfEnabled(FQCN, 
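
The net effect of this refactor is that callers now hand the logger a pre-built AuditMessage instead of an AuditAction plus a raw map. A hedged sketch of the new call pattern follows; the AuditLoggerType.OMLOGGER constant and the way the AuditMessage is assembled are assumptions, since AuditMessage.java is not reproduced in this mail.

```java
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditLoggerType;
import org.apache.hadoop.ozone.audit.AuditMessage;

/**
 * Illustrative only: the post-HDDS-376 call pattern. The message is passed
 * in because AuditMessage's constructor/builder lives in AuditMessage.java,
 * which is not shown here.
 */
public final class AuditCallSketch {

  // AuditLoggerType.OMLOGGER is an assumption; use the type of your component.
  private static final AuditLogger AUDIT =
      new AuditLogger(AuditLoggerType.OMLOGGER);

  private AuditCallSketch() {
  }

  public static void record(AuditMessage msg, Throwable error) {
    if (error == null) {
      AUDIT.logWriteSuccess(msg);        // logs at INFO with the WRITE marker
    } else {
      AUDIT.logWriteFailure(msg, error); // logs at ERROR with the stack trace
    }
  }
}
```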

[1/2] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-08-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 33f42efc9 -> c5629d546


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 3b4426c..b652b6b 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -51,9 +51,9 @@ public class TestSCMContainerPlacementRandom {
 .thenReturn(new ArrayList<>(datanodes));
 
 when(mockNodeManager.getNodeStat(anyObject()))
-.thenReturn(new SCMNodeMetric(100l, 0l, 100l));
+.thenReturn(new SCMNodeMetric(100L, 0L, 100L));
 when(mockNodeManager.getNodeStat(datanodes.get(2)))
-.thenReturn(new SCMNodeMetric(100l, 90l, 10l));
+.thenReturn(new SCMNodeMetric(100L, 90L, 10L));
 
 SCMContainerPlacementRandom scmContainerPlacementRandom =
 new SCMContainerPlacementRandom(mockNodeManager, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index fa87706..da05913 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -132,7 +131,7 @@ public class TestReplicationManager {
   //WHEN
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);
@@ -159,10 +158,8 @@ public class TestReplicationManager {
   leaseManager.start();
 
   ReplicationManager replicationManager =
-  new ReplicationManager(containerPlacementPolicy, containerStateManager,
-
-
-  queue, leaseManager) {
+  new ReplicationManager(containerPlacementPolicy,
+  containerStateManager, queue, leaseManager) {
 @Override
 protected List getCurrentReplicas(
 ReplicationRequest request) throws IOException {
@@ -172,7 +169,7 @@ public class TestReplicationManager {
   replicationManager.start();
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
index a593718..9dd4fe3 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
@@ -92,8 +92,8 @@ public class TestReplicationQueue {
 1, replicationQueue.size());
 Assert.assertEquals(temp, msg5);
 
-// Message 2 should be ordered before message 5 as both have same replication
-// number but message 2 has earlier timestamp.
+// Message 2 should be ordered before message 5 as both have same
+// replication number but message 2 has earlier timestamp.
 temp = replicationQueue.take();
 Assert.assertEqua

[2/2] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-08-28 Thread aengineer
HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix 
checkstyle in ContainerTestHelper, GenericTestUtils
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5629d54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5629d54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5629d54

Branch: refs/heads/trunk
Commit: c5629d546d64091a14560df488a7f797a150337e
Parents: 33f42ef
Author: Anu Engineer 
Authored: Tue Aug 28 14:06:19 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 14:06:19 2018 -0700

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   |  6 +--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  6 +--
 .../hadoop/hdds/scm/XceiverClientManager.java   |  2 +-
 .../hdds/scm/storage/ChunkInputStream.java  |  7 +--
 .../hdds/scm/storage/ChunkOutputStream.java |  4 +-
 .../org/apache/hadoop/hdds/client/BlockID.java  |  5 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  2 -
 .../common/helpers/AllocatedBlock.java  |  4 +-
 .../container/common/helpers/ContainerInfo.java | 12 ++---
 .../common/helpers/ContainerWithPipeline.java   |  7 +--
 .../scm/container/common/helpers/Pipeline.java  | 11 ++---
 .../StorageContainerLocationProtocol.java   |  6 ++-
 ...rLocationProtocolClientSideTranslatorPB.java | 21 
 .../scm/storage/ContainerProtocolCalls.java |  6 +--
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 --
 .../ozone/container/common/helpers/KeyData.java |  8 ++--
 .../apache/hadoop/utils/HddsVersionInfo.java|  6 ++-
 .../apache/hadoop/utils/TestMetadataStore.java  |  1 -
 .../hadoop/ozone/HddsDatanodeService.java   |  3 +-
 .../common/helpers/ContainerUtils.java  | 22 -
 .../container/common/impl/ContainerSet.java |  2 +-
 .../common/impl/OpenContainerBlockMap.java  | 19 
 .../server/ratis/XceiverServerRatis.java|  6 +--
 .../keyvalue/interfaces/KeyManager.java |  4 +-
 .../ozone/protocol/commands/CommandStatus.java  | 16 +++
 .../ozone/container/common/ScmTestMock.java |  6 ++-
 .../common/interfaces/TestHandler.java  |  7 ---
 .../endpoint/TestHeartbeatEndpointTask.java |  2 -
 .../TestRoundRobinVolumeChoosingPolicy.java |  5 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  3 +-
 .../hadoop/hdds/server/events/EventWatcher.java |  6 ++-
 .../hdds/server/events/TestEventQueue.java  |  3 --
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 18 +++
 .../hdds/scm/block/DeletedBlockLogImpl.java |  3 +-
 .../hdds/scm/block/SCMBlockDeletingService.java |  4 +-
 .../container/CloseContainerEventHandler.java   |  4 +-
 .../hdds/scm/container/ContainerMapping.java|  4 +-
 .../scm/container/ContainerStateManager.java|  7 +--
 .../replication/ReplicationManager.java |  2 +-
 .../scm/container/states/ContainerStateMap.java |  2 +-
 .../hdds/scm/node/states/Node2ContainerMap.java |  4 +-
 .../scm/node/states/NodeNotFoundException.java  |  2 -
 .../hdds/scm/node/states/ReportResult.java  |  3 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java| 50 +---
 .../hdds/scm/pipelines/PipelineManager.java |  6 +--
 .../hdds/scm/pipelines/PipelineSelector.java|  7 +--
 .../scm/server/SCMClientProtocolServer.java |  3 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  8 ++--
 .../hadoop/hdds/scm/block/TestBlockManager.java |  1 -
 .../hdds/scm/block/TestDeletedBlockLog.java |  7 +--
 .../command/TestCommandStatusReportHandler.java | 22 -
 .../TestCloseContainerEventHandler.java |  1 -
 .../scm/container/TestContainerMapping.java |  7 +--
 .../container/TestContainerReportHandler.java   |  2 +-
 .../TestSCMContainerPlacementCapacity.java  |  8 ++--
 .../TestSCMContainerPlacementRandom.java|  4 +-
 .../replication/TestReplicationManager.java | 11 ++---
 .../replication/TestReplicationQueue.java   |  4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  5 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  3 +-
 .../hdds/scm/node/TestNodeReportHandler.java|  3 +-
 .../ozone/container/common/TestEndPoint.java|  9 ++--
 .../placement/TestContainerPlacement.java   |  6 ++-
 .../apache/hadoop/ozone/client/ObjectStore.java |  7 ++-
 .../hdds/scm/pipeline/TestPipelineClose.java|  4 --
 .../apache/hadoop/ozone/RatisTestHelper.java|  8 ++--
 .../TestStorageContainerManagerHelper.java  |  2 -
 .../rpc/TestCloseContainerHandlingByClient.java |  3 +-
 .../ozone/container/ContainerTestHelper.java|  2 -
 .../common/impl/TestContainerPersistence.java   |  1 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |  3 +-
 .../container/ozoneimpl/TestRatisManager.java   |  4 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |  2 -
 .../hadoop/ozone/web/TestOzoneWebAccess.j

hadoop git commit: HDDS-98. Adding Ozone Manager Audit Log. Contributed by Dinesh Chitlangia.

2018-08-31 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8aa6c4f07 -> 630b64ec7


HDDS-98. Adding Ozone Manager Audit Log.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/630b64ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/630b64ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/630b64ec

Branch: refs/heads/trunk
Commit: 630b64ec7e963968a5bdcd1d625fc78746950137
Parents: 8aa6c4f
Author: Anu Engineer 
Authored: Fri Aug 31 14:20:56 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:20:56 2018 -0700

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 +++
 hadoop-ozone/common/src/main/bin/ozone  |   2 +
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 218 ++-
 10 files changed, 466 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index a1828a3..21127f8 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,3 +31,40 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
 #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..9645c02 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.

hadoop git commit: HDDS-388. Fix the name of the db profile configuration key. Contributed by Elek, Marton.

2018-08-31 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 630b64ec7 -> 50d2e3ec4


HDDS-388. Fix the name of the db profile configuration key.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50d2e3ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50d2e3ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50d2e3ec

Branch: refs/heads/trunk
Commit: 50d2e3ec41c73f9a0198d4a4e3d6f308d3030b8a
Parents: 630b64e
Author: Anu Engineer 
Authored: Fri Aug 31 14:30:29 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:30:29 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d2e3ec/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 6d2ee09..d3ec4a5 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1100,7 +1100,7 @@
   
 
   
-    <name>ozone.db.profile</name>
+    <name>hdds.db.profile</name>
     <value>DBProfile.SSD</value>
     <tag>OZONE, OM, PERFORMANCE, REQUIRED</tag>
     <description>This property allows user to pick a configuration





[1/2] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-08-31 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 50d2e3ec4 -> 76bae4ccb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
deleted file mode 100644
index 722c1a5..000
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.ResultCode;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
-import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-
-import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-/**
- * This class tests the CLI of SCM.
- */
-@Ignore ("Needs to be fixed for new SCM and Storage design")
-public class TestSCMCli {
-  private static SCMCLI cli;
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-  storageContainerLocationClient;
-
-  private static StorageContainerManager scm;
-  private static ScmClient containerOperationClient;
-
-  private static ByteArrayOutputStream outContent;
-  private static PrintStream outStream;
-  private static ByteArrayOutputStream errContent;
-  private static PrintStream errStream;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(3);
-
-  @BeforeClass
-  public static void setup() throws Exception {
-conf = new OzoneConfiguration();
-cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-cluster.waitForClusterToBeReady();
-xceiverClientManager = new XceiverClientManager(conf);
-storageContainerLocationClient =
-cluster.getStorageContainerLocationClient();
-containerOperationClient = new ContainerOperationClient(
-storageContainerLocationClient, new XceiverClientManager(conf));
-outContent = new ByteArrayOutputStream();
-outStream = new PrintStream(outC

[2/2] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-08-31 Thread aengineer
HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76bae4cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76bae4cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76bae4cc

Branch: refs/heads/trunk
Commit: 76bae4ccb1d929260038b1869be8070c2320b617
Parents: 50d2e3e
Author: Anu Engineer 
Authored: Fri Aug 31 18:11:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 18:11:01 2018 -0700

--
 .../common/dev-support/findbugsExcludeFile.xml  |   4 +
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  82 +++
 .../hadoop/hdds/cli/HddsVersionProvider.java|  35 ++
 .../apache/hadoop/hdds/cli/package-info.java|  22 +
 hadoop-hdds/pom.xml |   5 +
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java   |  43 --
 .../hdds/scm/cli/OzoneCommandHandler.java   |  87 
 .../apache/hadoop/hdds/scm/cli/ResultCode.java  |  31 --
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  | 246 +++--
 .../cli/container/CloseContainerHandler.java|  85 ---
 .../hdds/scm/cli/container/CloseSubcommand.java |  54 ++
 .../cli/container/ContainerCommandHandler.java  | 128 -
 .../cli/container/CreateContainerHandler.java   |  67 ---
 .../scm/cli/container/CreateSubcommand.java |  65 +++
 .../cli/container/DeleteContainerHandler.java   |  95 
 .../scm/cli/container/DeleteSubcommand.java |  60 +++
 .../scm/cli/container/InfoContainerHandler.java | 114 
 .../hdds/scm/cli/container/InfoSubcommand.java  |  94 
 .../scm/cli/container/ListContainerHandler.java | 117 -
 .../hdds/scm/cli/container/ListSubcommand.java  |  83 +++
 .../hdds/scm/cli/container/package-info.java|   3 +
 .../hadoop/hdds/scm/cli/package-info.java   |  12 +-
 hadoop-ozone/common/src/main/bin/ozone  |   2 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 518 ---
 24 files changed, 596 insertions(+), 1456 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
index daf6fec..c7db679 100644
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
@@ -21,4 +21,8 @@
   
 
   
+  
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
new file mode 100644
index 000..2b3e6c0
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.hdds.cli;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import picocli.CommandLine;
+import picocli.CommandLine.ExecutionException;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParameterException;
+import picocli.CommandLine.RunLast;
+
+/**
+ * This is a generic parent class for all the ozone related cli tools.
+ */
+public class GenericCli implements Callable<Void> {
+
+  @Option(names = {"--verbose"},
+  description = "More verbose output. Show the stack trace of the errors.")
+  private boolean verbose;
+
+  @Option(names = {"-D", "--set"})
+  private Map<String, String> configurationOverrides = new HashMap<>();
+
+  private final CommandLine cmd;
+
+  public GenericCli() {
+cmd = new CommandLine(this);
+  }
+
+  public void run(Str
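
For orientation, a rough sketch of how a tool can hang off the new GenericCli/picocli base class. The command name and the "list" subcommand below are hypothetical and only mirror the *Subcommand pattern of this patch; the run(String[]) entry point is assumed from the truncated method above.

```java
import java.util.concurrent.Callable;

import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;

import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

/**
 * Illustrative only: a minimal tool built on GenericCli. The real
 * subcommands of this patch live under hdds.scm.cli.container.
 */
@Command(name = "example-cli",
    description = "Sketch of a GenericCli based tool",
    versionProvider = HddsVersionProvider.class,
    subcommands = ExampleCli.ListSubcommand.class)
public class ExampleCli extends GenericCli {

  public static void main(String[] args) {
    // run(String[]) comes from GenericCli: it parses the args (including the
    // shared --verbose and -D/--set options) and invokes the matched command.
    new ExampleCli().run(args);
  }

  @Override
  public Void call() throws Exception {
    // Invoked when no subcommand is given.
    System.out.println("Usage: example-cli list [--count=N]");
    return null;
  }

  /** Hypothetical subcommand following the *Subcommand pattern of the patch. */
  @Command(name = "list", description = "Print a few entries")
  static class ListSubcommand implements Callable<Void> {

    @Option(names = {"--count"}, description = "Number of entries to print")
    private int count = 3;

    @Override
    public Void call() {
      for (int i = 0; i < count; i++) {
        System.out.println("entry-" + i);
      }
      return null;
    }
  }
}
```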

hadoop git commit: HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework. Contributed by Dinesh Chitlangia.

2018-08-31 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 76bae4ccb -> 19abaacda


HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19abaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19abaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19abaacd

Branch: refs/heads/trunk
Commit: 19abaacdad84b03fc790341b4b5bcf1c4d41f1fb
Parents: 76bae4c
Author: Anu Engineer 
Authored: Fri Aug 31 22:24:30 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 22:24:30 2018 -0700

--
 .../main/java/org/apache/hadoop/ozone/audit/package-info.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19abaacd/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
index 48de3f7..9c00ef7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -50,8 +50,10 @@ package org.apache.hadoop.ozone.audit;
  * The implementing class must override toAuditMap() to return an
  * instance of Map where both Key and Value are String.
  *
- * Key: must not contain any spaces. If the key is multi word then use
- * camel case.
+ * Key: must contain printable US ASCII characters
+ * May not contain a space, =, ], or "
+ * If the key is multi word then use camel case.
+ *
  * Value: if it is a collection/array, then it must be converted to a comma
  * delimited string
  *
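
A small hedged illustration of the key/value rules described above; the entity names are made up, and only the camelCase, no-space keys and the comma-delimited collection value are the point.

```java
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Illustrative only: keys are printable-ASCII camelCase tokens (no space,
 * '=', ']' or '"'), and collection values are flattened to a comma
 * delimited string, as required for toAuditMap().
 */
public final class AuditMapSketch {

  private AuditMapSketch() {
  }

  public static Map<String, String> sampleAuditMap() {
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put("volume", "vol1");                       // single word key
    auditMap.put("bucketName", "bucket1");                // camelCase, no spaces
    auditMap.put("aclList", "user:alice:rw,user:bob:r");  // comma delimited collection
    return auditMap;
  }
}
```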





[2/2] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-02 Thread aengineer
HDDS-357. Use DBStore and TableStore for OzoneManager non-background service.
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff036e49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff036e49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff036e49

Branch: refs/heads/trunk
Commit: ff036e49ff967d5dacf4b2d9d5376e57578ef391
Parents: eed8415
Author: Anu Engineer 
Authored: Sun Sep 2 11:47:32 2018 -0700
Committer: Anu Engineer 
Committed: Sun Sep 2 11:47:32 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   2 +-
 .../org/apache/hadoop/utils/db/DBStore.java |  22 +
 .../org/apache/hadoop/utils/db/RDBStore.java|  26 +-
 .../common/src/main/resources/ozone-default.xml |   2 +-
 .../apache/hadoop/hdds/server/ServerUtils.java  |   5 +
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 +-
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   6 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java |  11 +-
 ...neManagerProtocolClientSideTranslatorPB.java |   8 +-
 .../src/main/proto/OzoneManagerProtocol.proto   |   6 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  37 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   4 +
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|   7 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  37 +-
 .../hadoop/ozone/web/client/TestVolume.java |   6 +
 .../hadoop/ozone/om/BucketManagerImpl.java  |  57 ++-
 .../org/apache/hadoop/ozone/om/KeyManager.java  |   6 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  | 276 +-
 .../hadoop/ozone/om/OMMetadataManager.java  | 222 
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  | 509 +++
 .../apache/hadoop/ozone/om/OzoneManager.java| 209 
 .../hadoop/ozone/om/VolumeManagerImpl.java  | 156 +++---
 ...neManagerProtocolServerSideTranslatorPB.java |   7 +-
 .../hadoop/ozone/om/TestBucketManagerImpl.java  | 208 
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  12 +-
 26 files changed, 978 insertions(+), 873 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..8ea4d7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -92,7 +92,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String BLOCK_DB = "block.db";
   public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
@@ -113,8 +112,6 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String OPEN_KEY_PREFIX = "#open#";
-  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
 
   /**
* OM LevelDB prefixes.
@@ -138,8 +135,7 @@ public final class OzoneConsts {
*  | #deleting#/volumeName/bucketName/keyName |  KeyInfo|
*  --
*/
-  public static final String OM_VOLUME_PREFIX = "/#";
-  public static final String OM_BUCKET_PREFIX = "/#";
+
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
index b243e3d..379d9e9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
@@ -94,7 +94,7 @@ public class RocksDBStore implements MetadataStore {
 }
   }
 
-  private IOException toIOException(String msg, RocksDBException e) {
+  public static IOException toIOExc

[1/2] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-02 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk eed8415dc -> ff036e49f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 21d2411..151fddf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -19,77 +19,178 @@ package org.apache.hadoop.ozone.om;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
-
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.apache.hadoop.utils.db.DBStore;
+import org.apache.hadoop.utils.db.DBStoreBuilder;
+import org.apache.hadoop.utils.db.Table;
+import org.apache.hadoop.utils.db.TableIterator;
+import org.eclipse.jetty.util.StringUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.ArrayList;
 import java.util.Map;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 
 /**
  * Ozone metadata manager interface.
  */
 public class OmMetadataManagerImpl implements OMMetadataManager {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(OmMetadataManagerImpl.class);
+
+  /**
+   * OM RocksDB Structure .
+   * 
+   * OM DB stores metadata as KV pairs in different column families.
+   * 
+   * OM DB Schema:
+   * |---|
+   * |  Column Family |VALUE |
+   * |---|
+   * | userTable  | user->VolumeList |
+   * |
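
The rewrite above replaces the single prefix-keyed MetadataStore with a DBStore that exposes named column families, such as the userTable shown in the schema comment, through the Table abstraction. A rough sketch of that access pattern, assuming the DBStoreBuilder/Table API imported by the patch; the table name, path, and key layout here are illustrative:

import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.DBStoreBuilder;
import org.apache.hadoop.utils.db.Table;

/**
 * Sketch of column-family style access: one DBStore per OM, with a named
 * table per record type instead of key prefixes in a single store.
 */
public class OmDbSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    DBStore store = DBStoreBuilder.newBuilder(conf)
        .setName("om.db")                    // OM_DB_NAME
        .setPath(Paths.get("/tmp/om-meta"))  // assumed local metadata dir
        .addTable("userTable")               // user -> VolumeList
        .build();
    try {
      Table userTable = store.getTable("userTable");
      byte[] user = "$hadoop".getBytes(StandardCharsets.UTF_8);
      // Null means the user owns no volumes; compare HDDS-405 below.
      System.out.println("user record present: " + (userTable.get(user) != null));
    } finally {
      store.close();
    }
  }
}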

hadoop git commit: HDDS-396. Remove openContainers.db from SCM. Contributed by Dinesh Chitlangia.

2018-09-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9964e33e8 -> 6e4c73147


HDDS-396. Remove openContainers.db from SCM.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e4c7314
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e4c7314
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e4c7314

Branch: refs/heads/trunk
Commit: 6e4c73147185ae2e5529028c552c47d1edcead36
Parents: 9964e33
Author: Anu Engineer 
Authored: Tue Sep 4 16:27:31 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 4 16:27:31 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/OzoneConsts.java   | 1 -
 .../src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java| 4 
 2 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4c7314/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index eb37b79..bf4508b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -92,7 +92,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4c7314/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 080840a..522fea9 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -59,7 +59,6 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
  * This is the CLI that can be use to convert an ozone metadata DB into
@@ -270,9 +269,6 @@ public class SQLCLI  extends Configured implements Tool {
 if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
   LOG.info("Converting container DB");
   convertContainerDB(dbPath, outPath);
-} else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
-  LOG.info("Converting open container DB");
-  convertOpenContainerDB(dbPath, outPath);
 } else if (dbName.toString().equals(OM_DB_NAME)) {
   LOG.info("Converting om DB");
   convertOMDB(dbPath, outPath);





hadoop git commit: HDDS-405. User/volume mapping is not cleaned up during the deletion of the last volume. Contributed by Elek, Marton.

2018-09-06 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk eb0b5a844 -> fa2945e7a


HDDS-405. User/volume mapping is not cleaned up during the deletion of the last 
volume.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa2945e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa2945e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa2945e7

Branch: refs/heads/trunk
Commit: fa2945e7a9d9cccb0c6ccaf531c6a34d9d09b489
Parents: eb0b5a8
Author: Anu Engineer 
Authored: Thu Sep 6 12:44:08 2018 -0700
Committer: Anu Engineer 
Committed: Thu Sep 6 12:44:08 2018 -0700

--
 .../hadoop/ozone/om/TestOzoneManager.java   | 26 +---
 .../hadoop/ozone/om/VolumeManagerImpl.java  |  2 +-
 2 files changed, 23 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa2945e7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
index b6ade60..f309715 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -276,10 +276,28 @@ public class TestOzoneManager {
 Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
 
 // Make sure volume with _A suffix is successfully deleted.
-exception.expect(IOException.class);
-exception.expectMessage("Info Volume failed, error:VOLUME_NOT_FOUND");
-volumeArgs = new VolumeArgs(volumeName1, userArgs);
-storageHandler.getVolumeInfo(volumeArgs);
+try {
+  volumeArgs = new VolumeArgs(volumeName1, userArgs);
+  storageHandler.getVolumeInfo(volumeArgs);
+  Assert.fail("Volume is not deleted");
+} catch (IOException ex) {
+  Assert.assertEquals("Info Volume failed, error:VOLUME_NOT_FOUND",
+  ex.getMessage());
+}
+//delete the _AA volume, too
+storageHandler.deleteVolume(new VolumeArgs(volumeName2, userArgs));
+
+//Make sure there is no volume information for the specific user
+OMMetadataManager metadataManager =
+cluster.getOzoneManager().getMetadataManager();
+
+byte[] userKey = metadataManager.getUserKey(userName);
+byte[] volumes = metadataManager.getUserTable().get(userKey);
+
+//that was the last volume of the user, shouldn't be any record here
+Assert.assertNull(volumes);
+
+
   }
 
   // Create a volume and a bucket inside the volume,
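
The test now wraps the expected failure in try/catch with Assert.fail instead of the ExpectedException rule, because the rule ends the test method at the first expected throw and the new user-table assertions would never run. The pattern in isolation, with fakeLookup standing in for storageHandler.getVolumeInfo:

import java.io.IOException;

import org.junit.Assert;
import org.junit.Test;

/**
 * Sketch of the try/fail/catch idiom: assert on the expected exception
 * and keep executing follow-up checks in the same test method.
 */
public class ExpectedFailureSketch {

  private String fakeLookup(String volume) throws IOException {
    throw new IOException("Info Volume failed, error:VOLUME_NOT_FOUND");
  }

  @Test
  public void deletedVolumeIsGone() throws Exception {
    try {
      fakeLookup("volumeA");
      Assert.fail("Volume is not deleted");
    } catch (IOException ex) {
      Assert.assertEquals("Info Volume failed, error:VOLUME_NOT_FOUND",
          ex.getMessage());
    }
    // Execution continues here, so the user-table check added by the patch
    // can live in the same test method.
  }
}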

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa2945e7/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index 419b0aa..cf25215 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -110,7 +110,7 @@ public class VolumeManagerImpl implements VolumeManager {
 // Remove the volume from the list
 prevVolList.remove(volume);
 if (prevVolList.size() == 0) {
-  batch.delete(dbUserKey);
+  batch.delete(metadataManager.getUserTable().getHandle(), dbUserKey);
 } else {
   VolumeList newVolList = VolumeList.newBuilder()
   .addAllVolumeNames(prevVolList).build();
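
The one-line fix above makes the batch delete target the user table's own column-family handle, so the user-to-volume-list record really disappears when the last volume goes away. The bookkeeping rule, reduced to a sketch over a stand-in store interface rather than the real OMMetadataManager:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Sketch of the delete-volume bookkeeping: drop the user record entirely
 * once its volume list becomes empty instead of leaving an empty mapping.
 */
public class UserVolumeCleanupSketch {

  interface UserVolumeStore {
    List<String> getVolumes(String user) throws IOException;
    void putVolumes(String user, List<String> volumes) throws IOException;
    void deleteUser(String user) throws IOException;
  }

  static void deleteVolume(UserVolumeStore store, String user, String volume)
      throws IOException {
    List<String> volumes = new ArrayList<>(store.getVolumes(user));
    volumes.remove(volume);
    if (volumes.isEmpty()) {
      // Last volume gone: remove the user record itself.
      store.deleteUser(user);
    } else {
      store.putVolumes(user, volumes);
    }
  }
}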





hadoop git commit: HDDS-406. Enable acceptance test of the putKey for rpc protocol. Contributed by Elek, Marton.

2018-09-06 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk eca1a4bfe -> c5bf43a8e


HDDS-406. Enable acceptance test of the putKey for rpc protocol.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5bf43a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5bf43a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5bf43a8

Branch: refs/heads/trunk
Commit: c5bf43a8e8aec595d1a8133cb0656778b252de89
Parents: eca1a4b
Author: Anu Engineer 
Authored: Thu Sep 6 15:08:46 2018 -0700
Committer: Anu Engineer 
Committed: Thu Sep 6 15:09:21 2018 -0700

--
 .../src/test/acceptance/basic/ozone-shell.robot | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bf43a8/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
--
diff --git 
a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot 
b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
index 4751841..ac538b2 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
@@ -27,27 +27,27 @@ ${PROJECTDIR}   ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 RestClient without http port
-   Test ozone shell   http://  ozoneManager  restwoport
True
+   Test ozone shell   http://  ozoneManager  restwoport
 
 RestClient with http port
-   Test ozone shell   http://  ozoneManager:9874 restwport 
True
+   Test ozone shell   http://  ozoneManager:9874 restwport
 
 RestClient without host name
-   Test ozone shell   http://  ${EMPTY}  restwohost
True
+   Test ozone shell   http://  ${EMPTY}  restwohost
 
 RpcClient with port
-   Test ozone shell   o3://ozoneManager:9862 rpcwoport 
False
+   Test ozone shell   o3://ozoneManager:9862 rpcwoport
 
 RpcClient without host
-   Test ozone shell   o3://${EMPTY}  rpcwport  
False
+   Test ozone shell   o3://${EMPTY}  rpcwport
 
 RpcClient without scheme
-   Test ozone shell   ${EMPTY} ${EMPTY}  rpcwoscheme   
False
+   Test ozone shell   ${EMPTY} ${EMPTY}  rpcwoscheme
 
 
 *** Keywords ***
 Test ozone shell
-[arguments] ${protocol} ${server}   ${volume} 
${withkeytest}
+[arguments] ${protocol} ${server}   ${volume}
 ${result} = Execute on  datanodeozone oz -createVolume 
${protocol}${server}/${volume} -user bilbo -quota 100TB -root
 Should not contain  ${result}   Failed
 Should contain  ${result}   Creating Volume: 
${volume}
@@ -69,7 +69,7 @@ Test ozone shell
 Should Be Equal ${result}   USER
 ${result} = Execute on  datanodeozone oz -listBucket 
${protocol}${server}/${volume}/ | grep -Ev 
'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") 
| .volumeName'
 Should Be Equal ${result}   ${volume}
-Run Keyword and Return If   ${withkeytest}
Test key handling   ${protocol}   ${server}   ${volume}
+Run Keyword Test key handling   ${protocol}
   ${server}   ${volume}
 Execute on  datanodeozone oz -deleteBucket 
${protocol}${server}/${volume}/bb1
 Execute on  datanodeozone oz -deleteVolume 
${protocol}${server}/${volume} -user bilbo
 





hadoop git commit: HDDS-408. Read (getKey) operation is very slow. Contributed by Nandakumar.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 396ce7b88 -> be1ec005f


HDDS-408. Read (getKey) operation is very slow.
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be1ec005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be1ec005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be1ec005

Branch: refs/heads/trunk
Commit: be1ec005f1c725d775b92d946fa46b1f17f23b4c
Parents: 396ce7b
Author: Anu Engineer 
Authored: Fri Sep 7 07:36:24 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 07:36:24 2018 -0700

--
 .../org/apache/hadoop/ozone/client/io/OzoneInputStream.java | 5 +
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java | 3 +--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1ec005/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
index c2ff979..e1f65e6 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
@@ -45,6 +45,11 @@ public class OzoneInputStream extends InputStream {
   }
 
   @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+return inputStream.read(b, off, len);
+  }
+
+  @Override
   public synchronized void close() throws IOException {
 inputStream.close();
   }
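
The slowness came from inheriting InputStream's default bulk read, which loops over the single-byte read() and therefore crosses into the underlying chunk stream once per byte. Overriding read(byte[], int, int) to delegate in one call removes that overhead; the pattern in isolation, with WrappedInputStream as an illustrative name rather than the Ozone class:

import java.io.IOException;
import java.io.InputStream;

/**
 * Sketch of the delegation fix: forward bulk reads to the wrapped stream
 * instead of falling back to InputStream's byte-at-a-time default.
 */
public class WrappedInputStream extends InputStream {

  private final InputStream inner;

  public WrappedInputStream(InputStream inner) {
    this.inner = inner;
  }

  @Override
  public int read() throws IOException {
    return inner.read();
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    // One call into the underlying stream per buffer, not per byte.
    return inner.read(b, off, len);
  }

  @Override
  public void close() throws IOException {
    inner.close();
  }
}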

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1ec005/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index e9a684e..330eba8 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -492,8 +492,7 @@ public class RpcClient implements ClientProtocol {
 ChunkGroupInputStream.getFromOmKeyInfo(
 keyInfo, xceiverClientManager, storageContainerLocationClient,
 requestId);
-return new OzoneInputStream(
-(ChunkGroupInputStream)lengthInputStream.getWrappedStream());
+return new OzoneInputStream(lengthInputStream.getWrappedStream());
   }
 
   @Override





hadoop git commit: HDDS-351. Add chill mode state to SCM. Contributed by Ajay Kumar.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 36c7c7826 -> ff64d3571


HDDS-351. Add chill mode state to SCM.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff64d357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff64d357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff64d357

Branch: refs/heads/trunk
Commit: ff64d3571660ace3fb266ee47bea181cebfee8d9
Parents: 36c7c78
Author: Anu Engineer 
Authored: Fri Sep 7 10:35:45 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 10:54:35 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   6 +
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../hdds/scm/container/ContainerMapping.java|   5 +
 .../hadoop/hdds/scm/events/SCMEvents.java   |  10 +
 .../hdds/scm/server/SCMChillModeManager.java| 198 +++
 .../scm/server/SCMDatanodeProtocolServer.java   |  19 ++
 .../scm/server/StorageContainerManager.java |  17 +-
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |  85 
 .../scm/server/TestSCMChillModeManager.java | 115 +++
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  23 ++-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  93 ++---
 .../ozone/TestStorageContainerManager.java  | 170 
 12 files changed, 683 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff64d357/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 4dc7e0a..98efbf8 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -75,4 +75,10 @@ public final class HddsConfigKeys {
   "hdds.container.close.threshold";
   public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
 
+  // % of containers which should have at least one reported replica
+  // before SCM comes out of chill mode.
+  public static final String HDDS_SCM_CHILLMODE_THRESHOLD_PCT =
+  "hdds.scm.chillmode.threshold.pct";
+  public static final double HDDS_SCM_CHILLMODE_THRESHOLD_PCT_DEFAULT = 0.99;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff64d357/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 778d641..be19e90 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1113,6 +1113,15 @@
   
 
   
+    <name>hdds.scm.chillmode.threshold.pct</name>
+    <value>0.99</value>
+    <tag>HDDS,SCM,OPERATION</tag>
+    <description> % of containers which should have at least one
+      reported replica before SCM comes out of chill mode.
+    </description>
+  </property>
+
+  <property>
     <name>hdds.container.action.max.limit</name>
     <value>20</value>
     <tag>DATANODE</tag>
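
The new property is the fraction of known containers that must have at least one reported replica before SCM leaves chill mode. A simplified sketch of reading and applying the configured fraction; the real SCMChillModeManager exit logic is more involved, this only illustrates the threshold check:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/**
 * Sketch of applying hdds.scm.chillmode.threshold.pct: exit chill mode
 * once the reported-replica ratio reaches the configured fraction.
 */
public class ChillModeThresholdSketch {

  static boolean canExitChillMode(OzoneConfiguration conf,
      long containersWithReplica, long totalContainers) {
    double threshold =
        conf.getDouble("hdds.scm.chillmode.threshold.pct", 0.99);
    if (totalContainers == 0) {
      return true; // nothing to wait for on a fresh cluster
    }
    return (double) containersWithReplica / totalContainers >= threshold;
  }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    System.out.println(canExitChillMode(conf, 99, 100)); // true with default 0.99
  }
}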

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff64d357/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 3554339..5678205 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -33,6 +33,7 @@ import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -210,6 +211,10 @@ public class ContainerMapping implements Mapping {
 // For close containers create pipeline from datanodes with replicas
 Set<DatanodeDetails> dnWithReplicas = containerStateManager
 .getContainerReplicas(contInfo.containerID());
+if (dnWithReplicas.size() == 0) {
+  throw new SCMException("Can't create a pipeline for container with "
+  + "no replic

[2/2] hadoop git commit: HDDS-190. Improve shell error message for unrecognized option. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
HDDS-190. Improve shell error message for unrecognized option.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73fcbdd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73fcbdd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73fcbdd2

Branch: refs/heads/trunk
Commit: 73fcbdd296fb0f6e7cde17ef0bd6f3b981878077
Parents: 410dd3f
Author: Anu Engineer 
Authored: Fri Sep 7 12:53:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 12:54:32 2018 -0700

--
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  17 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 246 ++-
 .../hadoop/ozone/web/ozShell/Handler.java   |  27 +-
 .../apache/hadoop/ozone/web/ozShell/Shell.java  | 407 +++
 .../web/ozShell/bucket/CreateBucketHandler.java |  46 +--
 .../web/ozShell/bucket/DeleteBucketHandler.java |  45 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |  42 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |  88 ++--
 .../web/ozShell/bucket/UpdateBucketHandler.java |  66 +--
 .../web/ozShell/keys/DeleteKeyHandler.java  |  48 +--
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  78 ++--
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  45 +-
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |  91 ++---
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  81 ++--
 .../web/ozShell/volume/CreateVolumeHandler.java |  65 ++-
 .../web/ozShell/volume/DeleteVolumeHandler.java |  35 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |  37 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  81 ++--
 .../web/ozShell/volume/UpdateVolumeHandler.java |  49 +--
 19 files changed, 617 insertions(+), 977 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fcbdd2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index 2b3e6c0..f829d82 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
+import com.google.common.annotations.VisibleForTesting;
 import picocli.CommandLine;
 import picocli.CommandLine.ExecutionException;
 import picocli.CommandLine.Option;
@@ -49,13 +50,18 @@ public class GenericCli implements Callable<Void> {
 
   public void run(String[] argv) {
 try {
-  cmd.parseWithHandler(new RunLast(), argv);
+  execute(argv);
 } catch (ExecutionException ex) {
   printError(ex.getCause());
   System.exit(-1);
 }
   }
 
+  @VisibleForTesting
+  public void execute(String[] argv) {
+cmd.parseWithHandler(new RunLast(), argv);
+  }
+
   private void printError(Throwable error) {
 if (verbose) {
   error.printStackTrace(System.err);
@@ -79,4 +85,13 @@ public class GenericCli implements Callable {
 }
 return ozoneConf;
   }
+
+  public boolean isVerbose() {
+return verbose;
+  }
+
+  @VisibleForTesting
+  public picocli.CommandLine getCmd() {
+return cmd;
+  }
 }
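
Splitting execute() out of run() is what makes the new TestOzoneShell assertions possible: a test can drive the picocli parser directly and inspect the wrapped cause, whereas run() prints the error and calls System.exit(-1). A minimal sketch of that test-side usage, with Dummy as an illustrative stand-in for a real shell handler:

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.RunLast;

/**
 * Sketch of invoking a picocli command the way a test would: catch the
 * ExecutionException instead of letting the CLI wrapper exit the JVM.
 */
public class GenericCliTestSketch {

  @Command(name = "dummy")
  static class Dummy implements Runnable {
    @Override
    public void run() {
      throw new IllegalArgumentException("bad option value");
    }
  }

  public static void main(String[] args) {
    CommandLine cmd = new CommandLine(new Dummy());
    try {
      cmd.parseWithHandler(new RunLast(), new String[] {});
    } catch (ExecutionException ex) {
      // A test would assert on ex.getCause() here.
      System.err.println("wrapped cause: " + ex.getCause().getMessage());
    }
  }
}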

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fcbdd2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 386b1d2..f50de4b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -38,17 +32,15 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Strings;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import o

[1/2] hadoop git commit: HDDS-190. Improve shell error message for unrecognized option. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 410dd3faa -> 73fcbdd29


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fcbdd2/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index c73307d..bea68f2 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -18,86 +18,80 @@
 
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.codec.digest.DigestUtils;
+import java.io.File;
+import java.io.FileInputStream;
+import java.net.URI;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
+import org.apache.commons.codec.digest.DigestUtils;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.Parameters;
 
 /**
  * Puts a file into an ozone bucket.
  */
+@Command(name = "-putKey",
+description = "creates or overwrites an existing key")
 public class PutKeyHandler extends Handler {
-  private String volumeName;
-  private String bucketName;
-  private String keyName;
 
+  @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION)
+  private String uri;
+
+  @Option(names = {"-f", "--file", "-file"},
+  description = "File to upload",
+  required = true)
+  private String fileName;
+
+  @Option(names = {"-r", "--replication", "-replicationFactor"},
+  description = "Replication factor of the new key. (use ONE or THREE) "
+  + "Default is specified in the cluster-wide config.")
+  private ReplicationFactor replicationFactor;
   /**
* Executes the Client Calls.
-   *
-   * @param cmd - CommandLine
-   * @throws IOException
-   * @throws OzoneException
-   * @throws URISyntaxException
*/
   @Override
-  protected void execute(CommandLine cmd)
-  throws IOException, OzoneException, URISyntaxException {
-if (!cmd.hasOption(Shell.PUT_KEY)) {
-  throw new OzoneClientException("Incorrect call : putKey is missing");
-}
+  public Void call() throws Exception {
 
-if (!cmd.hasOption(Shell.FILE)) {
-  throw new OzoneClientException("put key needs a file to put");
-}
-
-String ozoneURIString = cmd.getOptionValue(Shell.PUT_KEY);
-URI ozoneURI = verifyURI(ozoneURIString);
+URI ozoneURI = verifyURI(uri);
 Path path = Paths.get(ozoneURI.getPath());
 if (path.getNameCount() < 3) {
   throw new OzoneClientException(
   "volume/bucket/key name required in putKey");
 }
 
-volumeName = path.getName(0).toString();
-bucketName = path.getName(1).toString();
-keyName = path.getName(2).toString();
-
+String volumeName = path.getName(0).toString();
+String bucketName = path.getName(1).toString();
+String keyName = path.getName(2).toString();
 
-if (cmd.hasOption(Shell.VERBOSE)) {
+if (isVerbose()) {
   System.out.printf("Volume Name : %s%n", volumeName);
   System.out.printf("Bucket Name : %s%n", bucketName);
   System.out.printf("Key Name : %s%n", keyName);
 }
 
-  
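
The handlers drop the commons-cli hasOption()/getOptionValue() plumbing in favor of picocli field injection: the positional URI and named options are validated and bound before call() runs. A reduced sketch of the same pattern; the command and option names here are illustrative, not the shell's full option set:

import java.util.concurrent.Callable;

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.Parameters;

/**
 * Sketch of a picocli-style handler: arity and required options are
 * enforced by the framework, so call() can use the fields directly.
 */
@Command(name = "-putKey", description = "creates or overwrites an existing key")
public class PutKeySketch implements Callable<Void> {

  @Parameters(arity = "1..1", description = "volume/bucket/key URI")
  private String uri;

  @Option(names = {"-f", "--file"}, description = "File to upload",
      required = true)
  private String fileName;

  @Override
  public Void call() throws Exception {
    System.out.printf("would upload %s to %s%n", fileName, uri);
    return null;
  }

  public static void main(String[] args) {
    // A missing -f would be rejected by picocli before call() is reached.
    CommandLine.call(new PutKeySketch(), "/vol1/bucket1/key1", "-f", "data.txt");
  }
}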

[1/3] hadoop git commit: HDDS-351. Add chill mode state to SCM. Contributed by Ajay Kumar.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 be1ec005f -> fd792ce54


HDDS-351. Add chill mode state to SCM.
Contributed by Ajay Kumar.

(cherry picked from commit ff64d3571660ace3fb266ee47bea181cebfee8d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48bcebc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48bcebc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48bcebc0

Branch: refs/heads/ozone-0.2
Commit: 48bcebc0805df7428549b067f26620bad7940d9f
Parents: be1ec00
Author: Anu Engineer 
Authored: Fri Sep 7 10:35:45 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 11:00:31 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   6 +
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../hdds/scm/container/ContainerMapping.java|   5 +
 .../hadoop/hdds/scm/events/SCMEvents.java   |  10 +
 .../hdds/scm/server/SCMChillModeManager.java| 198 +++
 .../scm/server/SCMDatanodeProtocolServer.java   |  19 ++
 .../scm/server/StorageContainerManager.java |  17 +-
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |  85 
 .../scm/server/TestSCMChillModeManager.java | 115 +++
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  23 ++-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  93 ++---
 .../ozone/TestStorageContainerManager.java  | 170 
 12 files changed, 683 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48bcebc0/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 4dc7e0a..98efbf8 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -75,4 +75,10 @@ public final class HddsConfigKeys {
   "hdds.container.close.threshold";
   public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
 
+  // % of containers which should have at least one reported replica
+  // before SCM comes out of chill mode.
+  public static final String HDDS_SCM_CHILLMODE_THRESHOLD_PCT =
+  "hdds.scm.chillmode.threshold.pct";
+  public static final double HDDS_SCM_CHILLMODE_THRESHOLD_PCT_DEFAULT = 0.99;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48bcebc0/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 778d641..be19e90 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1113,6 +1113,15 @@
   
 
   
+    <name>hdds.scm.chillmode.threshold.pct</name>
+    <value>0.99</value>
+    <tag>HDDS,SCM,OPERATION</tag>
+    <description> % of containers which should have at least one
+      reported replica before SCM comes out of chill mode.
+    </description>
+  </property>
+
+  <property>
     <name>hdds.container.action.max.limit</name>
     <value>20</value>
     <tag>DATANODE</tag>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48bcebc0/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 3554339..5678205 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -33,6 +33,7 @@ import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -210,6 +211,10 @@ public class ContainerMapping implements Mapping {
 // For close containers create pipeline from datanodes with replicas
 Set<DatanodeDetails> dnWithReplicas = containerStateManager
 .getContainerReplicas(contInfo.containerID());
+if (dnWithReplicas.size() == 0) {
+  throw new SCMExcep

[3/3] hadoop git commit: HDDS-190. Improve shell error message for unrecognized option. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
HDDS-190. Improve shell error message for unrecognized option.
Contributed by Elek, Marton.

(cherry picked from commit 73fcbdd296fb0f6e7cde17ef0bd6f3b981878077)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd792ce5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd792ce5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd792ce5

Branch: refs/heads/ozone-0.2
Commit: fd792ce54d53282669696fd90c60689d06ac44f6
Parents: 48bcebc
Author: Anu Engineer 
Authored: Fri Sep 7 12:53:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 13:02:49 2018 -0700

--
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  17 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 246 ++-
 .../hadoop/ozone/web/ozShell/Handler.java   |  27 +-
 .../apache/hadoop/ozone/web/ozShell/Shell.java  | 407 +++
 .../web/ozShell/bucket/CreateBucketHandler.java |  46 +--
 .../web/ozShell/bucket/DeleteBucketHandler.java |  45 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |  42 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |  88 ++--
 .../web/ozShell/bucket/UpdateBucketHandler.java |  66 +--
 .../web/ozShell/keys/DeleteKeyHandler.java  |  48 +--
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  78 ++--
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  45 +-
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |  91 ++---
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  81 ++--
 .../web/ozShell/volume/CreateVolumeHandler.java |  65 ++-
 .../web/ozShell/volume/DeleteVolumeHandler.java |  35 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |  37 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  81 ++--
 .../web/ozShell/volume/UpdateVolumeHandler.java |  49 +--
 19 files changed, 617 insertions(+), 977 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd792ce5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index 2b3e6c0..f829d82 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
+import com.google.common.annotations.VisibleForTesting;
 import picocli.CommandLine;
 import picocli.CommandLine.ExecutionException;
 import picocli.CommandLine.Option;
@@ -49,13 +50,18 @@ public class GenericCli implements Callable<Void> {
 
   public void run(String[] argv) {
 try {
-  cmd.parseWithHandler(new RunLast(), argv);
+  execute(argv);
 } catch (ExecutionException ex) {
   printError(ex.getCause());
   System.exit(-1);
 }
   }
 
+  @VisibleForTesting
+  public void execute(String[] argv) {
+cmd.parseWithHandler(new RunLast(), argv);
+  }
+
   private void printError(Throwable error) {
 if (verbose) {
   error.printStackTrace(System.err);
@@ -79,4 +85,13 @@ public class GenericCli implements Callable {
 }
 return ozoneConf;
   }
+
+  public boolean isVerbose() {
+return verbose;
+  }
+
+  @VisibleForTesting
+  public picocli.CommandLine getCmd() {
+return cmd;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd792ce5/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 386b1d2..f50de4b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -38,17 +32,15 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Strings;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.Replicatio

[2/3] hadoop git commit: HDDS-190. Improve shell error message for unrecognized option. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd792ce5/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index c73307d..bea68f2 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -18,86 +18,80 @@
 
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.codec.digest.DigestUtils;
+import java.io.File;
+import java.io.FileInputStream;
+import java.net.URI;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
+import org.apache.commons.codec.digest.DigestUtils;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.Parameters;
 
 /**
  * Puts a file into an ozone bucket.
  */
+@Command(name = "-putKey",
+description = "creates or overwrites an existing key")
 public class PutKeyHandler extends Handler {
-  private String volumeName;
-  private String bucketName;
-  private String keyName;
 
+  @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION)
+  private String uri;
+
+  @Option(names = {"-f", "--file", "-file"},
+  description = "File to upload",
+  required = true)
+  private String fileName;
+
+  @Option(names = {"-r", "--replication", "-replicationFactor"},
+  description = "Replication factor of the new key. (use ONE or THREE) "
+  + "Default is specified in the cluster-wide config.")
+  private ReplicationFactor replicationFactor;
   /**
* Executes the Client Calls.
-   *
-   * @param cmd - CommandLine
-   * @throws IOException
-   * @throws OzoneException
-   * @throws URISyntaxException
*/
   @Override
-  protected void execute(CommandLine cmd)
-  throws IOException, OzoneException, URISyntaxException {
-if (!cmd.hasOption(Shell.PUT_KEY)) {
-  throw new OzoneClientException("Incorrect call : putKey is missing");
-}
+  public Void call() throws Exception {
 
-if (!cmd.hasOption(Shell.FILE)) {
-  throw new OzoneClientException("put key needs a file to put");
-}
-
-String ozoneURIString = cmd.getOptionValue(Shell.PUT_KEY);
-URI ozoneURI = verifyURI(ozoneURIString);
+URI ozoneURI = verifyURI(uri);
 Path path = Paths.get(ozoneURI.getPath());
 if (path.getNameCount() < 3) {
   throw new OzoneClientException(
   "volume/bucket/key name required in putKey");
 }
 
-volumeName = path.getName(0).toString();
-bucketName = path.getName(1).toString();
-keyName = path.getName(2).toString();
-
+String volumeName = path.getName(0).toString();
+String bucketName = path.getName(1).toString();
+String keyName = path.getName(2).toString();
 
-if (cmd.hasOption(Shell.VERBOSE)) {
+if (isVerbose()) {
   System.out.printf("Volume Name : %s%n", volumeName);
   System.out.printf("Bucket Name : %s%n", bucketName);
   System.out.printf("Key Name : %s%n", keyName);
 }
 
-String fileName = cmd.getOptionValue(Shell.FILE);
 File dataFile = new File

[1/2] hadoop git commit: HDDS-398. Support multiple tests in freon. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 73fcbdd29 -> ca3adf588


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca3adf58/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
new file mode 100644
index 000..ee4cc87
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -0,0 +1,1038 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.UUID;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.VersionInfo;
+
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.UniformReservoir;
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.PropertyAccessor;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.annotations.VisibleForTesting;
+import static java.lang.Math.min;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParentCommand;
+
+/**
+ * Data generator tool to generate as much keys as possible.
+ */
+@Command(name = "randomkeys",
+aliases = "rk",
+description = "Generate volumes/buckets and put generated keys.",
+versionProvider = HddsVersionProvider.class,
+mixinStandardHelpOptions = true,
+showDefaultValues = true)
+public final class RandomKeyGenerator implements Callable<Void> {
+
+  @ParentCommand
+  private Freon freon;
+
+  enum FreonOps {
+VOLUME_CREATE,
+BUCKET_CREATE,
+KEY_CREATE,
+KEY_WRITE
+  }
+
+  private static final String RATIS = "ratis";
+
+  private static final String DURATION_FORMAT = "HH:mm:ss,SSS";
+
+  private static final int QUANTILES = 10;
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RandomKeyGenerator.class);
+
+  private boolean completed = false;
+  private boolean exception = false;
+
+  @Option(names = "--numOfThreads",
+  description = "number of threads to be launched for the run",
+  defaultValue = "10")
+  private int numOfThreads = 10;
+
+  @Option(names = "--numOfVolumes",
+  description = "specifies number of Volumes to be created in offline 
mode",
+  defaultValue = "10")
+  private i
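
With this change freon stops being a single ToolRunner tool and becomes a picocli parent command whose load tests are subcommands; randomkeys is the first one, and the old single-dash flags turn into long options with declared defaults. A trimmed sketch of that parent/subcommand wiring; class names and the option set here are illustrative:

import java.util.concurrent.Callable;

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParentCommand;
import picocli.CommandLine.RunLast;

/**
 * Sketch of the freon layout after HDDS-398: a parent command that only
 * dispatches, plus one subcommand per test with its own options.
 */
@Command(name = "freon", subcommands = RandomKeysSketch.class)
public class FreonSketch implements Callable<Void> {

  @Override
  public Void call() {
    CommandLine.usage(this, System.out); // no subcommand given
    return null;
  }

  public static void main(String[] args) {
    new CommandLine(new FreonSketch()).parseWithHandler(new RunLast(),
        new String[] {"randomkeys", "--numOfVolumes", "2"});
  }
}

@Command(name = "randomkeys", aliases = "rk",
    description = "Generate volumes/buckets and put generated keys.")
class RandomKeysSketch implements Callable<Void> {

  @ParentCommand
  private FreonSketch freon; // access to shared parent-level options

  @Option(names = "--numOfVolumes", defaultValue = "10",
      description = "number of volumes to create")
  private int numOfVolumes;

  @Override
  public Void call() {
    System.out.println("would create " + numOfVolumes + " volumes");
    return null;
  }
}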

[2/2] hadoop git commit: HDDS-398. Support multiple tests in freon. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
HDDS-398. Support multiple tests in freon.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca3adf58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca3adf58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca3adf58

Branch: refs/heads/trunk
Commit: ca3adf588ceef53de340f03cbfbd45db8d25a408
Parents: 73fcbdd
Author: Anu Engineer 
Authored: Fri Sep 7 13:27:15 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 13:27:15 2018 -0700

--
 .../src/test/acceptance/basic/basic.robot   |2 +-
 .../hadoop/ozone/freon/TestDataValidate.java|  119 +-
 .../apache/hadoop/ozone/freon/TestFreon.java|  129 --
 .../ozone/freon/TestRandomKeyGenerator.java |  106 ++
 .../org/apache/hadoop/ozone/freon/Freon.java| 1136 +-
 .../hadoop/ozone/freon/RandomKeyGenerator.java  | 1038 
 6 files changed, 1205 insertions(+), 1325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca3adf58/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot 
b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
index 6d6fea0..71d6e4c 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
@@ -45,6 +45,6 @@ Check webui static resources
 Should contain ${result}   200
 
 Start freon testing
-${result} =Execute on  ozoneManager
ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+${result} = Execute on  ozoneManager   
 ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 
--numOfThreads 10
 Wait Until Keyword Succeeds3min10sec   Should contain  
${result}   Number of Keys added: 125
 Should Not Contain ${result}   ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca3adf58/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
index 0a3c391..fdce736 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,20 +18,15 @@
 
 package org.apache.hadoop.ozone.freon;
 
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
 /**
  * Tests Freon, with MiniOzoneCluster and validate data.
  */
@@ -45,7 +40,6 @@ public class TestDataValidate {
* 
* Ozone is made active by setting OZONE_ENABLED = true
*
-   * @throws IOException
*/
   @BeforeClass
   public static void init() throws Exception {
@@ -67,74 +61,55 @@ public class TestDataValidate {
 
   @Test
   public void ratisTestLargeKey() throws Exception {
-List<String> args = new ArrayList<>();
-args.add("-validateWrites");
-args.add("-numOfVolumes");
-args.add("1");
-args.add("-numOfBuckets");
-args.add("1");
-args.add("-numOfKeys");
-args.add("1");
-args.add("-ratis");
-args.add("3");
-args.add("-keySize");
-args.add("104857600");
-Freon freon = new Freon(conf);
-int res = ToolRunner.run(conf, freon,
-args.toArray(new String[0]));
-Assert.assertEquals

[2/2] hadoop git commit: HDDS-398. Support multiple tests in freon. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
HDDS-398. Support multiple tests in freon.
Contributed by Elek, Marton.

(cherry picked from commit ca3adf588ceef53de340f03cbfbd45db8d25a408)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5284b504
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5284b504
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5284b504

Branch: refs/heads/ozone-0.2
Commit: 5284b50436186e0aad98854ede4824e24d2c5865
Parents: 25ec339
Author: Anu Engineer 
Authored: Fri Sep 7 13:27:15 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 7 13:29:55 2018 -0700

--
 .../src/test/acceptance/basic/basic.robot   |2 +-
 .../hadoop/ozone/freon/TestDataValidate.java|  119 +-
 .../apache/hadoop/ozone/freon/TestFreon.java|  129 --
 .../ozone/freon/TestRandomKeyGenerator.java |  106 ++
 .../org/apache/hadoop/ozone/freon/Freon.java| 1136 +-
 .../hadoop/ozone/freon/RandomKeyGenerator.java  | 1038 
 6 files changed, 1205 insertions(+), 1325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5284b504/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot 
b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
index 6d6fea0..71d6e4c 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
@@ -45,6 +45,6 @@ Check webui static resources
 Should contain ${result}   200
 
 Start freon testing
-${result} =Execute on  ozoneManager
ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+${result} = Execute on  ozoneManager   
 ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 
--numOfThreads 10
 Wait Until Keyword Succeeds3min10sec   Should contain  
${result}   Number of Keys added: 125
 Should Not Contain ${result}   ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5284b504/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
index 0a3c391..fdce736 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,20 +18,15 @@
 
 package org.apache.hadoop.ozone.freon;
 
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
 /**
  * Tests Freon, with MiniOzoneCluster and validate data.
  */
@@ -45,7 +40,6 @@ public class TestDataValidate {
* 
* Ozone is made active by setting OZONE_ENABLED = true
*
-   * @throws IOException
*/
   @BeforeClass
   public static void init() throws Exception {
@@ -67,74 +61,55 @@ public class TestDataValidate {
 
   @Test
   public void ratisTestLargeKey() throws Exception {
-List args = new ArrayList<>();
-args.add("-validateWrites");
-args.add("-numOfVolumes");
-args.add("1");
-args.add("-numOfBuckets");
-args.add("1");
-args.add("-numOfKeys");
-args.add("1");
-args.add("-ratis");
-args.add("3");
-args.add("-keySize");
-args.add("104857600");
-Freon freon = new Freon(conf);
-int res = ToolRunner.run(co

[1/2] hadoop git commit: HDDS-398. Support multiple tests in freon. Contributed by Elek, Marton.

2018-09-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 25ec339af -> 5284b5043


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5284b504/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
new file mode 100644
index 000..ee4cc87
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -0,0 +1,1038 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.UUID;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.VersionInfo;
+
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.UniformReservoir;
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.PropertyAccessor;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.annotations.VisibleForTesting;
+import static java.lang.Math.min;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParentCommand;
+
+/**
+ * Data generator tool to generate as much keys as possible.
+ */
+@Command(name = "randomkeys",
+aliases = "rk",
+description = "Generate volumes/buckets and put generated keys.",
+versionProvider = HddsVersionProvider.class,
+mixinStandardHelpOptions = true,
+showDefaultValues = true)
+public final class RandomKeyGenerator implements Callable {
+
+  @ParentCommand
+  private Freon freon;
+
+  enum FreonOps {
+VOLUME_CREATE,
+BUCKET_CREATE,
+KEY_CREATE,
+KEY_WRITE
+  }
+
+  private static final String RATIS = "ratis";
+
+  private static final String DURATION_FORMAT = "HH:mm:ss,SSS";
+
+  private static final int QUANTILES = 10;
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RandomKeyGenerator.class);
+
+  private boolean completed = false;
+  private boolean exception = false;
+
+  @Option(names = "--numOfThreads",
+  description = "number of threads to be launched for the run",
+  defaultValue = "10")
+  private int numOfThreads = 10;
+
+  @Option(names = "--numOfVolumes",
+  description = "specifies number of Volumes to be created in offline 
mode",
+  defaultValue = "10")
+  priva
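
The imports above (com.codahale.metrics.Histogram, UniformReservoir, Snapshot) together with the FreonOps enum suggest that the generator samples the latency of each operation type into a histogram and summarizes it at the end of a run. The helper below is a hypothetical sketch of that measurement pattern, not the actual RandomKeyGenerator internals.

import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;

// Hypothetical helper: sample per-operation latency the way the imports hint at.
public class FreonLatencySketch {

  enum FreonOps { VOLUME_CREATE, BUCKET_CREATE, KEY_CREATE, KEY_WRITE }

  private final Map<FreonOps, Histogram> histograms = new EnumMap<>(FreonOps.class);

  public FreonLatencySketch() {
    for (FreonOps op : FreonOps.values()) {
      histograms.put(op, new Histogram(new UniformReservoir()));
    }
  }

  // Record how long one operation took, in milliseconds.
  public void record(FreonOps op, long startNanos) {
    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    histograms.get(op).update(elapsedMillis);
  }

  // Print mean and 90th percentile per operation type.
  public void report() {
    for (Map.Entry<FreonOps, Histogram> e : histograms.entrySet()) {
      Snapshot s = e.getValue().getSnapshot();
      System.out.printf("%s mean=%.1fms p90=%.1fms%n",
          e.getKey(), s.getMean(), s.getValue(0.90));
    }
  }
}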

hadoop git commit: HDDS-222. Remove hdfs command line from ozone distribution. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ce997af9 -> 7b5886bf7


HDDS-222. Remove hdfs command line from ozone distribution.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b5886bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b5886bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b5886bf

Branch: refs/heads/trunk
Commit: 7b5886bf784579cc97656266901e6f934522b0e8
Parents: 7ce997a
Author: Anu Engineer 
Authored: Tue Sep 11 10:19:50 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 10:19:50 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |  6 +-
 hadoop-hdds/client/pom.xml  |  1 -
 hadoop-hdds/container-service/pom.xml   |  2 -
 hadoop-hdds/framework/pom.xml   |  1 -
 hadoop-hdds/pom.xml |  3 -
 hadoop-hdds/server-scm/pom.xml  |  4 -
 hadoop-ozone/common/src/main/bin/ozone  |  8 +-
 .../common/src/main/bin/ozone-config.sh | 51 +++
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 90 +---
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  6 +-
 hadoop-ozone/objectstore-service/pom.xml|  2 -
 hadoop-ozone/ozone-manager/pom.xml  |  2 -
 hadoop-ozone/pom.xml|  9 --
 13 files changed, 65 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5886bf/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 1ba652c..d91c7ef 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -127,8 +127,6 @@ run cp -p "${ROOT}/README.txt" .
 # Copy hadoop-common first so that it have always have all dependencies.
 # Remaining projects will copy only libraries which are not present already in 
'share' directory.
 run copy 
"${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
-run copy 
"${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
-run copy 
"${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}"
 .
 
 
 # HDDS
@@ -151,11 +149,15 @@ cp 
"${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
 
+rm sbin/*all.sh
+rm sbin/*all.cmd
+
 #Copy docker compose files
 run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
+mkdir -p ./share/hadoop/hdfs
 echo
 echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
 echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5886bf/hadoop-hdds/client/pom.xml
--
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index fb5320d..6a4cc9d 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -38,7 +38,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5886bf/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index c2b967c..3d4e581 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -37,12 +37,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
   org.apache.hadoop
   hadoop-hdds-server-framework
-  provided
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5886bf/hadoop-hdds/framework/pom.xml
--
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 1d81b52..cb380e2 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -37,7 +37,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
   org.mockito

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5886bf/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 0902c7d..563ce9e 100644
--- a/hadoop

hadoop git commit: HDDS-222. Remove hdfs command line from ozone distribution. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 4b9f1d763 -> 0d3406e25


HDDS-222. Remove hdfs command line from ozone distribution.
Contributed by Elek, Marton.

(cherry picked from commit 7b5886bf784579cc97656266901e6f934522b0e8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d3406e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d3406e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d3406e2

Branch: refs/heads/ozone-0.2
Commit: 0d3406e25c7f13c70fd02b33b99a8886bb828f31
Parents: 4b9f1d7
Author: Anu Engineer 
Authored: Tue Sep 11 10:19:50 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 10:25:43 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |  6 +-
 hadoop-hdds/client/pom.xml  |  1 -
 hadoop-hdds/container-service/pom.xml   |  2 -
 hadoop-hdds/framework/pom.xml   |  1 -
 hadoop-hdds/pom.xml |  3 -
 hadoop-hdds/server-scm/pom.xml  |  4 -
 hadoop-ozone/common/src/main/bin/ozone  |  8 +-
 .../common/src/main/bin/ozone-config.sh | 51 +++
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 90 +---
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  6 +-
 hadoop-ozone/objectstore-service/pom.xml|  2 -
 hadoop-ozone/ozone-manager/pom.xml  |  2 -
 hadoop-ozone/pom.xml|  9 --
 13 files changed, 65 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3406e2/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 1ba652c..d91c7ef 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -127,8 +127,6 @@ run cp -p "${ROOT}/README.txt" .
 # Copy hadoop-common first so that it have always have all dependencies.
 # Remaining projects will copy only libraries which are not present already in 
'share' directory.
 run copy 
"${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
-run copy 
"${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
-run copy 
"${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}"
 .
 
 
 # HDDS
@@ -151,11 +149,15 @@ cp 
"${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
 
+rm sbin/*all.sh
+rm sbin/*all.cmd
+
 #Copy docker compose files
 run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
+mkdir -p ./share/hadoop/hdfs
 echo
 echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
 echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3406e2/hadoop-hdds/client/pom.xml
--
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index d6db9c6..43e121d 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -38,7 +38,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3406e2/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 43f400c..d43007f 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -37,12 +37,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
   org.apache.hadoop
   hadoop-hdds-server-framework
-  provided
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3406e2/hadoop-hdds/framework/pom.xml
--
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 6e1927d..ecd6a24 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -37,7 +37,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   org.apache.hadoop
   hadoop-hdds-common
-  provided
 
 
   org.mockito

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3406e2/hadoop-hdds/pom.xml
--
diff --git a/hadoop

hadoop git commit: HDDS-431. LeaseManager of CommandWatcher is not started. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7b5886bf7 -> 8ffbbf51c


HDDS-431. LeaseManager of CommandWatcher is not started.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ffbbf51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ffbbf51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ffbbf51

Branch: refs/heads/trunk
Commit: 8ffbbf51c9a0f9bbcd9ee7452d95e588a2cb87ac
Parents: 7b5886b
Author: Anu Engineer 
Authored: Tue Sep 11 10:44:47 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 10:44:47 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/server/StorageContainerManager.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ffbbf51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index f505430..67d5496 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -613,6 +613,8 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 "StorageContainerLocationProtocol RPC server",
 getClientRpcAddress()));
 DefaultMetricsSystem.initialize("StorageContainerManager");
+
+commandWatcherLeaseManager.start();
 getClientProtocolServer().start();
 
 LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
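
The whole fix is the commandWatcherLeaseManager.start() call added above: event watchers acquire leases from this manager during SCM startup, and acquiring a lease from a manager that was never started fails. The toy class below only mimics that start-before-acquire contract; it is not the real HDDS LeaseManager API, which also runs a monitor thread that expires leases.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: leases can be acquired only after start() has been called.
public class ToyLeaseManager<T> {

  private final long timeoutMillis;
  private final Map<T, Long> leases = new ConcurrentHashMap<>();
  private volatile boolean running = false;

  public ToyLeaseManager(long timeoutMillis) {
    this.timeoutMillis = timeoutMillis;
  }

  public void start() {
    // The real manager also starts a background monitor here.
    running = true;
  }

  public void acquire(T resource) {
    if (!running) {
      // This is the failure mode HDDS-431 fixes: watchers could not take
      // leases because start() was never invoked during SCM startup.
      throw new IllegalStateException("LeaseManager not started");
    }
    leases.put(resource, System.currentTimeMillis() + timeoutMillis);
  }

  public void release(T resource) {
    leases.remove(resource);
  }

  public void shutdown() {
    running = false;
    leases.clear();
  }
}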





hadoop git commit: HDDS-431. LeaseManager of CommandWatcher is not started. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 0d3406e25 -> 54ec2366e


HDDS-431. LeaseManager of CommandWatcher is not started.
Contributed by Elek, Marton.

(cherry picked from commit 8ffbbf51c9a0f9bbcd9ee7452d95e588a2cb87ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54ec2366
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54ec2366
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54ec2366

Branch: refs/heads/ozone-0.2
Commit: 54ec2366e217254fdc84040842353d6b7fc115ce
Parents: 0d3406e
Author: Anu Engineer 
Authored: Tue Sep 11 10:44:47 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 10:51:06 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/server/StorageContainerManager.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ec2366/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index f505430..67d5496 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -613,6 +613,8 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 "StorageContainerLocationProtocol RPC server",
 getClientRpcAddress()));
 DefaultMetricsSystem.initialize("StorageContainerManager");
+
+commandWatcherLeaseManager.start();
 getClientProtocolServer().start();
 
 LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +





hadoop git commit: HDDS-424. Consolidate ozone oz parameters to use GNU convention. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1d567c25d -> a406f6f60


HDDS-424. Consolidate ozone oz parameters to use GNU convention.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a406f6f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a406f6f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a406f6f6

Branch: refs/heads/trunk
Commit: a406f6f60ee0caf8229d13bda595d621a9779aa8
Parents: 1d567c2
Author: Anu Engineer 
Authored: Tue Sep 11 16:42:12 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 16:47:33 2018 -0700

--
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  14 +-
 .../hadoop/hdds/cli/GenericParentCommand.java   |  25 +++
 .../hdds/cli/MissingSubcommandException.java|  29 
 .../src/test/acceptance/basic/ozone-shell.robot |  36 ++---
 .../src/test/acceptance/ozonefs/ozonefs.robot   |   6 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 162 ++-
 .../hadoop/ozone/web/ozShell/Handler.java   |  36 ++---
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  39 +
 .../web/ozShell/bucket/BucketCommands.java  |  59 +++
 .../web/ozShell/bucket/CreateBucketHandler.java |   2 +-
 .../web/ozShell/bucket/DeleteBucketHandler.java |   2 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |   2 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |   9 +-
 .../web/ozShell/bucket/UpdateBucketHandler.java |   6 +-
 .../web/ozShell/keys/DeleteKeyHandler.java  |   2 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  11 +-
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |   2 +-
 .../ozone/web/ozShell/keys/KeyCommands.java |  59 +++
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |   9 +-
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  11 +-
 .../web/ozShell/volume/CreateVolumeHandler.java |  18 +--
 .../web/ozShell/volume/DeleteVolumeHandler.java |   2 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |   2 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  11 +-
 .../web/ozShell/volume/UpdateVolumeHandler.java |   6 +-
 .../web/ozShell/volume/VolumeCommands.java  |  60 +++
 26 files changed, 410 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a406f6f6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index f829d82..9a0be44 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -27,13 +27,12 @@ import com.google.common.annotations.VisibleForTesting;
 import picocli.CommandLine;
 import picocli.CommandLine.ExecutionException;
 import picocli.CommandLine.Option;
-import picocli.CommandLine.ParameterException;
 import picocli.CommandLine.RunLast;
 
 /**
  * This is a generic parent class for all the ozone related cli tools.
  */
-public class GenericCli implements Callable {
+public class GenericCli implements Callable, GenericParentCommand {
 
   @Option(names = {"--verbose"},
   description = "More verbose output. Show the stack trace of the errors.")
@@ -72,7 +71,7 @@ public class GenericCli implements Callable {
 
   @Override
   public Void call() throws Exception {
-throw new ParameterException(cmd, "Please choose a subcommand");
+throw new MissingSubcommandException();
   }
 
   public OzoneConfiguration createOzoneConfiguration() {
@@ -86,12 +85,13 @@ public class GenericCli implements Callable {
 return ozoneConf;
   }
 
-  public boolean isVerbose() {
-return verbose;
-  }
-
   @VisibleForTesting
   public picocli.CommandLine getCmd() {
 return cmd;
   }
+
+  @Override
+  public boolean isVerbose() {
+return verbose;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a406f6f6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
new file mode 100644
index 000..a1d2171
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The AS
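
As the GenericCli.java hunk above shows, the common CLI parent now implements a small GenericParentCommand interface (so subcommands can ask for shared flags such as --verbose) and signals a missing subcommand with a dedicated unchecked exception instead of picocli's ParameterException. Below is a condensed sketch of that shape; the interface and option names come from the diff, while the exception message and command name are assumptions.

import java.util.concurrent.Callable;

import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

// Shared surface that subcommands can query on their parent (name from the diff).
interface GenericParentCommand {
  boolean isVerbose();
}

// Assumed shape of the unchecked error thrown when no subcommand is given.
class MissingSubcommandException extends RuntimeException {
  MissingSubcommandException() {
    super("Incomplete command. Please select one of the available subcommands.");
  }
}

// Minimal parent CLI in the style of GenericCli after the change.
@Command(name = "ozone-tool", mixinStandardHelpOptions = true)
class ParentCliSketch implements Callable<Void>, GenericParentCommand {

  @Option(names = {"--verbose"},
      description = "More verbose output. Show the stack trace of the errors.")
  private boolean verbose;

  @Override
  public Void call() {
    // Running the parent without a subcommand is always an error.
    throw new MissingSubcommandException();
  }

  @Override
  public boolean isVerbose() {
    return verbose;
  }
}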

hadoop git commit: HDDS-424. Consolidate ozone oz parameters to use GNU convention. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 f1165d216 -> c074647c9


HDDS-424. Consolidate ozone oz parameters to use GNU convention.
Contributed by Elek, Marton.

(cherry picked from commit a406f6f60ee0caf8229d13bda595d621a9779aa8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c074647c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c074647c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c074647c

Branch: refs/heads/ozone-0.2
Commit: c074647c9ec80d04d48c30e53a8adf4cf1a2d0fa
Parents: f1165d2
Author: Anu Engineer 
Authored: Tue Sep 11 16:42:12 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 16:52:23 2018 -0700

--
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  14 +-
 .../hadoop/hdds/cli/GenericParentCommand.java   |  25 +++
 .../hdds/cli/MissingSubcommandException.java|  29 
 .../src/test/acceptance/basic/ozone-shell.robot |  36 ++---
 .../src/test/acceptance/ozonefs/ozonefs.robot   |   6 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 162 ++-
 .../hadoop/ozone/web/ozShell/Handler.java   |  36 ++---
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  39 +
 .../web/ozShell/bucket/BucketCommands.java  |  59 +++
 .../web/ozShell/bucket/CreateBucketHandler.java |   2 +-
 .../web/ozShell/bucket/DeleteBucketHandler.java |   2 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |   2 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |   9 +-
 .../web/ozShell/bucket/UpdateBucketHandler.java |   6 +-
 .../web/ozShell/keys/DeleteKeyHandler.java  |   2 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  11 +-
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |   2 +-
 .../ozone/web/ozShell/keys/KeyCommands.java |  59 +++
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |   9 +-
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  11 +-
 .../web/ozShell/volume/CreateVolumeHandler.java |  18 +--
 .../web/ozShell/volume/DeleteVolumeHandler.java |   2 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |   2 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  11 +-
 .../web/ozShell/volume/UpdateVolumeHandler.java |   6 +-
 .../web/ozShell/volume/VolumeCommands.java  |  60 +++
 26 files changed, 410 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c074647c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index f829d82..9a0be44 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -27,13 +27,12 @@ import com.google.common.annotations.VisibleForTesting;
 import picocli.CommandLine;
 import picocli.CommandLine.ExecutionException;
 import picocli.CommandLine.Option;
-import picocli.CommandLine.ParameterException;
 import picocli.CommandLine.RunLast;
 
 /**
  * This is a generic parent class for all the ozone related cli tools.
  */
-public class GenericCli implements Callable {
+public class GenericCli implements Callable, GenericParentCommand {
 
   @Option(names = {"--verbose"},
   description = "More verbose output. Show the stack trace of the errors.")
@@ -72,7 +71,7 @@ public class GenericCli implements Callable {
 
   @Override
   public Void call() throws Exception {
-throw new ParameterException(cmd, "Please choose a subcommand");
+throw new MissingSubcommandException();
   }
 
   public OzoneConfiguration createOzoneConfiguration() {
@@ -86,12 +85,13 @@ public class GenericCli implements Callable {
 return ozoneConf;
   }
 
-  public boolean isVerbose() {
-return verbose;
-  }
-
   @VisibleForTesting
   public picocli.CommandLine getCmd() {
 return cmd;
   }
+
+  @Override
+  public boolean isVerbose() {
+return verbose;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c074647c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
new file mode 100644
index 000..a1d2171
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with t

hadoop git commit: HDDS-432. Replication of closed containers is not working. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk a406f6f60 -> 9c238ffc3


HDDS-432. Replication of closed containers is not working.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c238ffc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c238ffc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c238ffc

Branch: refs/heads/trunk
Commit: 9c238ffc301c9aa1ae0f811c065e7426b1e23540
Parents: a406f6f
Author: Anu Engineer 
Authored: Tue Sep 11 17:00:04 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 17:00:04 2018 -0700

--
 .../ReplicateContainerCommandHandler.java   |  15 ++-
 .../TestReplicateContainerCommandHandler.java   |  19 +++-
 .../replication/ReplicationManager.java |  18 +++-
 .../replication/TestReplicationManager.java | 104 +++
 4 files changed, 105 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c238ffc/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
index d1895a8..cb677c2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -19,11 +19,13 @@ package 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 import java.io.FileInputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -44,6 +46,7 @@ import 
org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,13 +100,19 @@ public class ReplicateContainerCommandHandler implements 
CommandHandler {
 ReplicateContainerCommand replicateCommand =
 (ReplicateContainerCommand) command;
 try {
-
+  List sourceDatanodes =
+  replicateCommand.getSourceDatanodes();
   long containerID = replicateCommand.getContainerID();
+
+  Preconditions.checkArgument(sourceDatanodes.size() > 0,
+  String.format("Replication command is received for container %d "
+  + "but the size of source datanodes was 0.", containerID));
+
   LOG.info("Starting replication of container {} from {}", containerID,
-  replicateCommand.getSourceDatanodes());
+  sourceDatanodes);
   CompletableFuture tempTarFile = downloader
   .getContainerDataFromReplicas(containerID,
-  replicateCommand.getSourceDatanodes());
+  sourceDatanodes);
 
   CompletableFuture result =
   tempTarFile.thenAccept(path -> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c238ffc/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
index 6a14d33..6529922 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
@@ -106,7 +106,6 @@ pub
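
The handler change above makes the failure mode explicit: a replicate command that arrives with an empty source-datanode list is rejected with a clear message before any container download is attempted. The same guard, pulled out into a standalone hypothetical helper:

import java.util.List;

import com.google.common.base.Preconditions;

// Hypothetical helper showing the validation pattern used by the fix.
final class ReplicationSourceCheck {

  private ReplicationSourceCheck() {
  }

  // Fail fast if a replication command names no source replicas; downloading
  // from an empty source list could otherwise fail much later and less clearly.
  static <T> List<T> requireSources(long containerID, List<T> sources) {
    Preconditions.checkArgument(sources != null && !sources.isEmpty(),
        String.format("Replication command is received for container %d "
            + "but the size of source datanodes was 0.", containerID));
    return sources;
  }
}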

hadoop git commit: HDDS-432. Replication of closed containers is not working. Contributed by Elek, Marton.

2018-09-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 c074647c9 -> e6ece1506


HDDS-432. Replication of closed containers is not working.
Contributed by Elek, Marton.

(cherry picked from commit 9c238ffc301c9aa1ae0f811c065e7426b1e23540)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6ece150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6ece150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6ece150

Branch: refs/heads/ozone-0.2
Commit: e6ece1506dcf85e39537244f4d9614360c8ce98b
Parents: c074647
Author: Anu Engineer 
Authored: Tue Sep 11 17:00:04 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 11 17:06:17 2018 -0700

--
 .../ReplicateContainerCommandHandler.java   |  15 ++-
 .../TestReplicateContainerCommandHandler.java   |  19 +++-
 .../replication/ReplicationManager.java |  18 +++-
 .../replication/TestReplicationManager.java | 104 +++
 4 files changed, 105 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ece150/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
index d1895a8..cb677c2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -19,11 +19,13 @@ package 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 import java.io.FileInputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -44,6 +46,7 @@ import 
org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,13 +100,19 @@ public class ReplicateContainerCommandHandler implements 
CommandHandler {
 ReplicateContainerCommand replicateCommand =
 (ReplicateContainerCommand) command;
 try {
-
+  List sourceDatanodes =
+  replicateCommand.getSourceDatanodes();
   long containerID = replicateCommand.getContainerID();
+
+  Preconditions.checkArgument(sourceDatanodes.size() > 0,
+  String.format("Replication command is received for container %d "
+  + "but the size of source datanodes was 0.", containerID));
+
   LOG.info("Starting replication of container {} from {}", containerID,
-  replicateCommand.getSourceDatanodes());
+  sourceDatanodes);
   CompletableFuture tempTarFile = downloader
   .getContainerDataFromReplicas(containerID,
-  replicateCommand.getSourceDatanodes());
+  sourceDatanodes);
 
   CompletableFuture result =
   tempTarFile.thenAccept(path -> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ece150/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
index 6a14d33..6529922 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/co

[5/5] hadoop git commit: HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone. Contributed by Nanda kumar.

2018-03-27 Thread aengineer
HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone.
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3440ca6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3440ca6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3440ca6e

Branch: refs/heads/HDFS-7240
Commit: 3440ca6e0c76bd50854eb5b72fa1486cfe4b6575
Parents: ab8fb01
Author: Anu Engineer 
Authored: Tue Mar 27 12:55:26 2018 -0700
Committer: Anu Engineer 
Committed: Tue Mar 27 12:55:26 2018 -0700

--
 .../src/main/compose/cblock/docker-config   |   2 +-
 .../src/main/compose/ozone/docker-config|   2 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 -
 .../org/apache/hadoop/scm/XceiverClient.java|   6 +-
 .../apache/hadoop/scm/XceiverClientHandler.java |   2 +-
 .../apache/hadoop/scm/XceiverClientRatis.java   |  10 +-
 .../java/org/apache/hadoop/hdsl/HdslUtils.java  |   2 +-
 .../hadoop/hdsl/protocol/DatanodeDetails.java   | 422 +++
 .../hadoop/hdsl/protocol/package-info.java  |  22 +
 .../org/apache/hadoop/scm/XceiverClientSpi.java |   4 +-
 .../scm/container/common/helpers/Pipeline.java  |   8 +-
 .../common/helpers/PipelineChannel.java |  19 +-
 .../scm/storage/ContainerProtocolCalls.java |  40 +-
 .../main/java/org/apache/ratis/RatisHelper.java |  19 +-
 .../main/proto/DatanodeContainerProtocol.proto  |   2 +-
 hadoop-hdsl/common/src/main/proto/hdsl.proto|  16 +-
 .../hadoop/ozone/HdslDatanodeService.java   | 140 ++
 .../apache/hadoop/ozone/HdslServerPlugin.java   |  82 
 .../common/helpers/ContainerUtils.java  |  72 +---
 .../common/impl/ContainerManagerImpl.java   |  16 +-
 .../common/interfaces/ContainerManager.java |   6 +-
 .../statemachine/DatanodeStateMachine.java  |  26 +-
 .../states/datanode/InitDatanodeState.java  |  32 +-
 .../states/datanode/RunningDatanodeState.java   |  82 +---
 .../states/endpoint/HeartbeatEndpointTask.java  |  45 +-
 .../states/endpoint/RegisterEndpointTask.java   |  43 +-
 .../common/transport/server/XceiverServer.java  |   4 +-
 .../server/ratis/XceiverServerRatis.java|  21 +-
 .../container/ozoneimpl/OzoneContainer.java |  13 +-
 .../StorageContainerDatanodeProtocol.java   |  10 +-
 .../protocol/StorageContainerNodeProtocol.java  |  11 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  16 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |   8 +-
 .../StorageContainerDatanodeProtocol.proto  |  28 +-
 .../ozone/container/common/ScmTestMock.java |  24 +-
 .../common/TestDatanodeStateMachine.java|  32 +-
 .../ozone/scm/StorageContainerManager.java  |  43 +-
 .../block/DatanodeDeletedBlockTransactions.java |  26 +-
 .../scm/block/SCMBlockDeletingService.java  |  17 +-
 .../ozone/scm/container/ContainerMapping.java   |   2 +-
 .../scm/container/closer/ContainerCloser.java   |   8 +-
 .../algorithms/ContainerPlacementPolicy.java|   4 +-
 .../placement/algorithms/SCMCommonPolicy.java   |  29 +-
 .../SCMContainerPlacementCapacity.java  |  32 +-
 .../algorithms/SCMContainerPlacementRandom.java |  12 +-
 .../replication/ContainerSupervisor.java|  13 +-
 .../container/replication/InProgressPool.java   |  31 +-
 .../hadoop/ozone/scm/node/CommandQueue.java |  20 +-
 .../ozone/scm/node/HeartbeatQueueItem.java  |  22 +-
 .../hadoop/ozone/scm/node/NodeManager.java  |  25 +-
 .../hadoop/ozone/scm/node/NodePoolManager.java  |  12 +-
 .../hadoop/ozone/scm/node/SCMNodeManager.java   | 155 ---
 .../ozone/scm/node/SCMNodePoolManager.java  |  41 +-
 .../ozone/scm/pipelines/PipelineManager.java|   8 +-
 .../ozone/scm/pipelines/PipelineSelector.java   |  23 +-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  19 +-
 .../standalone/StandaloneManagerImpl.java   |  18 +-
 .../ozone/container/common/TestEndPoint.java|  49 ++-
 .../placement/TestContainerPlacement.java   |  24 +-
 .../replication/TestContainerSupervisor.java|  15 +-
 .../ReplicationDatanodeStateManager.java|   8 +-
 .../testutils/ReplicationNodeManagerMock.java   |  48 ++-
 .../ReplicationNodePoolManagerMock.java |  23 +-
 .../org/apache/hadoop/ozone/scm/TestUtils.java  |  58 +--
 .../ozone/scm/block/TestBlockManager.java   |   8 +-
 .../ozone/scm/block/TestDeletedBlockLog.java|  44 +-
 .../ozone/scm/container/MockNodeManager.java| 105 ++---
 .../scm/container/TestContainerMapping.java |  19 +-
 .../container/closer/TestContainerCloser.java   |  20 +-
 .../ozone/scm/node/TestContainerPlacement.java  |  11 +-
 .../hadoop/ozone/scm/node/TestNodeManager.java  | 227 +-
 .../ozone/scm/node/TestSCMNodePoolManager.java  |  31 +-
 .../scm/cli/container/InfoContainerHandler.java |   4 +-
 .../hadoop/ozone/web/utils/OzoneUtils.java  |   4 +-
 ...
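
Across this series the HDFS type DatanodeID is replaced by an HDSL-owned DatanodeDetails, so node identity in SCM is a UUID reached through getUuid() and carried over protobuf as DatanodeDetailsProto. The stand-in below is deliberately tiny and only shows the shape the later hunks rely on (UUID identity, ordering so instances can live in a TreeSet); the real class is several hundred lines and protobuf-backed.

import java.util.UUID;

// Hypothetical, trimmed-down stand-in for hdsl.protocol.DatanodeDetails.
public final class DatanodeDetailsSketch implements Comparable<DatanodeDetailsSketch> {

  private final UUID uuid;        // stable identity used by SCM maps and sets
  private final String hostName;
  private final String ipAddress;

  private DatanodeDetailsSketch(UUID uuid, String hostName, String ipAddress) {
    this.uuid = uuid;
    this.hostName = hostName;
    this.ipAddress = ipAddress;
  }

  public UUID getUuid() {          // the accessor the tests switch to
    return uuid;
  }

  public String getHostName() {
    return hostName;
  }

  public String getIpAddress() {
    return ipAddress;
  }

  @Override
  public int compareTo(DatanodeDetailsSketch other) {
    return uuid.compareTo(other.uuid);
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof DatanodeDetailsSketch
        && uuid.equals(((DatanodeDetailsSketch) o).uuid);
  }

  @Override
  public int hashCode() {
    return uuid.hashCode();
  }

  // Factory in the spirit of the test helper that builds random datanodes.
  public static DatanodeDetailsSketch newRandom() {
    return new DatanodeDetailsSketch(UUID.randomUUID(), "localhost", "127.0.0.1");
  }
}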

[2/5] hadoop git commit: HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone. Contributed by Nanda kumar.

2018-03-27 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
--
diff --git 
a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
 
b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
index 4616799..cec02de 100644
--- 
a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
+++ 
b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.scm.container;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.hdsl.protocol.proto
@@ -34,7 +34,6 @@ import 
org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeID;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -118,7 +117,7 @@ public class TestContainerMapping {
 5 separate nodes  from the list of 10 datanodes that got allocated a
 container.
  */
-Set pipelineList = new TreeSet<>();
+Set pipelineList = new TreeSet<>();
 for (int x = 0; x < 30; x++) {
   ContainerInfo containerInfo = mapping.allocateContainer(
   xceiverClientManager.getType(),
@@ -128,7 +127,7 @@ public class TestContainerMapping {
   Assert.assertNotNull(containerInfo);
   Assert.assertNotNull(containerInfo.getPipeline());
   pipelineList.add(containerInfo.getPipeline().getLeader()
-  .getDatanodeUuid());
+  .getUuid());
 }
 Assert.assertTrue(pipelineList.size() > 5);
   }
@@ -142,8 +141,8 @@ public class TestContainerMapping {
 containerOwner).getPipeline();
 Assert.assertNotNull(pipeline);
 Pipeline newPipeline = mapping.getContainer(containerName).getPipeline();
-Assert.assertEquals(pipeline.getLeader().getDatanodeUuid(),
-newPipeline.getLeader().getDatanodeUuid());
+Assert.assertEquals(pipeline.getLeader().getUuid(),
+newPipeline.getLeader().getUuid());
   }
 
   @Test
@@ -209,7 +208,7 @@ public class TestContainerMapping {
   public void testFullContainerReport() throws IOException {
 String containerName = UUID.randomUUID().toString();
 ContainerInfo info = createContainer(containerName);
-DatanodeID datanodeID = getDatanodeID();
+DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
 ContainerReportsRequestProto.reportType reportType =
 ContainerReportsRequestProto.reportType.fullReport;
 List reports =
@@ -232,7 +231,7 @@ public class TestContainerMapping {
 
 ContainerReportsRequestProto.Builder crBuilder =
 ContainerReportsRequestProto.newBuilder();
-crBuilder.setDatanodeID(datanodeID.getProtoBufMessage())
+crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
 .setType(reportType).addAllReports(reports);
 
 mapping.processContainerReports(crBuilder.build());
@@ -246,7 +245,7 @@ public class TestContainerMapping {
   public void testContainerCloseWithContainerReport() throws IOException {
 String containerName = UUID.randomUUID().toString();
 ContainerInfo info = createContainer(containerName);
-DatanodeID datanodeID = TestUtils.getDatanodeID();
+DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
 ContainerReportsRequestProto.reportType reportType =
 ContainerReportsRequestProto.reportType.fullReport;
 List reports =
@@ -270,7 +269,7 @@ public class TestContainerMapping {
 
 ContainerReportsRequestProto.Builder crBuilder =
 ContainerReportsRequestProto.newBuilder();
-crBuilder.setDatanodeID(datanodeID.getProtoBufMessage())
+crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
 .setType(reportType).addAllReports(reports);
 
 mapping.processContainerReports(crBuilder.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
--
diff --git 
a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
 
b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
index dca026e..4a797b2 100644
--- 
a/hadoop-hdsl/server-scm/src/test/

[1/5] hadoop git commit: HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone. Contributed by Nanda kumar.

2018-03-27 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ab8fb0124 -> 3440ca6e0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
index 95c65ef..5fc6e04 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -83,15 +83,17 @@ public class TestRatisManager {
   cluster.waitOzoneReady();
 
   final List datanodes = cluster.getDataNodes();
-  final List allIds = datanodes.stream()
-  .map(DataNode::getDatanodeId).collect(Collectors.toList());
+  final List datanodeDetailsSet = datanodes.stream()
+  .map(MiniOzoneClassicCluster::getDatanodeDetails).collect(
+  Collectors.toList());
 
   //final RatisManager manager = RatisManager.newRatisManager(conf);
 
   final int[] idIndex = {3, 4, 5};
   for (int i = 0; i < idIndex.length; i++) {
 final int previous = i == 0 ? 0 : idIndex[i - 1];
-final List subIds = allIds.subList(previous, idIndex[i]);
+final List subIds = datanodeDetailsSet.subList(
+previous, idIndex[i]);
 
 // Create Ratis cluster
 final String ratisId = "ratis" + i;
@@ -99,7 +101,7 @@ public class TestRatisManager {
 LOG.info("Created RatisCluster " + ratisId);
 
 // check Ratis cluster members
-//final List dns = manager.getMembers(ratisId);
+//final List dns = manager.getMembers(ratisId);
 //Assert.assertEquals(subIds, dns);
   }
 
@@ -119,7 +121,7 @@ public class TestRatisManager {
   //manager.updatePipeline(ratisId, allIds);
 
   // check Ratis cluster members
-  //final List dns = manager.getMembers(ratisId);
+  //final List dns = manager.getMembers(ratisId);
   //Assert.assertEquals(allIds, dns);
 } finally {
   cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 1cf7732..cfe6a39 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.ozone.container.server;
 
 import io.netty.channel.embedded.EmbeddedChannel;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import 
org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.RatisTestHelper;
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
 import 
org.apache.hadoop.ozone.container.common.transport.server.XceiverServerHandler;
 import 
org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.scm.XceiverClientRatis;
@@ -95,11 +96,13 @@ public class TestContainerServer {
 
   @Test
   public void testClientServer() throws Exception {
+DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
 runTestClientServer(1,
 (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CON

[4/5] hadoop git commit: HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone. Contributed by Nanda kumar.

2018-03-27 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
--
diff --git 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
 
b/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 542161f..bfe6a28 100644
--- 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ 
b/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -18,12 +18,11 @@ package 
org.apache.hadoop.ozone.container.common.states.endpoint;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto;
 import org.apache.hadoop.ozone.container.common.statemachine
 .EndpointStateMachine;
 
-import org.apache.hadoop.hdsl.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,7 +41,7 @@ public final class RegisterEndpointTask implements
   private final EndpointStateMachine rpcEndPoint;
   private final Configuration conf;
   private Future result;
-  private ContainerNodeIDProto containerNodeIDProto;
+  private DatanodeDetailsProto datanodeDetailsProto;
 
   /**
* Creates a register endpoint task.
@@ -59,22 +58,22 @@ public final class RegisterEndpointTask implements
   }
 
   /**
-   * Get the ContainerNodeID Proto.
+   * Get the DatanodeDetailsProto Proto.
*
-   * @return ContainerNodeIDProto
+   * @return DatanodeDetailsProto
*/
-  public ContainerNodeIDProto getContainerNodeIDProto() {
-return containerNodeIDProto;
+  public DatanodeDetailsProto getDatanodeDetailsProto() {
+return datanodeDetailsProto;
   }
 
   /**
* Set the contiainerNodeID Proto.
*
-   * @param containerNodeIDProto - Container Node ID.
+   * @param datanodeDetailsProto - Container Node ID.
*/
-  public void setContainerNodeIDProto(ContainerNodeIDProto
-  containerNodeIDProto) {
-this.containerNodeIDProto = containerNodeIDProto;
+  public void setDatanodeDetailsProto(
+  DatanodeDetailsProto datanodeDetailsProto) {
+this.datanodeDetailsProto = datanodeDetailsProto;
   }
 
   /**
@@ -86,7 +85,7 @@ public final class RegisterEndpointTask implements
   @Override
   public EndpointStateMachine.EndPointStates call() throws Exception {
 
-if (getContainerNodeIDProto() == null) {
+if (getDatanodeDetailsProto() == null) {
   LOG.error("Container ID proto cannot be null in RegisterEndpoint task, " 
+
   "shutting down the endpoint.");
   return 
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
@@ -94,11 +93,9 @@ public final class RegisterEndpointTask implements
 
 rpcEndPoint.lock();
 try {
-  DatanodeID dnNodeID = DatanodeID.getFromProtoBuf(
-  getContainerNodeIDProto().getDatanodeID());
 
   // TODO : Add responses to the command Queue.
-  rpcEndPoint.getEndPoint().register(dnNodeID,
+  rpcEndPoint.getEndPoint().register(datanodeDetailsProto,
   conf.getStrings(ScmConfigKeys.OZONE_SCM_NAMES));
   EndpointStateMachine.EndPointStates nextState =
   rpcEndPoint.getState().getNextState();
@@ -129,7 +126,7 @@ public final class RegisterEndpointTask implements
   public static class Builder {
 private EndpointStateMachine endPointStateMachine;
 private Configuration conf;
-private ContainerNodeIDProto containerNodeIDProto;
+private DatanodeDetails datanodeDetails;
 
 /**
  * Constructs the builder class.
@@ -162,11 +159,11 @@ public final class RegisterEndpointTask implements
 /**
  * Sets the NodeID.
  *
- * @param nodeID - NodeID proto
+ * @param dnDetails - DatanodeDetails of the datanode
  * @return Builder
  */
-public Builder setNodeID(ContainerNodeIDProto nodeID) {
-  this.containerNodeIDProto = nodeID;
+public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
+  this.datanodeDetails = dnDetails;
   return this;
 }
 
@@ -183,15 +180,15 @@ public final class RegisterEndpointTask implements
 " construct RegisterEndpoint task");
   }
 
-  if (containerNodeIDProto == null) {
-LOG.error("No nodeID specified.");
+  if (datanodeDetails == null) {
+LOG.error("No datanode specified.");
 throw new IllegalArgumentException("A valid Node ID is needed to " +
 "constr

[3/5] hadoop git commit: HDFS-13300. Ozone: Remove DatanodeID dependency from HDSL and Ozone. Contributed by Nanda kumar.

2018-03-27 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
--
diff --git 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
 
b/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
index a5e1bac..31915b1 100644
--- 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.ozone.scm.node;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
@@ -62,11 +64,13 @@ import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ScheduledExecutorService;
@@ -115,13 +119,13 @@ public class SCMNodeManager
   /**
* Key = NodeID, value = timestamp.
*/
-  private final ConcurrentHashMap<String, Long> healthyNodes;
-  private final ConcurrentHashMap<String, Long> staleNodes;
-  private final ConcurrentHashMap<String, Long> deadNodes;
+  private final ConcurrentHashMap<UUID, Long> healthyNodes;
+  private final ConcurrentHashMap<UUID, Long> staleNodes;
+  private final ConcurrentHashMap<UUID, Long> deadNodes;
   private final Queue heartbeatQueue;
-  private final ConcurrentHashMap<String, DatanodeID> nodes;
+  private final ConcurrentHashMap<UUID, DatanodeDetails> nodes;
   // Individual live node stats
-  private final ConcurrentHashMap<String, SCMNodeStat> nodeStats;
+  private final ConcurrentHashMap<UUID, SCMNodeStat> nodeStats;
   // Aggregated node stats
   private SCMNodeStat scmStat;
   // TODO: expose nodeStats and scmStat as metrics
@@ -170,7 +174,7 @@ public class SCMNodeManager
 deadNodes = new ConcurrentHashMap<>();
 staleNodes = new ConcurrentHashMap<>();
 nodes = new ConcurrentHashMap<>();
-nodeStats = new ConcurrentHashMap();
+nodeStats = new ConcurrentHashMap<>();
 scmStat = new SCMNodeStat();
 
 healthyNodeCount = new AtomicInteger(0);
@@ -228,7 +232,7 @@ public class SCMNodeManager
* @throws UnregisteredNodeException
*/
   @Override
-  public void removeNode(DatanodeID node) throws UnregisteredNodeException {
+  public void removeNode(DatanodeDetails node) {
 // TODO : Fix me when adding the SCM CLI.
 
   }
@@ -242,9 +246,9 @@ public class SCMNodeManager
* @return List of Datanodes that are known to SCM in the requested state.
*/
   @Override
-  public List getNodes(NodeState nodestate)
+  public List getNodes(NodeState nodestate)
   throws IllegalArgumentException {
-Map set;
+Map set;
 switch (nodestate) {
 case HEALTHY:
   synchronized (this) {
@@ -272,11 +276,11 @@ public class SCMNodeManager
   /**
* Returns all datanodes that are known to SCM.
*
-   * @return List of DatanodeIDs
+   * @return List of DatanodeDetails
*/
   @Override
-  public List getAllNodes() {
-Map set;
+  public List getAllNodes() {
+Map set;
 synchronized (this) {
   set = Collections.unmodifiableMap(new HashMap<>(nodes));
 }
@@ -406,11 +410,11 @@ public class SCMNodeManager
   /**
* Returns the node state of a specific node.
*
-   * @param id - DatanodeID
+   * @param datanodeDetails - Datanode Details
* @return Healthy/Stale/Dead/Unknown.
*/
   @Override
-  public NodeState getNodeState(DatanodeID id) {
+  public NodeState getNodeState(DatanodeDetails datanodeDetails) {
 // There is a subtle race condition here, hence we also support
 // the NODEState.UNKNOWN. It is possible that just before we check the
 // healthyNodes, we have removed the node from the healthy list but stil
@@ -419,15 +423,16 @@ public class SCMNodeManager
 // then the node is in 2 states to avoid this race condition. Instead we
// just deal with the possibility of getting a state called unknown.
 
-if(healthyNodes.containsKey(id.getDatanodeUuid())) {
+UUID id = datanodeDetails.getUuid();
+if(healthyNodes.containsKey(id)) {
   return HEALTHY;
 }
 
-if(staleNodes.containsKey(id.getDatanodeUuid())) {
+if(staleNodes.cont

[09/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/tcp.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/tcp.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/tcp.hpp
new file mode 100644
index 000..f90a55c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/tcp.hpp
@@ -0,0 +1,155 @@
+//
+// ip/tcp.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_IP_TCP_HPP
+#define ASIO_IP_TCP_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/basic_socket_acceptor.hpp"
+#include "asio/basic_socket_iostream.hpp"
+#include "asio/basic_stream_socket.hpp"
+#include "asio/detail/socket_option.hpp"
+#include "asio/detail/socket_types.hpp"
+#include "asio/ip/basic_endpoint.hpp"
+#include "asio/ip/basic_resolver.hpp"
+#include "asio/ip/basic_resolver_iterator.hpp"
+#include "asio/ip/basic_resolver_query.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace ip {
+
+/// Encapsulates the flags needed for TCP.
+/**
+ * The asio::ip::tcp class contains flags necessary for TCP sockets.
+ *
+ * @par Thread Safety
+ * @e Distinct @e objects: Safe.@n
+ * @e Shared @e objects: Safe.
+ *
+ * @par Concepts:
+ * Protocol, InternetProtocol.
+ */
+class tcp
+{
+public:
+  /// The type of a TCP endpoint.
+  typedef basic_endpoint<tcp> endpoint;
+
+  /// Construct to represent the IPv4 TCP protocol.
+  static tcp v4()
+  {
+return tcp(ASIO_OS_DEF(AF_INET));
+  }
+
+  /// Construct to represent the IPv6 TCP protocol.
+  static tcp v6()
+  {
+return tcp(ASIO_OS_DEF(AF_INET6));
+  }
+
+  /// Obtain an identifier for the type of the protocol.
+  int type() const
+  {
+return ASIO_OS_DEF(SOCK_STREAM);
+  }
+
+  /// Obtain an identifier for the protocol.
+  int protocol() const
+  {
+return ASIO_OS_DEF(IPPROTO_TCP);
+  }
+
+  /// Obtain an identifier for the protocol family.
+  int family() const
+  {
+return family_;
+  }
+
+  /// The TCP socket type.
+  typedef basic_stream_socket<tcp> socket;
+
+  /// The TCP acceptor type.
+  typedef basic_socket_acceptor<tcp> acceptor;
+
+  /// The TCP resolver type.
+  typedef basic_resolver<tcp> resolver;
+
+#if !defined(ASIO_NO_IOSTREAM)
+  /// The TCP iostream type.
+  typedef basic_socket_iostream<tcp> iostream;
+#endif // !defined(ASIO_NO_IOSTREAM)
+
+  /// Socket option for disabling the Nagle algorithm.
+  /**
+   * Implements the IPPROTO_TCP/TCP_NODELAY socket option.
+   *
+   * @par Examples
+   * Setting the option:
+   * @code
+   * asio::ip::tcp::socket socket(io_service); 
+   * ...
+   * asio::ip::tcp::no_delay option(true);
+   * socket.set_option(option);
+   * @endcode
+   *
+   * @par
+   * Getting the current option value:
+   * @code
+   * asio::ip::tcp::socket socket(io_service); 
+   * ...
+   * asio::ip::tcp::no_delay option;
+   * socket.get_option(option);
+   * bool is_set = option.value();
+   * @endcode
+   *
+   * @par Concepts:
+   * Socket_Option, Boolean_Socket_Option.
+   */
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined no_delay;
+#else
+  typedef asio::detail::socket_option::boolean<
+ASIO_OS_DEF(IPPROTO_TCP), ASIO_OS_DEF(TCP_NODELAY)> no_delay;
+#endif
+
+  /// Compare two protocols for equality.
+  friend bool operator==(const tcp& p1, const tcp& p2)
+  {
+return p1.family_ == p2.family_;
+  }
+
+  /// Compare two protocols for inequality.
+  friend bool operator!=(const tcp& p1, const tcp& p2)
+  {
+return p1.family_ != p2.family_;
+  }
+
+private:
+  // Construct with a specific family.
+  explicit tcp(int protocol_family)
+: family_(protocol_family)
+  {
+  }
+
+  int family_;
+};
+
+} // namespace ip
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_IP_TCP_HPP
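
A minimal standalone usage sketch for the asio::ip::tcp typedefs above (not part
of the patch; host and port are placeholders): resolve an address, connect a
socket, and set the no_delay option documented earlier.

#include <iostream>
#include "asio.hpp"

int main() {
  asio::io_service io_service;

  // Resolve a placeholder host/port pair into TCP endpoints.
  asio::ip::tcp::resolver resolver(io_service);
  asio::ip::tcp::resolver::query query("example.com", "80");
  asio::ip::tcp::resolver::iterator endpoints = resolver.resolve(query);

  // Connect a stream socket to one of the resolved endpoints.
  asio::ip::tcp::socket socket(io_service);
  asio::connect(socket, endpoints);

  // Disable Nagle's algorithm, as described for tcp::no_delay above.
  asio::ip::tcp::no_delay option(true);
  socket.set_option(option);

  std::cout << "connected to " << socket.remote_endpoint() << std::endl;
  return 0;
}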

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/udp.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/udp.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/udp.hpp
new file mode 100644
index 000..dd2d27d
--- /dev/nul

[22/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/kqueue_reactor.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/kqueue_reactor.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/kqueue_reactor.hpp
new file mode 100644
index 000..ddd6c8a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/kqueue_reactor.hpp
@@ -0,0 +1,219 @@
+//
+// detail/kqueue_reactor.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_KQUEUE_REACTOR_HPP
+#define ASIO_DETAIL_KQUEUE_REACTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_KQUEUE)
+
+#include 
+#include 
+#include 
+#include 
+#include "asio/detail/limits.hpp"
+#include "asio/detail/mutex.hpp"
+#include "asio/detail/object_pool.hpp"
+#include "asio/detail/op_queue.hpp"
+#include "asio/detail/reactor_op.hpp"
+#include "asio/detail/select_interrupter.hpp"
+#include "asio/detail/socket_types.hpp"
+#include "asio/detail/timer_queue_base.hpp"
+#include "asio/detail/timer_queue_set.hpp"
+#include "asio/detail/wait_op.hpp"
+#include "asio/error.hpp"
+#include "asio/io_service.hpp"
+
+// Older versions of Mac OS X may not define EV_OOBAND.
+#if !defined(EV_OOBAND)
+# define EV_OOBAND EV_FLAG1
+#endif // !defined(EV_OOBAND)
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+class kqueue_reactor
+  : public asio::detail::service_base
+{
+public:
+  enum op_types { read_op = 0, write_op = 1,
+connect_op = 1, except_op = 2, max_ops = 3 };
+
+  // Per-descriptor queues.
+  struct descriptor_state
+  {
+friend class kqueue_reactor;
+friend class object_pool_access;
+
+descriptor_state* next_;
+descriptor_state* prev_;
+
+mutex mutex_;
+int descriptor_;
+op_queue op_queue_[max_ops];
+bool shutdown_;
+  };
+
+  // Per-descriptor data.
+  typedef descriptor_state* per_descriptor_data;
+
+  // Constructor.
+  ASIO_DECL kqueue_reactor(asio::io_service& io_service);
+
+  // Destructor.
+  ASIO_DECL ~kqueue_reactor();
+
+  // Destroy all user-defined handler objects owned by the service.
+  ASIO_DECL void shutdown_service();
+
+  // Recreate internal descriptors following a fork.
+  ASIO_DECL void fork_service(
+  asio::io_service::fork_event fork_ev);
+
+  // Initialise the task.
+  ASIO_DECL void init_task();
+
+  // Register a socket with the reactor. Returns 0 on success, system error
+  // code on failure.
+  ASIO_DECL int register_descriptor(socket_type descriptor,
+  per_descriptor_data& descriptor_data);
+
+  // Register a descriptor with an associated single operation. Returns 0 on
+  // success, system error code on failure.
+  ASIO_DECL int register_internal_descriptor(
+  int op_type, socket_type descriptor,
+  per_descriptor_data& descriptor_data, reactor_op* op);
+
+  // Move descriptor registration from one descriptor_data object to another.
+  ASIO_DECL void move_descriptor(socket_type descriptor,
+  per_descriptor_data& target_descriptor_data,
+  per_descriptor_data& source_descriptor_data);
+
+  // Post a reactor operation for immediate completion.
+  void post_immediate_completion(reactor_op* op, bool is_continuation)
+  {
+io_service_.post_immediate_completion(op, is_continuation);
+  }
+
+  // Start a new operation. The reactor operation will be performed when the
+  // given descriptor is flagged as ready, or an error has occurred.
+  ASIO_DECL void start_op(int op_type, socket_type descriptor,
+  per_descriptor_data& descriptor_data, reactor_op* op,
+  bool is_continuation, bool allow_speculative);
+
+  // Cancel all operations associated with the given descriptor. The
+  // handlers associated with the descriptor will be invoked with the
+  // operation_aborted error.
+  ASIO_DECL void cancel_ops(socket_type descriptor,
+  per_descriptor_data& descriptor_data);
+
+  // Cancel any operations that are running against the descriptor and remove
+  // its registration from the reactor.
+  ASIO_DECL void deregister_descriptor(socket_type descriptor,
+  per_descriptor_data& descriptor_data, bool closing);
+
+  // Remove the descriptor's registration from the reactor.
+  ASIO_DECL void deregis
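
The reactor above is internal to asio; a standalone sketch of the raw kqueue(2)
pattern it wraps (BSD/macOS only) may help make the register_descriptor and
start_op flow concrete. This is illustrative code, not part of the patch.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int kq = kqueue();                        // the kernel event queue
  if (kq < 0) { std::perror("kqueue"); return 1; }

  // Register interest in readability of stdin, analogous to a read_op
  // registration in register_internal_descriptor().
  struct kevent change;
  EV_SET(&change, 0 /* fd */, EVFILT_READ, EV_ADD, 0, 0, nullptr);

  // Block until the descriptor becomes ready, like the reactor's run step.
  struct kevent event;
  int n = kevent(kq, &change, 1, &event, 1, nullptr);
  if (n > 0)
    std::printf("fd %lu readable, %ld bytes buffered\n",
                (unsigned long)event.ident, (long)event.data);

  close(kq);
  return 0;
}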

[27/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.ipp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.ipp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.ipp
new file mode 100644
index 000..1f365df
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.ipp
@@ -0,0 +1,445 @@
+//
+// detail/impl/dev_poll_reactor.ipp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
+#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_DEV_POLL)
+
+#include "asio/detail/dev_poll_reactor.hpp"
+#include "asio/detail/assert.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+dev_poll_reactor::dev_poll_reactor(asio::io_service& io_service)
+  : asio::detail::service_base(io_service),
+io_service_(use_service(io_service)),
+mutex_(),
+dev_poll_fd_(do_dev_poll_create()),
+interrupter_(),
+shutdown_(false)
+{
+  // Add the interrupter's descriptor to /dev/poll.
+  ::pollfd ev = { 0, 0, 0 };
+  ev.fd = interrupter_.read_descriptor();
+  ev.events = POLLIN | POLLERR;
+  ev.revents = 0;
+  ::write(dev_poll_fd_, &ev, sizeof(ev));
+}
+
+dev_poll_reactor::~dev_poll_reactor()
+{
+  shutdown_service();
+  ::close(dev_poll_fd_);
+}
+
+void dev_poll_reactor::shutdown_service()
+{
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  shutdown_ = true;
+  lock.unlock();
+
+  op_queue ops;
+
+  for (int i = 0; i < max_ops; ++i)
+op_queue_[i].get_all_operations(ops);
+
+  timer_queues_.get_all_timers(ops);
+
+  io_service_.abandon_operations(ops);
+} 
+
+// Helper class to re-register all descriptors with /dev/poll.
+class dev_poll_reactor::fork_helper
+{
+public:
+  fork_helper(dev_poll_reactor* reactor, short events)
+: reactor_(reactor), events_(events)
+  {
+  }
+
+  bool set(int descriptor)
+  {
+::pollfd& ev = reactor_->add_pending_event_change(descriptor);
+ev.events = events_;
+return true;
+  }
+
+private:
+  dev_poll_reactor* reactor_;
+  short events_;
+};
+
+void dev_poll_reactor::fork_service(asio::io_service::fork_event fork_ev)
+{
+  if (fork_ev == asio::io_service::fork_child)
+  {
+detail::mutex::scoped_lock lock(mutex_);
+
+if (dev_poll_fd_ != -1)
+  ::close(dev_poll_fd_);
+dev_poll_fd_ = -1;
+dev_poll_fd_ = do_dev_poll_create();
+
+interrupter_.recreate();
+
+// Add the interrupter's descriptor to /dev/poll.
+::pollfd ev = { 0, 0, 0 };
+ev.fd = interrupter_.read_descriptor();
+ev.events = POLLIN | POLLERR;
+ev.revents = 0;
+::write(dev_poll_fd_, &ev, sizeof(ev));
+
+// Re-register all descriptors with /dev/poll. The changes will be written
+// to the /dev/poll descriptor the next time the reactor is run.
+op_queue ops;
+fork_helper read_op_helper(this, POLLERR | POLLHUP | POLLIN);
+op_queue_[read_op].get_descriptors(read_op_helper, ops);
+fork_helper write_op_helper(this, POLLERR | POLLHUP | POLLOUT);
+op_queue_[write_op].get_descriptors(write_op_helper, ops);
+fork_helper except_op_helper(this, POLLERR | POLLHUP | POLLPRI);
+op_queue_[except_op].get_descriptors(except_op_helper, ops);
+interrupter_.interrupt();
+
+// The ops op_queue will always be empty because the fork_helper's set()
+// member function never returns false.
+ASIO_ASSERT(ops.empty());
+  }
+}
+
+void dev_poll_reactor::init_task()
+{
+  io_service_.init_task();
+}
+
+int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
+{
+  return 0;
+}
+
+int dev_poll_reactor::register_internal_descriptor(int op_type,
+socket_type descriptor, per_descriptor_data&, reactor_op* op)
+{
+  asio::detail::mutex::scoped_lock lock(mutex_);
+
+  op_queue_[op_type].enqueue_operation(descriptor, op);
+  ::pollfd& ev = add_pending_event_change(descriptor);
+  ev.events = POLLERR | POLLHUP;
+  switch (op_type)
+  {
+  case read_op: ev.events |= POLLIN; break;
+  case write_op: ev.events |= POLLOUT; break;
+  case except_op: ev.events |= POLLPRI; break;
+  default:

[13/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_at.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_at.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_at.hpp
new file mode 100644
index 000..0ce36bc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_at.hpp
@@ -0,0 +1,810 @@
+//
+// impl/read_at.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_IMPL_READ_AT_HPP
+#define ASIO_IMPL_READ_AT_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include 
+#include "asio/buffer.hpp"
+#include "asio/completion_condition.hpp"
+#include "asio/detail/array_fwd.hpp"
+#include "asio/detail/base_from_completion_cond.hpp"
+#include "asio/detail/bind_handler.hpp"
+#include "asio/detail/consuming_buffers.hpp"
+#include "asio/detail/dependent_type.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_cont_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+template 
+std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, const MutableBufferSequence& buffers,
+CompletionCondition completion_condition, asio::error_code& ec)
+{
+  ec = asio::error_code();
+  asio::detail::consuming_buffers<
+mutable_buffer, MutableBufferSequence> tmp(buffers);
+  std::size_t total_transferred = 0;
+  tmp.prepare(detail::adapt_completion_condition_result(
+completion_condition(ec, total_transferred)));
+  while (tmp.begin() != tmp.end())
+  {
+std::size_t bytes_transferred = d.read_some_at(
+offset + total_transferred, tmp, ec);
+tmp.consume(bytes_transferred);
+total_transferred += bytes_transferred;
+tmp.prepare(detail::adapt_completion_condition_result(
+  completion_condition(ec, total_transferred)));
+  }
+  return total_transferred;
+}
+
+template 
+inline std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, const MutableBufferSequence& buffers)
+{
+  asio::error_code ec;
+  std::size_t bytes_transferred = read_at(
+  d, offset, buffers, transfer_all(), ec);
+  asio::detail::throw_error(ec, "read_at");
+  return bytes_transferred;
+}
+
+template 
+inline std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, const MutableBufferSequence& buffers,
+asio::error_code& ec)
+{
+  return read_at(d, offset, buffers, transfer_all(), ec);
+}
+
+template 
+inline std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, const MutableBufferSequence& buffers,
+CompletionCondition completion_condition)
+{
+  asio::error_code ec;
+  std::size_t bytes_transferred = read_at(
+  d, offset, buffers, completion_condition, ec);
+  asio::detail::throw_error(ec, "read_at");
+  return bytes_transferred;
+}
+
+#if !defined(ASIO_NO_IOSTREAM)
+
+template 
+std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, asio::basic_streambuf& b,
+CompletionCondition completion_condition, asio::error_code& ec)
+{
+  ec = asio::error_code();
+  std::size_t total_transferred = 0;
+  std::size_t max_size = detail::adapt_completion_condition_result(
+completion_condition(ec, total_transferred));
+  std::size_t bytes_available = read_size_helper(b, max_size);
+  while (bytes_available > 0)
+  {
+std::size_t bytes_transferred = d.read_some_at(
+offset + total_transferred, b.prepare(bytes_available), ec);
+b.commit(bytes_transferred);
+total_transferred += bytes_transferred;
+max_size = detail::adapt_completion_condition_result(
+  completion_condition(ec, total_transferred));
+bytes_available = read_size_helper(b, max_size);
+  }
+  return total_transferred;
+}
+
+template 
+inline std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, asio::basic_streambuf& b)
+{
+  asio::error_code ec;
+  std::size_t bytes_transferred = read_at(
+  d, offset, b, transfer_all(), ec);
+  asio::detail::throw_error(ec, "read_at");
+  return bytes_transferred;
+}
+
+template 
+inline std::size_t read_at(SyncRandomAccessReadDevice& d,
+uint64_t offset, asio::basic_streambuf& b,
+asio::error_code& e
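
A short helper sketch showing how the synchronous read_at overloads above are
typically called (illustrative only; assumes a device type modeling
SyncRandomAccessReadDevice, e.g. asio::windows::random_access_handle):

#include <cstdint>
#include <vector>
#include "asio.hpp"

// Read exactly `size` bytes starting at `offset`, tolerating a short read at
// end-of-file. transfer_exactly() is the completion condition; read_at keeps
// calling read_some_at() until it is satisfied or an error occurs.
template <typename SyncRandomAccessReadDevice>
std::vector<char> read_block(SyncRandomAccessReadDevice& device,
                             uint64_t offset, std::size_t size)
{
  std::vector<char> block(size);
  asio::error_code ec;
  std::size_t n = asio::read_at(device, offset,
      asio::buffer(block), asio::transfer_exactly(size), ec);
  block.resize(n);
  if (ec && ec != asio::error::eof)
    throw asio::system_error(ec, "read_block");
  return block;
}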

[07/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_until.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_until.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_until.hpp
new file mode 100644
index 000..212b6f5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_until.hpp
@@ -0,0 +1,923 @@
+//
+// read_until.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_READ_UNTIL_HPP
+#define ASIO_READ_UNTIL_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_NO_IOSTREAM)
+
+#include 
+#include 
+#include "asio/async_result.hpp"
+#include "asio/basic_streambuf.hpp"
+#include "asio/detail/regex_fwd.hpp"
+#include "asio/detail/type_traits.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+namespace detail
+{
+  char (&has_result_type_helper(...))[2];
+
+  template <typename T>
+  char has_result_type_helper(T*, typename T::result_type* = 0);
+
+  template <typename T>
+  struct has_result_type
+  {
+enum { value = (sizeof((has_result_type_helper)((T*)(0))) == 1) };
+  };
+} // namespace detail
+
+/// Type trait used to determine whether a type can be used as a match 
condition
+/// function with read_until and async_read_until.
+template <typename T>
+struct is_match_condition
+{
+#if defined(GENERATING_DOCUMENTATION)
+  /// The value member is true if the type may be used as a match condition.
+  static const bool value;
+#else
+  enum
+  {
+value = asio::is_function<
+typename asio::remove_pointer<T>::type>::value
+  || detail::has_result_type<T>::value
+  };
+#endif
+};
+
+/**
+ * @defgroup read_until asio::read_until
+ *
+ * @brief Read data into a streambuf until it contains a delimiter, matches a
+ * regular expression, or a function object indicates a match.
+ */
+/*@{*/
+
+/// Read data into a streambuf until it contains a specified delimiter.
+/**
+ * This function is used to read data into the specified streambuf until the
+ * streambuf's get area contains the specified delimiter. The call will block
+ * until one of the following conditions is true:
+ *
+ * @li The get area of the streambuf contains the specified delimiter.
+ *
+ * @li An error occurred.
+ *
+ * This operation is implemented in terms of zero or more calls to the stream's
+ * read_some function. If the streambuf's get area already contains the
+ * delimiter, the function returns immediately.
+ *
+ * @param s The stream from which the data is to be read. The type must support
+ * the SyncReadStream concept.
+ *
+ * @param b A streambuf object into which the data will be read.
+ *
+ * @param delim The delimiter character.
+ *
+ * @returns The number of bytes in the streambuf's get area up to and including
+ * the delimiter.
+ *
+ * @throws asio::system_error Thrown on failure.
+ *
+ * @note After a successful read_until operation, the streambuf may contain
+ * additional data beyond the delimiter. An application will typically leave
+ * that data in the streambuf for a subsequent read_until operation to examine.
+ *
+ * @par Example
+ * To read data into a streambuf until a newline is encountered:
+ * @code asio::streambuf b;
+ * asio::read_until(s, b, '\n');
+ * std::istream is(&b);
+ * std::string line;
+ * std::getline(is, line); @endcode
+ * After the @c read_until operation completes successfully, the buffer @c b
+ * contains the delimiter:
+ * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode
+ * The call to @c std::getline then extracts the data up to and including the
+ * delimiter, so that the string @c line contains:
+ * @code { 'a', 'b', ..., 'c', '\n' } @endcode
+ * The remaining data is left in the buffer @c b as follows:
+ * @code { 'd', 'e', ... } @endcode
+ * This data may be the start of a new line, to be extracted by a subsequent
+ * @c read_until operation.
+ */
+template <typename SyncReadStream, typename Allocator>
+std::size_t read_until(SyncReadStream& s,
+asio::basic_streambuf<Allocator>& b, char delim);
+
+/// Read data into a streambuf until it contains a specified delimiter.
+/**
+ * This function is used to read data into the specified streambuf until the
+ * streambuf's get area contains the specified delimiter. The call will block
+ * until one of the following conditions is true:
+ *
+ * @li The get area of the streambuf contains the specified delimite
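
The docs above cover the char-delimiter form; the is_match_condition trait also
admits user-supplied matchers. A small sketch adapted from the pattern shown in
the asio documentation (a free function is accepted by the trait automatically,
since it passes the is_function check):

#include <cctype>
#include <utility>
#include "asio.hpp"

typedef asio::buffers_iterator<asio::streambuf::const_buffers_type>
    buf_iterator;

// Match condition: stop reading once any whitespace character is buffered.
std::pair<buf_iterator, bool> match_whitespace(buf_iterator begin,
                                               buf_iterator end)
{
  buf_iterator i = begin;
  while (i != end)
    if (std::isspace(static_cast<unsigned char>(*i++)))
      return std::make_pair(i, true);
  return std::make_pair(i, false);        // need more data
}

// Example call site, given any SyncReadStream `s` (e.g. a connected socket):
//   asio::streambuf b;
//   std::size_t n = asio::read_until(s, b, match_whitespace);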

[25/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_ops.ipp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_ops.ipp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_ops.ipp
new file mode 100644
index 000..c601210
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_ops.ipp
@@ -0,0 +1,3394 @@
+//
+// detail/impl/socket_ops.ipp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_SOCKET_OPS_IPP
+#define ASIO_DETAIL_SOCKET_OPS_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "asio/detail/assert.hpp"
+#include "asio/detail/socket_ops.hpp"
+#include "asio/error.hpp"
+
+#if defined(ASIO_WINDOWS_RUNTIME)
+# include 
+# include 
+# include 
+#endif // defined(ASIO_WINDOWS_RUNTIME)
+
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \
+  || defined(__MACH__) && defined(__APPLE__)
+# if defined(ASIO_HAS_PTHREADS)
+#  include 
+# endif // defined(ASIO_HAS_PTHREADS)
+#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+   // || defined(__MACH__) && defined(__APPLE__)
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+namespace socket_ops {
+
+#if !defined(ASIO_WINDOWS_RUNTIME)
+
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+struct msghdr { int msg_namelen; };
+#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+
+#if defined(__hpux)
+// HP-UX doesn't declare these functions extern "C", so they are declared again
+// here to avoid linker errors about undefined symbols.
+extern "C" char* if_indextoname(unsigned int, char*);
+extern "C" unsigned int if_nametoindex(const char*);
+#endif // defined(__hpux)
+
+#endif // !defined(ASIO_WINDOWS_RUNTIME)
+
+inline void clear_last_error()
+{
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+  WSASetLastError(0);
+#else
+  errno = 0;
+#endif
+}
+
+#if !defined(ASIO_WINDOWS_RUNTIME)
+
+template 
+inline ReturnType error_wrapper(ReturnType return_value,
+asio::error_code& ec)
+{
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+  ec = asio::error_code(WSAGetLastError(),
+  asio::error::get_system_category());
+#else
+  ec = asio::error_code(errno,
+  asio::error::get_system_category());
+#endif
+  return return_value;
+}
+
+template 
+inline socket_type call_accept(SockLenType msghdr::*,
+socket_type s, socket_addr_type* addr, std::size_t* addrlen)
+{
+  SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0;
+  socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0);
+  if (addrlen)
+*addrlen = (std::size_t)tmp_addrlen;
+  return result;
+}
+
+socket_type accept(socket_type s, socket_addr_type* addr,
+std::size_t* addrlen, asio::error_code& ec)
+{
+  if (s == invalid_socket)
+  {
+ec = asio::error::bad_descriptor;
+return invalid_socket;
+  }
+
+  clear_last_error();
+
+  socket_type new_s = error_wrapper(call_accept(
+&msghdr::msg_namelen, s, addr, addrlen), ec);
+  if (new_s == invalid_socket)
+return new_s;
+
+#if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
+  int optval = 1;
+  int result = error_wrapper(::setsockopt(new_s,
+SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);
+  if (result != 0)
+  {
+::close(new_s);
+return invalid_socket;
+  }
+#endif
+
+  ec = asio::error_code();
+  return new_s;
+}
+
+socket_type sync_accept(socket_type s, state_type state,
+socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec)
+{
+  // Accept a socket.
+  for (;;)
+  {
+// Try to complete the operation without blocking.
+socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec);
+
+// Check if operation succeeded.
+if (new_socket != invalid_socket)
+  return new_socket;
+
+// Operation failed.
+if (ec == asio::error::would_block
+|| ec == asio::error::try_again)
+{
+  if (state & user_set_non_blocking)
+return invalid_socket;
+  // Fall through to retry operation.
+}
+else if (ec == asio::error::connection_aborted)
+{
+  if (state & enable_connection_aborted)
+return invalid_socket;
+  // Fall through to retry operation.
+}
+#if
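
A plain POSIX sketch of the retry policy that sync_accept() above implements
(illustrative, not asio code; assumes `listen_fd` is a listening socket):

#include <sys/socket.h>
#include <poll.h>
#include <cerrno>

// Retry accept() after EWOULDBLOCK/EAGAIN (waiting for readability first)
// and after ECONNABORTED; return -1 with errno set for any other failure.
int accept_retrying(int listen_fd)
{
  for (;;)
  {
    int fd = ::accept(listen_fd, nullptr, nullptr);
    if (fd >= 0)
      return fd;                                   // success

    if (errno == EWOULDBLOCK || errno == EAGAIN)
    {
      // Descriptor not ready yet: block until it is, then retry.
      struct pollfd p = { listen_fd, POLLIN, 0 };
      if (::poll(&p, 1, -1) < 0)
        return -1;
      continue;
    }
    if (errno == ECONNABORTED)
      continue;                                    // peer aborted: retry
    return -1;                                     // genuine error
  }
}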

[43/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
new file mode 100644
index 000..53c9e26
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
@@ -0,0 +1,607 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "filesystem.h"
+
+#include 
+#include 
+
+#define FMT_THIS_ADDR "this=" << (void*)this
+
+// Note: This is just a place to hold boilerplate async to sync shim code,
+//   place actual filesystem logic in filesystem.cc
+//
+//
+// Shim pattern pseudocode
+//
+// Status MySynchronizedMethod(method_args):
+//  let stat = a promise wrapped in a shared_ptr
+//
+//  Create a lambda that captures stat and any other variables that need to
+//  be set based on the async operation.  When invoked set variables with the
+//  arguments passed (possibly do some translation), then set stat to indicate
+//  the return status of the async call.
+//
+//  invoke MyAsyncMethod(method_args, handler_lambda)
+//
+//  block until stat value has been set while async work takes place
+//
+//  return stat
+
+namespace hdfs {
+
+Status FileSystemImpl::Connect(const std::string &server, const std::string 
&service) {
+  LOG_INFO(kFileSystem, << "FileSystemImpl::[sync]Connect(" << FMT_THIS_ADDR
+<< ", server=" << server << ", service=" << service << 
") called");
+
+  /* synchronized */
+  auto stat = std::make_shared<std::promise<Status>>();
+  std::future<Status> future = stat->get_future();
+
+  auto callback = [stat](const Status &s, FileSystem *fs) {
+(void)fs;
+stat->set_value(s);
+  };
+
+  Connect(server, service, callback);
+
+  /* block until promise is set */
+  auto s = future.get();
+
+  return s;
+}
+
+
+Status FileSystemImpl::ConnectToDefaultFs() {
+  auto stat = std::make_shared<std::promise<Status>>();
+  std::future<Status> future = stat->get_future();
+
+  auto callback = [stat](const Status &s, FileSystem *fs) {
+(void)fs;
+stat->set_value(s);
+  };
+
+  ConnectToDefaultFs(callback);
+
+  /* block until promise is set */
+  auto s = future.get();
+
+  return s;
+}
+
+
+Status FileSystemImpl::Open(const std::string &path,
+ FileHandle **handle) {
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::[sync]Open("
+ << FMT_THIS_ADDR << ", path="
+ << path << ") called");
+
+  auto callstate = std::make_shared<std::promise<std::tuple<Status, FileHandle*>>>();
+  std::future<std::tuple<Status, FileHandle*>> future(callstate->get_future());
+
+  /* wrap async FileSystem::Open with promise to make it a blocking call */
+  auto h = [callstate](const Status &s, FileHandle *is) {
+callstate->set_value(std::make_tuple(s, is));
+  };
+
+  Open(path, h);
+
+  /* block until promise is set */
+  auto returnstate = future.get();
+  Status stat = std::get<0>(returnstate);
+  FileHandle *file_handle = std::get<1>(returnstate);
+
+  if (!stat.ok()) {
+delete file_handle;
+return stat;
+  }
+  if (!file_handle) {
+return stat;
+  }
+
+  *handle = file_handle;
+  return stat;
+}
+
+Status FileSystemImpl::GetBlockLocations(const std::string & path, uint64_t 
offset, uint64_t length,
+  std::shared_ptr * fileBlockLocations)
+{
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::[sync]GetBlockLocations("
+ << FMT_THIS_ADDR << ", path="
+ << path << ") called");
+
+  if (!fileBlockLocations)
+return Status::InvalidArgument("Null pointer passed to GetBlockLocations");
+
+  auto callstate = std::make_shared>>>();
+  std::future>> 
future(callstate->get_future());
+
+  /* wrap async call with promise/future to make it blocking */
+  auto callback = [callstate](const Status &s, 
std::shared_ptr blockInfo) {
+callstate->set_value(std::make_tuple(s,blockInfo));
+  };
+
+  GetBlockLocations(path, offset, length, callback);
+
+  /
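
A generic, self-contained sketch of the shim pattern described in the comment
block above: wrap a callback-style asynchronous call so the caller blocks on a
promise/future pair until the callback fires. The names AsyncLookup and
SyncLookup are illustrative and not part of libhdfspp.

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>
#include <utility>

// Stand-in for an asynchronous API that reports completion via callback.
void AsyncLookup(const std::string& key,
                 std::function<void(int, std::string)> callback)
{
  callback(0, "value-for-" + key);   // in real code this completes later
}

// The shim: a promise held by shared_ptr is captured in the callback, and the
// matching future blocks the synchronous caller until set_value() runs.
int SyncLookup(const std::string& key, std::string* out)
{
  auto state = std::make_shared<std::promise<std::pair<int, std::string>>>();
  std::future<std::pair<int, std::string>> result = state->get_future();

  AsyncLookup(key, [state](int status, std::string value) {
    state->set_value(std::make_pair(status, std::move(value)));
  });

  auto r = result.get();             // block until the callback fires
  *out = r.second;
  return r.first;
}

int main() {
  std::string value;
  int status = SyncLookup("demo", &value);
  std::cout << status << " " << value << std::endl;
  return 0;
}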

[05/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/openssl_init.ipp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/openssl_init.ipp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/openssl_init.ipp
new file mode 100644
index 000..5104a41
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/openssl_init.ipp
@@ -0,0 +1,145 @@
+//
+// ssl/detail/impl/openssl_init.ipp
+// 
+//
+// Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com
+// Copyright (c) 2005-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP
+#define ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include 
+#include "asio/detail/assert.hpp"
+#include "asio/detail/mutex.hpp"
+#include "asio/detail/tss_ptr.hpp"
+#include "asio/ssl/detail/openssl_init.hpp"
+#include "asio/ssl/detail/openssl_types.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace ssl {
+namespace detail {
+
+class openssl_init_base::do_init
+{
+public:
+  do_init()
+  {
+::SSL_library_init();
+::SSL_load_error_strings();
+::OpenSSL_add_all_algorithms();
+
+mutexes_.resize(::CRYPTO_num_locks());
+for (size_t i = 0; i < mutexes_.size(); ++i)
+  mutexes_[i].reset(new asio::detail::mutex);
+::CRYPTO_set_locking_callback(&do_init::openssl_locking_func);
+::CRYPTO_set_id_callback(&do_init::openssl_id_func);
+
+#if !defined(SSL_OP_NO_COMPRESSION) \
+  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+null_compression_methods_ = sk_SSL_COMP_new_null();
+#endif // !defined(SSL_OP_NO_COMPRESSION)
+   // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+  }
+
+  ~do_init()
+  {
+#if !defined(SSL_OP_NO_COMPRESSION) \
+  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+sk_SSL_COMP_free(null_compression_methods_);
+#endif // !defined(SSL_OP_NO_COMPRESSION)
+   // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+
+::CRYPTO_set_id_callback(0);
+::CRYPTO_set_locking_callback(0);
+::ERR_free_strings();
+::ERR_remove_state(0);
+::EVP_cleanup();
+::CRYPTO_cleanup_all_ex_data();
+::CONF_modules_unload(1);
+#if !defined(OPENSSL_NO_ENGINE)
+::ENGINE_cleanup();
+#endif // !defined(OPENSSL_NO_ENGINE)
+  }
+
+#if !defined(SSL_OP_NO_COMPRESSION) \
+  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+  STACK_OF(SSL_COMP)* get_null_compression_methods() const
+  {
+return null_compression_methods_;
+  }
+#endif // !defined(SSL_OP_NO_COMPRESSION)
+   // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+
+private:
+  static unsigned long openssl_id_func()
+  {
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+return ::GetCurrentThreadId();
+#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+void* id = instance()->thread_id_;
+if (id == 0)
+  instance()->thread_id_ = id = &id; // Ugh.
+ASIO_ASSERT(sizeof(unsigned long) >= sizeof(void*));
+return reinterpret_cast(id);
+#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+  }
+
+  static void openssl_locking_func(int mode, int n, 
+const char* /*file*/, int /*line*/)
+  {
+if (mode & CRYPTO_LOCK)
+  instance()->mutexes_[n]->lock();
+else
+  instance()->mutexes_[n]->unlock();
+  }
+
+  // Mutexes to be used in locking callbacks.
+  std::vector > mutexes_;
+
+#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
+  // The thread identifiers to be used by openssl.
+  asio::detail::tss_ptr thread_id_;
+#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
+
+#if !defined(SSL_OP_NO_COMPRESSION) \
+  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+  STACK_OF(SSL_COMP)* null_compression_methods_;
+#endif // !defined(SSL_OP_NO_COMPRESSION)
+   // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+};
+
+asio::detail::shared_ptr
+openssl_init_base::instance()
+{
+  static asio::detail::shared_ptr init(new do_init);
+  return init;
+}
+
+#if !defined(SSL_OP_NO_COMPRESSION) \
+  && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+STACK_OF(SSL_COMP)* openssl_init_base::get_null_compression_methods()
+{
+  return instance()->get_null_compression_methods();
+}
+#endif // !defined(SSL_OP_NO_COMPRESSION)
+   // && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+
+} // namespace detail
+} // namesp

[23/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_socket_service_base.ipp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_socket_service_base.ipp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_socket_service_base.ipp
new file mode 100644
index 000..85d7545
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_socket_service_base.ipp
@@ -0,0 +1,728 @@
+//
+// detail/impl/win_iocp_socket_service_base.ipp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
+#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/detail/win_iocp_socket_service_base.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+win_iocp_socket_service_base::win_iocp_socket_service_base(
+asio::io_service& io_service)
+  : io_service_(io_service),
+iocp_service_(use_service(io_service)),
+reactor_(0),
+connect_ex_(0),
+mutex_(),
+impl_list_(0)
+{
+}
+
+void win_iocp_socket_service_base::shutdown_service()
+{
+  // Close all implementations, causing all operations to complete.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  base_implementation_type* impl = impl_list_;
+  while (impl)
+  {
+asio::error_code ignored_ec;
+close_for_destruction(*impl);
+impl = impl->next_;
+  }
+}
+
+void win_iocp_socket_service_base::construct(
+win_iocp_socket_service_base::base_implementation_type& impl)
+{
+  impl.socket_ = invalid_socket;
+  impl.state_ = 0;
+  impl.cancel_token_.reset();
+#if defined(ASIO_ENABLE_CANCELIO)
+  impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(ASIO_ENABLE_CANCELIO)
+
+  // Insert implementation into linked list of all implementations.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  impl.next_ = impl_list_;
+  impl.prev_ = 0;
+  if (impl_list_)
+impl_list_->prev_ = &impl;
+  impl_list_ = &impl;
+}
+
+void win_iocp_socket_service_base::base_move_construct(
+win_iocp_socket_service_base::base_implementation_type& impl,
+win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+  impl.socket_ = other_impl.socket_;
+  other_impl.socket_ = invalid_socket;
+
+  impl.state_ = other_impl.state_;
+  other_impl.state_ = 0;
+
+  impl.cancel_token_ = other_impl.cancel_token_;
+  other_impl.cancel_token_.reset();
+
+#if defined(ASIO_ENABLE_CANCELIO)
+  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+  other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(ASIO_ENABLE_CANCELIO)
+
+  // Insert implementation into linked list of all implementations.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  impl.next_ = impl_list_;
+  impl.prev_ = 0;
+  if (impl_list_)
+impl_list_->prev_ = &impl;
+  impl_list_ = &impl;
+}
+
+void win_iocp_socket_service_base::base_move_assign(
+win_iocp_socket_service_base::base_implementation_type& impl,
+win_iocp_socket_service_base& other_service,
+win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+  close_for_destruction(impl);
+
+  if (this != &other_service)
+  {
+// Remove implementation from linked list of all implementations.
+asio::detail::mutex::scoped_lock lock(mutex_);
+if (impl_list_ == &impl)
+  impl_list_ = impl.next_;
+if (impl.prev_)
+  impl.prev_->next_ = impl.next_;
+if (impl.next_)
+  impl.next_->prev_= impl.prev_;
+impl.next_ = 0;
+impl.prev_ = 0;
+  }
+
+  impl.socket_ = other_impl.socket_;
+  other_impl.socket_ = invalid_socket;
+
+  impl.state_ = other_impl.state_;
+  other_impl.state_ = 0;
+
+  impl.cancel_token_ = other_impl.cancel_token_;
+  other_impl.cancel_token_.reset();
+
+#if defined(ASIO_ENABLE_CANCELIO)
+  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+  other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(ASIO_ENABLE_CANCELIO)
+
+  if (this != &other_service)
+  {
+// Insert implementation into linked list of all implementations.
+asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+impl.next_ = other
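
A stripped-down sketch of the bookkeeping shown above: every implementation is
kept on an intrusive doubly-linked list so shutdown_service() can walk and
close all of them. Type and member names here are illustrative only.

struct impl_node
{
  impl_node* next_ = nullptr;
  impl_node* prev_ = nullptr;
};

struct service
{
  impl_node* impl_list_ = nullptr;

  void link(impl_node& impl)      // mirrors construct()/move_construct()
  {
    impl.next_ = impl_list_;
    impl.prev_ = nullptr;
    if (impl_list_)
      impl_list_->prev_ = &impl;
    impl_list_ = &impl;
  }

  void unlink(impl_node& impl)    // mirrors the removal in base_move_assign()
  {
    if (impl_list_ == &impl)
      impl_list_ = impl.next_;
    if (impl.prev_)
      impl.prev_->next_ = impl.next_;
    if (impl.next_)
      impl.next_->prev_ = impl.prev_;
    impl.next_ = nullptr;
    impl.prev_ = nullptr;
  }
};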

[18/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue.hpp
new file mode 100644
index 000..171619c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue.hpp
@@ -0,0 +1,332 @@
+//
+// detail/timer_queue.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_TIMER_QUEUE_HPP
+#define ASIO_DETAIL_TIMER_QUEUE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include 
+#include 
+#include "asio/detail/cstdint.hpp"
+#include "asio/detail/date_time_fwd.hpp"
+#include "asio/detail/limits.hpp"
+#include "asio/detail/op_queue.hpp"
+#include "asio/detail/timer_queue_base.hpp"
+#include "asio/detail/wait_op.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+template 
+class timer_queue
+  : public timer_queue_base
+{
+public:
+  // The time type.
+  typedef typename Time_Traits::time_type time_type;
+
+  // The duration type.
+  typedef typename Time_Traits::duration_type duration_type;
+
+  // Per-timer data.
+  class per_timer_data
+  {
+  public:
+per_timer_data() : next_(0), prev_(0) {}
+
+  private:
+friend class timer_queue;
+
+// The operations waiting on the timer.
+op_queue op_queue_;
+
+// The index of the timer in the heap.
+std::size_t heap_index_;
+
+// Pointers to adjacent timers in a linked list.
+per_timer_data* next_;
+per_timer_data* prev_;
+  };
+
+  // Constructor.
+  timer_queue()
+: timers_(),
+  heap_()
+  {
+  }
+
+  // Add a new timer to the queue. Returns true if this is the timer that is
+  // earliest in the queue, in which case the reactor's event demultiplexing
+  // function call may need to be interrupted and restarted.
+  bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op)
+  {
+// Enqueue the timer object.
+if (timer.prev_ == 0 && &timer != timers_)
+{
+  if (this->is_positive_infinity(time))
+  {
+// No heap entry is required for timers that never expire.
+timer.heap_index_ = (std::numeric_limits::max)();
+  }
+  else
+  {
+// Put the new timer at the correct position in the heap. This is done
+// first since push_back() can throw due to allocation failure.
+timer.heap_index_ = heap_.size();
+heap_entry entry = { time, &timer };
+heap_.push_back(entry);
+up_heap(heap_.size() - 1);
+  }
+
+  // Insert the new timer into the linked list of active timers.
+  timer.next_ = timers_;
+  timer.prev_ = 0;
+  if (timers_)
+timers_->prev_ = &timer;
+  timers_ = &timer;
+}
+
+// Enqueue the individual timer operation.
+timer.op_queue_.push(op);
+
+// Interrupt reactor only if newly added timer is first to expire.
+return timer.heap_index_ == 0 && timer.op_queue_.front() == op;
+  }
+
+  // Whether there are no timers in the queue.
+  virtual bool empty() const
+  {
+return timers_ == 0;
+  }
+
+  // Get the time for the timer that is earliest in the queue.
+  virtual long wait_duration_msec(long max_duration) const
+  {
+if (heap_.empty())
+  return max_duration;
+
+return this->to_msec(
+Time_Traits::to_posix_duration(
+  Time_Traits::subtract(heap_[0].time_, Time_Traits::now())),
+max_duration);
+  }
+
+  // Get the time for the timer that is earliest in the queue.
+  virtual long wait_duration_usec(long max_duration) const
+  {
+if (heap_.empty())
+  return max_duration;
+
+return this->to_usec(
+Time_Traits::to_posix_duration(
+  Time_Traits::subtract(heap_[0].time_, Time_Traits::now())),
+max_duration);
+  }
+
+  // Dequeue all timers not later than the current time.
+  virtual void get_ready_timers(op_queue& ops)
+  {
+if (!heap_.empty())
+{
+  const time_type now = Time_Traits::now();
+  while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))
+  {
+per_timer_data* timer = heap_[0].timer_;
+ops.push(timer->op_queue_);
+remove_timer(*timer);
+  }
+}
+ 
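
A much-simplified illustration of what timer_queue tracks: a min-heap of
deadlines plus the "how long may the event loop sleep" computation performed by
wait_duration_msec() above. It uses std::priority_queue instead of the
intrusive heap; names are illustrative.

#include <chrono>
#include <cstddef>
#include <functional>
#include <queue>
#include <vector>

using clock_type = std::chrono::steady_clock;

class simple_timer_queue
{
public:
  void enqueue(clock_type::time_point deadline) { heap_.push(deadline); }

  // Milliseconds the loop may block without missing the earliest timer.
  long wait_duration_msec(long max_duration) const
  {
    if (heap_.empty())
      return max_duration;
    auto remaining = std::chrono::duration_cast<std::chrono::milliseconds>(
        heap_.top() - clock_type::now()).count();
    if (remaining <= 0)
      return 0;                        // earliest timer already expired
    return remaining < max_duration
        ? static_cast<long>(remaining) : max_duration;
  }

  // Pop every timer whose deadline has passed (cf. get_ready_timers()).
  std::size_t pop_ready()
  {
    std::size_t ready = 0;
    const auto now = clock_type::now();
    while (!heap_.empty() && heap_.top() <= now) { heap_.pop(); ++ready; }
    return ready;
  }

private:
  // std::greater puts the *earliest* deadline at the top (a min-heap).
  std::priority_queue<clock_type::time_point,
                      std::vector<clock_type::time_point>,
                      std::greater<clock_type::time_point>> heap_;
};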

[08/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor_service.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor_service.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor_service.hpp
new file mode 100644
index 000..556912e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor_service.hpp
@@ -0,0 +1,260 @@
+//
+// posix/stream_descriptor_service.hpp
+// ~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_POSIX_STREAM_DESCRIPTOR_SERVICE_HPP
+#define ASIO_POSIX_STREAM_DESCRIPTOR_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \
+  || defined(GENERATING_DOCUMENTATION)
+
+#include 
+#include "asio/async_result.hpp"
+#include "asio/error.hpp"
+#include "asio/io_service.hpp"
+#include "asio/detail/reactive_descriptor_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace posix {
+
+/// Default service implementation for a stream descriptor.
+class stream_descriptor_service
+#if defined(GENERATING_DOCUMENTATION)
+  : public asio::io_service::service
+#else
+  : public asio::detail::service_base<stream_descriptor_service>
+#endif
+{
+public:
+#if defined(GENERATING_DOCUMENTATION)
+  /// The unique service identifier.
+  static asio::io_service::id id;
+#endif
+
+private:
+  // The type of the platform-specific implementation.
+  typedef detail::reactive_descriptor_service service_impl_type;
+
+public:
+  /// The type of a stream descriptor implementation.
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined implementation_type;
+#else
+  typedef service_impl_type::implementation_type implementation_type;
+#endif
+
+  /// (Deprecated: Use native_handle_type.) The native descriptor type.
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined native_type;
+#else
+  typedef service_impl_type::native_handle_type native_type;
+#endif
+
+  /// The native descriptor type.
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined native_handle_type;
+#else
+  typedef service_impl_type::native_handle_type native_handle_type;
+#endif
+
+  /// Construct a new stream descriptor service for the specified io_service.
+  explicit stream_descriptor_service(asio::io_service& io_service)
+: asio::detail::service_base<stream_descriptor_service>(io_service),
+  service_impl_(io_service)
+  {
+  }
+
+  /// Construct a new stream descriptor implementation.
+  void construct(implementation_type& impl)
+  {
+service_impl_.construct(impl);
+  }
+
+#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
+  /// Move-construct a new stream descriptor implementation.
+  void move_construct(implementation_type& impl,
+  implementation_type& other_impl)
+  {
+service_impl_.move_construct(impl, other_impl);
+  }
+
+  /// Move-assign from another stream descriptor implementation.
+  void move_assign(implementation_type& impl,
+  stream_descriptor_service& other_service,
+  implementation_type& other_impl)
+  {
+service_impl_.move_assign(impl, other_service.service_impl_, other_impl);
+  }
+#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
+
+  /// Destroy a stream descriptor implementation.
+  void destroy(implementation_type& impl)
+  {
+service_impl_.destroy(impl);
+  }
+
+  /// Assign an existing native descriptor to a stream descriptor.
+  asio::error_code assign(implementation_type& impl,
+  const native_handle_type& native_descriptor,
+  asio::error_code& ec)
+  {
+return service_impl_.assign(impl, native_descriptor, ec);
+  }
+
+  /// Determine whether the descriptor is open.
+  bool is_open(const implementation_type& impl) const
+  {
+return service_impl_.is_open(impl);
+  }
+
+  /// Close a stream descriptor implementation.
+  asio::error_code close(implementation_type& impl,
+  asio::error_code& ec)
+  {
+return service_impl_.close(impl, ec);
+  }
+
+  /// (Deprecated: Use native_handle().) Get the native descriptor
+  /// implementation.
+  native_type native(implementation_type& impl)
+  {
+return service_impl_.native_handle(impl);
+  }
+
+  /// Get the native descriptor implementation.
+  native_handle_type native_han
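
The service above is the backend for asio::posix::stream_descriptor, which adopts an already-open file descriptor into asio. A minimal sketch of that public API, assuming a platform where ASIO_HAS_POSIX_STREAM_DESCRIPTOR is defined (e.g. Linux) and the bundled standalone asio headers are on the include path; this snippet is not part of the committed patch:

#include <iostream>
#include <string>
#include <unistd.h>
#include "asio.hpp"

int main()
{
  asio::io_service io;

  int fds[2];
  if (::pipe(fds) != 0)
    return 1;

  // Hand ownership of the raw descriptors to asio.
  asio::posix::stream_descriptor reader(io, fds[0]);
  asio::posix::stream_descriptor writer(io, fds[1]);

  asio::write(writer, asio::buffer("ping", 4));   // synchronous helper declared in write.hpp

  char buf[4];
  asio::read(reader, asio::buffer(buf));          // blocks until all 4 bytes arrive
  std::cout << std::string(buf, 4) << std::endl;
  return 0;
}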

[01/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 d0488c781 -> 7de7daa5b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write.hpp
new file mode 100644
index 000..6799179
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write.hpp
@@ -0,0 +1,618 @@
+//
+// write.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_WRITE_HPP
+#define ASIO_WRITE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include 
+#include "asio/async_result.hpp"
+#include "asio/basic_streambuf_fwd.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/**
+ * @defgroup write asio::write
+ *
+ * @brief Write a certain amount of data to a stream before returning.
+ */
+/*@{*/
+
+/// Write all of the supplied data to a stream before returning.
+/**
+ * This function is used to write a certain number of bytes of data to a 
stream.
+ * The call will block until one of the following conditions is true:
+ *
+ * @li All of the data in the supplied buffers has been written. That is, the
+ * bytes transferred is equal to the sum of the buffer sizes.
+ *
+ * @li An error occurred.
+ *
+ * This operation is implemented in terms of zero or more calls to the stream's
+ * write_some function.
+ *
+ * @param s The stream to which the data is to be written. The type must 
support
+ * the SyncWriteStream concept.
+ *
+ * @param buffers One or more buffers containing the data to be written. The 
sum
+ * of the buffer sizes indicates the maximum number of bytes to write to the
+ * stream.
+ *
+ * @returns The number of bytes transferred.
+ *
+ * @throws asio::system_error Thrown on failure.
+ *
+ * @par Example
+ * To write a single data buffer use the @ref buffer function as follows:
+ * @code asio::write(s, asio::buffer(data, size)); @endcode
+ * See the @ref buffer documentation for information on writing multiple
+ * buffers in one go, and how to use it with arrays, boost::array or
+ * std::vector.
+ *
+ * @note This overload is equivalent to calling:
+ * @code asio::write(
+ * s, buffers,
+ * asio::transfer_all()); @endcode
+ */
+template <typename SyncWriteStream, typename ConstBufferSequence>
+std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers);
+
+/// Write all of the supplied data to a stream before returning.
+/**
+ * This function is used to write a certain number of bytes of data to a 
stream.
+ * The call will block until one of the following conditions is true:
+ *
+ * @li All of the data in the supplied buffers has been written. That is, the
+ * bytes transferred is equal to the sum of the buffer sizes.
+ *
+ * @li An error occurred.
+ *
+ * This operation is implemented in terms of zero or more calls to the stream's
+ * write_some function.
+ *
+ * @param s The stream to which the data is to be written. The type must 
support
+ * the SyncWriteStream concept.
+ *
+ * @param buffers One or more buffers containing the data to be written. The 
sum
+ * of the buffer sizes indicates the maximum number of bytes to write to the
+ * stream.
+ *
+ * @param ec Set to indicate what error occurred, if any.
+ *
+ * @returns The number of bytes transferred.
+ *
+ * @par Example
+ * To write a single data buffer use the @ref buffer function as follows:
+ * @code asio::write(s, asio::buffer(data, size), ec); @endcode
+ * See the @ref buffer documentation for information on writing multiple
+ * buffers in one go, and how to use it with arrays, boost::array or
+ * std::vector.
+ *
+ * @note This overload is equivalent to calling:
+ * @code asio::write(
+ * s, buffers,
+ * asio::transfer_all(), ec); @endcode
+ */
+template <typename SyncWriteStream, typename ConstBufferSequence>
+std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers,
+asio::error_code& ec);
+
+/// Write a certain amount of data to a stream before returning.
+/**
+ * This function is used to write a certain number of bytes of data to a 
stream.
+ * The call will block until one of the following conditions is true:
+ *
+ * @li All of the data in the supplied buffers has been written. That is, the
+ * bytes transferred is equal to the sum of the buffer sizes.
+ *
+ * @li The completion_condition function object returns 0.
+ *
+ * This opera

[17/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_op.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_op.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_op.hpp
new file mode 100644
index 000..90651f0
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_op.hpp
@@ -0,0 +1,88 @@
+//
+// detail/win_iocp_overlapped_op.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP
+#define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/error.hpp"
+#include "asio/detail/addressof.hpp"
+#include "asio/detail/bind_handler.hpp"
+#include "asio/detail/fenced_block.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+#include "asio/detail/operation.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+template <typename Handler>
+class win_iocp_overlapped_op : public operation
+{
+public:
+  ASIO_DEFINE_HANDLER_PTR(win_iocp_overlapped_op);
+
+  win_iocp_overlapped_op(Handler& handler)
+: operation(&win_iocp_overlapped_op::do_complete),
+  handler_(ASIO_MOVE_CAST(Handler)(handler))
+  {
+  }
+
+  static void do_complete(io_service_impl* owner, operation* base,
+  const asio::error_code& ec, std::size_t bytes_transferred)
+  {
+// Take ownership of the operation object.
+win_iocp_overlapped_op* o(static_cast<win_iocp_overlapped_op*>(base));
+ptr p = { asio::detail::addressof(o->handler_), o, o };
+
+ASIO_HANDLER_COMPLETION((o));
+
+// Make a copy of the handler so that the memory can be deallocated before
+// the upcall is made. Even if we're not about to make an upcall, a
+// sub-object of the handler may be the true owner of the memory associated
+// with the handler. Consequently, a local copy of the handler is required
+// to ensure that any owning sub-object remains valid until after we have
+// deallocated the memory here.
+detail::binder2<Handler, asio::error_code, std::size_t>
+  handler(o->handler_, ec, bytes_transferred);
+p.h = asio::detail::addressof(handler.handler_);
+p.reset();
+
+// Make the upcall if required.
+if (owner)
+{
+  fenced_block b(fenced_block::half);
+  ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
+  asio_handler_invoke_helpers::invoke(handler, handler.handler_);
+  ASIO_HANDLER_INVOCATION_END;
+}
+  }
+
+private:
+  Handler handler_;
+};
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP)
+
+#endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP
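
The do_complete comment above ("make a copy of the handler so that the memory can be deallocated before the upcall") describes a pattern used throughout these headers: the operation's storage may be owned by the handler itself, so the handler is moved to the stack before that storage is released. A standalone sketch of just that idea, with every name below invented for illustration and no claim to match asio's internals:

#include <cstdio>
#include <functional>
#include <new>
#include <utility>

using handler_type = std::function<void(int)>;

void complete_and_free(handler_type* stored, int result)
{
  // Move the handler onto the stack before freeing the operation's memory,
  // because that memory may be owned by a sub-object of the handler itself.
  handler_type local(std::move(*stored));
  stored->~handler_type();
  ::operator delete(stored);
  // Only now is it safe to make the upcall.
  local(result);
}

int main()
{
  void* raw = ::operator new(sizeof(handler_type));
  handler_type* stored =
      new (raw) handler_type([](int r) { std::printf("result=%d\n", r); });
  complete_and_free(stored, 42);
  return 0;
}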

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_ptr.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_ptr.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_ptr.hpp
new file mode 100644
index 000..7f816c5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_ptr.hpp
@@ -0,0 +1,144 @@
+//
+// detail/win_iocp_overlapped_ptr.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP
+#define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/io_service.hpp"
+#include "asio/detail/addressof.hpp"
+#include "asio/detail

[24/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
new file mode 100644
index 000..bd6f363
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
@@ -0,0 +1,175 @@
+//
+// detail/impl/socket_select_interrupter.ipp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_WINDOWS_RUNTIME)
+
+#if defined(ASIO_WINDOWS) \
+  || defined(__CYGWIN__) \
+  || defined(__SYMBIAN32__)
+
+#include 
+#include "asio/detail/socket_holder.hpp"
+#include "asio/detail/socket_ops.hpp"
+#include "asio/detail/socket_select_interrupter.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+socket_select_interrupter::socket_select_interrupter()
+{
+  open_descriptors();
+}
+
+void socket_select_interrupter::open_descriptors()
+{
+  asio::error_code ec;
+  socket_holder acceptor(socket_ops::socket(
+AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+  if (acceptor.get() == invalid_socket)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  int opt = 1;
+  socket_ops::state_type acceptor_state = 0;
+  socket_ops::setsockopt(acceptor.get(), acceptor_state,
+  SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
+
+  using namespace std; // For memset.
+  sockaddr_in4_type addr;
+  std::size_t addr_len = sizeof(addr);
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+  addr.sin_port = 0;
+  if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr,
+addr_len, ec) == socket_error_retval)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr,
+&addr_len, ec) == socket_error_retval)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  // Some broken firewalls on Windows will intermittently cause getsockname to
+  // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
+  // explicitly specify the target address here to work around this problem.
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+  if (socket_ops::listen(acceptor.get(),
+SOMAXCONN, ec) == socket_error_retval)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  socket_holder client(socket_ops::socket(
+AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+  if (client.get() == invalid_socket)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr,
+addr_len, ec) == socket_error_retval)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
+  if (server.get() == invalid_socket)
+asio::detail::throw_error(ec, "socket_select_interrupter");
+  
+  ioctl_arg_type non_blocking = 1;
+  socket_ops::state_type client_state = 0;
+  if (socket_ops::ioctl(client.get(), client_state,
+FIONBIO, &non_blocking, ec))
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  opt = 1;
+  socket_ops::setsockopt(client.get(), client_state,
+  IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+  non_blocking = 1;
+  socket_ops::state_type server_state = 0;
+  if (socket_ops::ioctl(server.get(), server_state,
+FIONBIO, &non_blocking, ec))
+asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  opt = 1;
+  socket_ops::setsockopt(server.get(), server_state,
+  IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+  read_descriptor_ = server.release();
+  write_descriptor_ = client.release();
+}
+
+socket_select_interrupter::~socket_select_interrupter()
+{
+  close_descriptors();
+}
+
+void socket_select_interrupter::close_descriptors()
+{
+  asio::error_co

[02/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/waitable_timer_service.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/waitable_timer_service.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/waitable_timer_service.hpp
new file mode 100644
index 000..0770360
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/waitable_timer_service.hpp
@@ -0,0 +1,168 @@
+//
+// waitable_timer_service.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_WAITABLE_TIMER_SERVICE_HPP
+#define ASIO_WAITABLE_TIMER_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include 
+#include "asio/async_result.hpp"
+#include "asio/detail/chrono_time_traits.hpp"
+#include "asio/detail/deadline_timer_service.hpp"
+#include "asio/io_service.hpp"
+#include "asio/wait_traits.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// Default service implementation for a timer.
+template <typename Clock, typename WaitTraits = asio::wait_traits<Clock> >
+class waitable_timer_service
+#if defined(GENERATING_DOCUMENTATION)
+  : public asio::io_service::service
+#else
+  : public asio::detail::service_base<
+  waitable_timer_service<Clock, WaitTraits> >
+#endif
+{
+public:
+#if defined(GENERATING_DOCUMENTATION)
+  /// The unique service identifier.
+  static asio::io_service::id id;
+#endif
+
+  /// The clock type.
+  typedef Clock clock_type;
+
+  /// The duration type of the clock.
+  typedef typename clock_type::duration duration;
+
+  /// The time point type of the clock.
+  typedef typename clock_type::time_point time_point;
+
+  /// The wait traits type.
+  typedef WaitTraits traits_type;
+
+private:
+  // The type of the platform-specific implementation.
+  typedef detail::deadline_timer_service<
+detail::chrono_time_traits<Clock, WaitTraits> > service_impl_type;
+
+public:
+  /// The implementation type of the waitable timer.
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined implementation_type;
+#else
+  typedef typename service_impl_type::implementation_type implementation_type;
+#endif
+
+  /// Construct a new timer service for the specified io_service.
+  explicit waitable_timer_service(asio::io_service& io_service)
+: asio::detail::service_base<
+waitable_timer_service<Clock, WaitTraits> >(io_service),
+  service_impl_(io_service)
+  {
+  }
+
+  /// Construct a new timer implementation.
+  void construct(implementation_type& impl)
+  {
+service_impl_.construct(impl);
+  }
+
+  /// Destroy a timer implementation.
+  void destroy(implementation_type& impl)
+  {
+service_impl_.destroy(impl);
+  }
+
+  /// Cancel any asynchronous wait operations associated with the timer.
+  std::size_t cancel(implementation_type& impl, asio::error_code& ec)
+  {
+return service_impl_.cancel(impl, ec);
+  }
+
+  /// Cancels one asynchronous wait operation associated with the timer.
+  std::size_t cancel_one(implementation_type& impl,
+  asio::error_code& ec)
+  {
+return service_impl_.cancel_one(impl, ec);
+  }
+
+  /// Get the expiry time for the timer as an absolute time.
+  time_point expires_at(const implementation_type& impl) const
+  {
+return service_impl_.expires_at(impl);
+  }
+
+  /// Set the expiry time for the timer as an absolute time.
+  std::size_t expires_at(implementation_type& impl,
+  const time_point& expiry_time, asio::error_code& ec)
+  {
+return service_impl_.expires_at(impl, expiry_time, ec);
+  }
+
+  /// Get the expiry time for the timer relative to now.
+  duration expires_from_now(const implementation_type& impl) const
+  {
+return service_impl_.expires_from_now(impl);
+  }
+
+  /// Set the expiry time for the timer relative to now.
+  std::size_t expires_from_now(implementation_type& impl,
+  const duration& expiry_time, asio::error_code& ec)
+  {
+return service_impl_.expires_from_now(impl, expiry_time, ec);
+  }
+
+  // Perform a blocking wait on the timer.
+  void wait(implementation_type& impl, asio::error_code& ec)
+  {
+service_impl_.wait(impl, ec);
+  }
+
+  // Start an asynchronous wait on the timer.
+  template <typename WaitHandler>
+  ASIO_INITFN_RESULT_TYPE(WaitHandler,
+  void (asio::error_code))
+  async_wait(implementation_type& impl,
+  ASIO_MOVE_ARG(WaitHandler) handler)
+  {
+detail::async_result_init<
+  WaitHandler, void (asio::error_code)> init(
+ASIO_
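
This service is what asio::basic_waitable_timer (and the steady_timer/system_timer typedefs) delegate to. A minimal sketch of that public API, assuming the bundled standalone asio headers; it is illustrative only and not part of the committed patch:

#include <chrono>
#include <iostream>
#include "asio.hpp"

int main()
{
  asio::io_service io;
  asio::basic_waitable_timer<std::chrono::steady_clock> timer(io);

  timer.expires_from_now(std::chrono::milliseconds(100));
  timer.async_wait([](const asio::error_code& ec) {
    if (!ec)
      std::cout << "timer fired" << std::endl;
  });

  io.run();  // run() returns once the wait handler has executed
  return 0;
}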

[19/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_holder.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_holder.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_holder.hpp
new file mode 100644
index 000..809cf1f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_holder.hpp
@@ -0,0 +1,98 @@
+//
+// detail/socket_holder.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_SOCKET_HOLDER_HPP
+#define ASIO_DETAIL_SOCKET_HOLDER_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/noncopyable.hpp"
+#include "asio/detail/socket_ops.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Implement the resource acquisition is initialisation idiom for sockets.
+class socket_holder
+  : private noncopyable
+{
+public:
+  // Construct as an uninitialised socket.
+  socket_holder()
+: socket_(invalid_socket)
+  {
+  }
+
+  // Construct to take ownership of the specified socket.
+  explicit socket_holder(socket_type s)
+: socket_(s)
+  {
+  }
+
+  // Destructor.
+  ~socket_holder()
+  {
+if (socket_ != invalid_socket)
+{
+  asio::error_code ec;
+  socket_ops::state_type state = 0;
+  socket_ops::close(socket_, state, true, ec);
+}
+  }
+
+  // Get the underlying socket.
+  socket_type get() const
+  {
+return socket_;
+  }
+
+  // Reset to an uninitialised socket.
+  void reset()
+  {
+if (socket_ != invalid_socket)
+{
+  asio::error_code ec;
+  socket_ops::state_type state = 0;
+  socket_ops::close(socket_, state, true, ec);
+  socket_ = invalid_socket;
+}
+  }
+
+  // Reset to take ownership of the specified socket.
+  void reset(socket_type s)
+  {
+reset();
+socket_ = s;
+  }
+
+  // Release ownership of the socket.
+  socket_type release()
+  {
+socket_type tmp = socket_;
+socket_ = invalid_socket;
+return tmp;
+  }
+
+private:
+  // The underlying socket.
+  socket_type socket_;
+};
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_SOCKET_HOLDER_HPP
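
socket_holder above is a small RAII guard: the destructor closes the socket unless ownership has been released. A rough platform-level equivalent for plain POSIX descriptors, given purely as an illustration (fd_guard is an invented name, not part of the patch):

#include <sys/socket.h>
#include <unistd.h>

class fd_guard {
public:
  explicit fd_guard(int fd = -1) : fd_(fd) {}
  ~fd_guard() { if (fd_ != -1) ::close(fd_); }            // close unless released
  fd_guard(const fd_guard&) = delete;
  fd_guard& operator=(const fd_guard&) = delete;
  int get() const { return fd_; }
  int release() { int tmp = fd_; fd_ = -1; return tmp; }  // caller takes ownership
private:
  int fd_;
};

int main()
{
  fd_guard s(::socket(AF_INET, SOCK_STREAM, 0));
  // The descriptor is closed automatically when s goes out of scope.
  return s.get() == -1 ? 1 : 0;
}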

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_ops.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_ops.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_ops.hpp
new file mode 100644
index 000..b976419
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_ops.hpp
@@ -0,0 +1,334 @@
+//
+// detail/socket_ops.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_SOCKET_OPS_HPP
+#define ASIO_DETAIL_SOCKET_OPS_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#include "asio/error_code.hpp"
+#include "asio/detail/shared_ptr.hpp"
+#include "asio/detail/socket_types.hpp"
+#include "asio/detail/weak_ptr.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+namespace socket_ops {
+
+// Socket state bits.
+enum
+{
+  // The user wants a non-blocking socket.
+  user_set_non_blocking = 1,
+
+  // The socket has been set non-blocking.
+  internal_non_blocking = 2,
+
+  // Helper "state" used to determine whether the socket is non-blocking.
+  non_blocking = user_set_non_blocking | internal_non_blocking,
+
+  // User wants connection_aborted errors, which are disabled by default.
+  enable_connection_aborted = 4,
+
+  // The user set the linger option. Needs to be checked when closing.
+  user_set_linger = 8,
+
+  // The so

[40/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
new file mode 100644
index 000..3331935
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
@@ -0,0 +1,170 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Delegate some functionality to libhdfs, until libhdfspp is complete.
+set (LIBHDFS_SRC_DIR ../../libhdfs)
+set (LIBHDFS_TESTS_DIR ../../libhdfs-tests)
+set (LIBHDFSPP_SRC_DIR ..)
+set (LIBHDFSPP_LIB_DIR ${LIBHDFSPP_SRC_DIR}/lib)
+set (LIBHDFSPP_BINDING_C ${LIBHDFSPP_LIB_DIR}/bindings/c)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-field-initializers")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-field-initializers")
+
+include_directories(
+${GENERATED_JAVAH}
+${CMAKE_CURRENT_LIST_DIR}
+${CMAKE_BINARY_DIR}
+${JNI_INCLUDE_DIRS}
+${LIBHDFS_SRC_DIR}/include
+${LIBHDFS_SRC_DIR}/..
+${LIBHDFS_SRC_DIR}
+${OS_DIR}
+)
+add_library(test_common_obj OBJECT mock_connection.cc)
+add_library(test_common $<TARGET_OBJECTS:test_common_obj>)
+
+set(PROTOBUF_IMPORT_DIRS ${PROTO_HADOOP_TEST_DIR})
+
+protobuf_generate_cpp(PROTO_TEST_SRCS PROTO_TEST_HDRS
+  ${PROTO_HADOOP_TEST_DIR}/test.proto
+  ${PROTO_HADOOP_TEST_DIR}/test_rpc_service.proto
+)
+
+# Shamelessly stolen from
+#
http://stackoverflow.com/questions/9303711/how-do-i-make-ctest-run-a-program-with-valgrind-without-dart
+function(add_memcheck_test name binary)
+  add_test(${name} ${binary} ${ARGN})
+
+  if(NOT MEMORYCHECK_COMMAND MATCHES "MEMORYCHECK_COMMAND-NOTFOUND" AND NOT 
SKIP_VALGRIND)
+set(memcheck_command "${MEMORYCHECK_COMMAND} 
${MEMORYCHECK_COMMAND_OPTIONS}")
+separate_arguments(memcheck_command)
+add_test(memcheck_${name} ${memcheck_command} ./${binary} ${ARGN})
+  endif()
+endfunction(add_memcheck_test)
+
+#
+#
+#   UNIT TESTS - TEST SELECTED PARTS OF THE LIBRARY
+#
+#
+
+add_executable(uri_test uri_test.cc)
+target_link_libraries(uri_test common gmock_main ${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(uri uri_test)
+
+add_executable(remote_block_reader_test remote_block_reader_test.cc)
+target_link_libraries(remote_block_reader_test test_common reader proto common 
connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} gmock_main 
${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(remote_block_reader remote_block_reader_test)
+
+add_executable(sasl_digest_md5_test sasl_digest_md5_test.cc)
+target_link_libraries(sasl_digest_md5_test common ${PROTOBUF_LIBRARIES} 
${OPENSSL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(sasl_digest_md5 sasl_digest_md5_test)
+
+add_executable(retry_policy_test retry_policy_test.cc)
+target_link_libraries(retry_policy_test common gmock_main 
${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(retry_policy retry_policy_test)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+add_executable(rpc_engine_test rpc_engine_test.cc ${PROTO_TEST_SRCS} 
${PROTO_TEST_HDRS})
+target_link_libraries(rpc_engine_test test_common rpc proto common 
${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main 
${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(rpc_engine rpc_engine_test)
+
+add_executable(bad_datanode_test bad_datanode_test.cc)
+target_link_libraries(bad_datanode_test rpc reader proto fs bindings_c rpc 
proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} 
${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(bad_datanode bad_datanode_test)
+
+add_executable(node_exclusion_test node_exclusion_test.cc)
+target_link_libraries(node_exclusion_test fs gmock_main common 
${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} 
${CMAKE_THREAD_LIBS_INIT})
+add_memcheck_test(node_exclusion node_exclusion_test)
+
+add_executable(configuration_test configuration_test.cc)
+target_link_libraries(configuration_test common gmock_main 
${CMAKE_THREAD_LIBS_INIT})
+add

[16/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_object_handle_service.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_object_handle_service.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_object_handle_service.hpp
new file mode 100644
index 000..b937f8c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_object_handle_service.hpp
@@ -0,0 +1,183 @@
+//
+// detail/win_object_handle_service.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2011 Boris Schaeling (bo...@highscore.de)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP
+#define ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
+
+#include "asio/detail/addressof.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/wait_handler.hpp"
+#include "asio/error.hpp"
+#include "asio/io_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+class win_object_handle_service
+{
+public:
+  // The native type of an object handle.
+  typedef HANDLE native_handle_type;
+
+  // The implementation type of the object handle.
+  class implementation_type
+  {
+   public:
+// Default constructor.
+implementation_type()
+  : handle_(INVALID_HANDLE_VALUE),
+wait_handle_(INVALID_HANDLE_VALUE),
+owner_(0),
+next_(0),
+prev_(0)
+{
+}
+
+  private:
+// Only this service will have access to the internal values.
+friend class win_object_handle_service;
+
+// The native object handle representation. May be accessed or modified
+// without locking the mutex.
+native_handle_type handle_;
+
+// The handle used to unregister the wait operation. The mutex must be
+// locked when accessing or modifying this member.
+HANDLE wait_handle_;
+
+// The operations waiting on the object handle. If there is a registered
+// wait then the mutex must be locked when accessing or modifying this
+// member
+op_queue<wait_op> op_queue_;
+
+// The service instance that owns the object handle implementation.
+win_object_handle_service* owner_;
+
+// Pointers to adjacent handle implementations in linked list. The mutex
+// must be locked when accessing or modifying these members.
+implementation_type* next_;
+implementation_type* prev_;
+  };
+
+  // Constructor.
+  ASIO_DECL win_object_handle_service(
+  asio::io_service& io_service);
+
+  // Destroy all user-defined handler objects owned by the service.
+  ASIO_DECL void shutdown_service();
+
+  // Construct a new handle implementation.
+  ASIO_DECL void construct(implementation_type& impl);
+
+  // Move-construct a new handle implementation.
+  ASIO_DECL void move_construct(implementation_type& impl,
+  implementation_type& other_impl);
+
+  // Move-assign from another handle implementation.
+  ASIO_DECL void move_assign(implementation_type& impl,
+  win_object_handle_service& other_service,
+  implementation_type& other_impl);
+
+  // Destroy a handle implementation.
+  ASIO_DECL void destroy(implementation_type& impl);
+
+  // Assign a native handle to a handle implementation.
+  ASIO_DECL asio::error_code assign(implementation_type& impl,
+  const native_handle_type& handle, asio::error_code& ec);
+
+  // Determine whether the handle is open.
+  bool is_open(const implementation_type& impl) const
+  {
+return impl.handle_ != INVALID_HANDLE_VALUE && impl.handle_ != 0;
+  }
+
+  // Destroy a handle implementation.
+  ASIO_DECL asio::error_code close(implementation_type& impl,
+  asio::error_code& ec);
+
+  // Get the native handle representation.
+  native_handle_type native_handle(const implementation_type& impl) const
+  {
+return impl.handle_;
+  }
+
+  // Cancel all operations associated with the handle.
+  ASIO_DECL asio::error_code cancel(implementation_type& impl,
+  asio::error_code& ec);
+
+  // Perform a synchronous wait for the object to enter a signalled state.
+  ASIO_DECL void wait(implementation_type& impl,
+  asio::error_code& ec);
+
+  /// Start an asynchronous wait.
+  template <typename Handler>
+  void async_wait(implementatio

[29/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/config.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/config.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/config.hpp
new file mode 100644
index 000..3b7875e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/config.hpp
@@ -0,0 +1,895 @@
+//
+// detail/config.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_CONFIG_HPP
+#define ASIO_DETAIL_CONFIG_HPP
+
+#if defined(ASIO_STANDALONE)
+# define ASIO_DISABLE_BOOST_ARRAY 1
+# define ASIO_DISABLE_BOOST_ASSERT 1
+# define ASIO_DISABLE_BOOST_BIND 1
+# define ASIO_DISABLE_BOOST_CHRONO 1
+# define ASIO_DISABLE_BOOST_DATE_TIME 1
+# define ASIO_DISABLE_BOOST_LIMITS 1
+# define ASIO_DISABLE_BOOST_REGEX 1
+# define ASIO_DISABLE_BOOST_STATIC_CONSTANT 1
+# define ASIO_DISABLE_BOOST_THROW_EXCEPTION 1
+# define ASIO_DISABLE_BOOST_WORKAROUND 1
+#else // defined(ASIO_STANDALONE)
+# include 
+# include 
+# define ASIO_HAS_BOOST_CONFIG 1
+#endif // defined(ASIO_STANDALONE)
+
+// Default to a header-only implementation. The user must specifically request
+// separate compilation by defining either ASIO_SEPARATE_COMPILATION or
+// ASIO_DYN_LINK (as a DLL/shared library implies separate compilation).
+#if !defined(ASIO_HEADER_ONLY)
+# if !defined(ASIO_SEPARATE_COMPILATION)
+#  if !defined(ASIO_DYN_LINK)
+#   define ASIO_HEADER_ONLY 1
+#  endif // !defined(ASIO_DYN_LINK)
+# endif // !defined(ASIO_SEPARATE_COMPILATION)
+#endif // !defined(ASIO_HEADER_ONLY)
+
+#if defined(ASIO_HEADER_ONLY)
+# define ASIO_DECL inline
+#else // defined(ASIO_HEADER_ONLY)
+# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__)
+// We need to import/export our code only if the user has specifically asked
+// for it by defining ASIO_DYN_LINK.
+#  if defined(ASIO_DYN_LINK)
+// Export if this is our own source, otherwise import.
+#   if defined(ASIO_SOURCE)
+#define ASIO_DECL __declspec(dllexport)
+#   else // defined(ASIO_SOURCE)
+#define ASIO_DECL __declspec(dllimport)
+#   endif // defined(ASIO_SOURCE)
+#  endif // defined(ASIO_DYN_LINK)
+# endif // defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__)
+#endif // defined(ASIO_HEADER_ONLY)
+
+// If ASIO_DECL isn't defined yet define it now.
+#if !defined(ASIO_DECL)
+# define ASIO_DECL
+#endif // !defined(ASIO_DECL)
+
+// Microsoft Visual C++ detection.
+#if !defined(ASIO_MSVC)
+# if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC)
+#  define ASIO_MSVC BOOST_MSVC
+# elif defined(_MSC_VER) && !defined(__MWERKS__) && !defined(__EDG_VERSION__)
+#  define ASIO_MSVC _MSC_VER
+# endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC)
+#endif // defined(ASIO_MSVC)
+
+// Clang / libc++ detection.
+#if defined(__clang__)
+# if (__cplusplus >= 201103)
+#  if __has_include(<__config>)
+#   include <__config>
+#   if defined(_LIBCPP_VERSION)
+#define ASIO_HAS_CLANG_LIBCXX 1
+#   endif // defined(_LIBCPP_VERSION)
+#  endif // __has_include(<__config>)
+# endif // (__cplusplus >= 201103)
+#endif // defined(__clang__)
+
+// Support move construction and assignment on compilers known to allow it.
+#if !defined(ASIO_HAS_MOVE)
+# if !defined(ASIO_DISABLE_MOVE)
+#  if defined(__clang__)
+#   if __has_feature(__cxx_rvalue_references__)
+#define ASIO_HAS_MOVE 1
+#   endif // __has_feature(__cxx_rvalue_references__)
+#  endif // defined(__clang__)
+#  if defined(__GNUC__)
+#   if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)
+#if defined(__GXX_EXPERIMENTAL_CXX0X__)
+# define ASIO_HAS_MOVE 1
+#endif // defined(__GXX_EXPERIMENTAL_CXX0X__)
+#   endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)
+#  endif // defined(__GNUC__)
+#  if defined(ASIO_MSVC)
+#   if (_MSC_VER >= 1700)
+#define ASIO_HAS_MOVE 1
+#   endif // (_MSC_VER >= 1700)
+#  endif // defined(ASIO_MSVC)
+# endif // !defined(ASIO_DISABLE_MOVE)
+#endif // !defined(ASIO_HAS_MOVE)
+
+// If ASIO_MOVE_CAST isn't defined, and move support is available, define
+// ASIO_MOVE_ARG and ASIO_MOVE_CAST to take advantage of rvalue
+// references and perfect forwarding.
+#if defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST)
+# define ASIO_MOVE_ARG(type) type&&
+# define ASIO_MOVE_CAST(type) static_cast<type&&>
+# define ASIO_MOVE_CAST2(type1, type2) static_cast<type1, type2&&>
+#endif // defined(ASIO_HAS_MOVE) && !
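
The ASIO_MOVE_ARG / ASIO_MOVE_CAST macros above let the same source compile with and without rvalue-reference support. A simplified, runnable sketch of how calling code consumes them; the macro definitions below are stand-ins for the C++11 branch only, not the real config.hpp logic, and the function name is invented:

#include <iostream>
#include <utility>

// Stand-ins for the C++11 branch of config.hpp (illustration only).
#define ASIO_MOVE_ARG(type) type&&
#define ASIO_MOVE_CAST(type) static_cast<type&&>

template <typename Handler>
void start_op(ASIO_MOVE_ARG(Handler) handler)
{
  // Forwards the handler into local storage without an extra copy on C++11;
  // with the pre-C++11 definitions this degrades to plain pass/copy by value.
  Handler local(ASIO_MOVE_CAST(Handler)(handler));
  local();
}

int main()
{
  start_op([] { std::cout << "handler invoked" << std::endl; });
  return 0;
}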

[14/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_write_stream.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_write_stream.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_write_stream.hpp
new file mode 100644
index 000..1e51c11
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_write_stream.hpp
@@ -0,0 +1,338 @@
+//
+// impl/buffered_write_stream.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP
+#define ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_cont_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+template <typename Stream>
+std::size_t buffered_write_stream<Stream>::flush()
+{
+  std::size_t bytes_written = write(next_layer_,
+  buffer(storage_.data(), storage_.size()));
+  storage_.consume(bytes_written);
+  return bytes_written;
+}
+
+template <typename Stream>
+std::size_t buffered_write_stream<Stream>::flush(asio::error_code& ec)
+{
+  std::size_t bytes_written = write(next_layer_,
+  buffer(storage_.data(), storage_.size()),
+  transfer_all(), ec);
+  storage_.consume(bytes_written);
+  return bytes_written;
+}
+
+namespace detail
+{
+  template <typename WriteHandler>
+  class buffered_flush_handler
+  {
+  public:
+buffered_flush_handler(detail::buffered_stream_storage& storage,
+WriteHandler& handler)
+  : storage_(storage),
+handler_(handler)
+{
+}
+
+#if defined(ASIO_HAS_MOVE)
+buffered_flush_handler(const buffered_flush_handler& other)
+  : storage_(other.storage_),
+handler_(other.handler_)
+{
+}
+
+buffered_flush_handler(buffered_flush_handler&& other)
+  : storage_(other.storage_),
+handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_))
+{
+}
+#endif // defined(ASIO_HAS_MOVE)
+
+void operator()(const asio::error_code& ec,
+const std::size_t bytes_written)
+{
+  storage_.consume(bytes_written);
+  handler_(ec, bytes_written);
+}
+
+  //private:
+detail::buffered_stream_storage& storage_;
+WriteHandler handler_;
+  };
+
+  template <typename WriteHandler>
+  inline void* asio_handler_allocate(std::size_t size,
+  buffered_flush_handler<WriteHandler>* this_handler)
+  {
+return asio_handler_alloc_helpers::allocate(
+size, this_handler->handler_);
+  }
+
+  template <typename WriteHandler>
+  inline void asio_handler_deallocate(void* pointer, std::size_t size,
+  buffered_flush_handler<WriteHandler>* this_handler)
+  {
+asio_handler_alloc_helpers::deallocate(
+pointer, size, this_handler->handler_);
+  }
+
+  template <typename WriteHandler>
+  inline bool asio_handler_is_continuation(
+  buffered_flush_handler<WriteHandler>* this_handler)
+  {
+return asio_handler_cont_helpers::is_continuation(
+  this_handler->handler_);
+  }
+
+  template <typename Function, typename WriteHandler>
+  inline void asio_handler_invoke(Function& function,
+  buffered_flush_handler<WriteHandler>* this_handler)
+  {
+asio_handler_invoke_helpers::invoke(
+function, this_handler->handler_);
+  }
+
+  template <typename Function, typename WriteHandler>
+  inline void asio_handler_invoke(const Function& function,
+  buffered_flush_handler<WriteHandler>* this_handler)
+  {
+asio_handler_invoke_helpers::invoke(
+function, this_handler->handler_);
+  }
+}
+
+template <typename Stream>
+template <typename WriteHandler>
+ASIO_INITFN_RESULT_TYPE(WriteHandler,
+void (asio::error_code, std::size_t))
+buffered_write_stream<Stream>::async_flush(
+ASIO_MOVE_ARG(WriteHandler) handler)
+{
+  // If you get an error on the following line it means that your handler does
+  // not meet the documented type requirements for a WriteHandler.
+  ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;
+
+  detail::async_result_init<
+WriteHandler, void (asio::error_code, std::size_t)> init(
+  ASIO_MOVE_CAST(WriteHandler)(handler));
+
+  async_write(next_layer_, buffer(storage_.data(), storage_.size()),
+  detail::buffered_flush_handler<ASIO_HANDLER_TYPE(WriteHandler, void (asio::error_code, std::size_t))>(
+storage_, init.handler));
+
+  return init.result.get();
+}
+
+template <typename Stream>
+template <typename ConstBufferSequence>
+std::size_t buffered_write_stream<Stream>::write_some(
+const ConstBufferSequence& buffers)
+{
+  if (asio::buffer_size(buffers) == 0)
+return 0;
+
+  if (sto

[28/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/eventfd_select_interrupter.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/eventfd_select_interrupter.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/eventfd_select_interrupter.hpp
new file mode 100644
index 000..8beb658
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/eventfd_select_interrupter.hpp
@@ -0,0 +1,83 @@
+//
+// detail/eventfd_select_interrupter.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
+#define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_EVENTFD)
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+class eventfd_select_interrupter
+{
+public:
+  // Constructor.
+  ASIO_DECL eventfd_select_interrupter();
+
+  // Destructor.
+  ASIO_DECL ~eventfd_select_interrupter();
+
+  // Recreate the interrupter's descriptors. Used after a fork.
+  ASIO_DECL void recreate();
+
+  // Interrupt the select call.
+  ASIO_DECL void interrupt();
+
+  // Reset the select interrupt. Returns true if the call was interrupted.
+  ASIO_DECL bool reset();
+
+  // Get the read descriptor to be passed to select.
+  int read_descriptor() const
+  {
+return read_descriptor_;
+  }
+
+private:
+  // Open the descriptors. Throws on error.
+  ASIO_DECL void open_descriptors();
+
+  // Close the descriptors.
+  ASIO_DECL void close_descriptors();
+
+  // The read end of a connection used to interrupt the select call. This file
+  // descriptor is passed to select such that when it is time to stop, a single
+  // 64bit value will be written on the other end of the connection and this
+  // descriptor will become readable.
+  int read_descriptor_;
+
+  // The write end of a connection used to interrupt the select call. A single
+  // 64bit non-zero value may be written to this to wake up the select which is
+  // waiting for the other end to become readable. This descriptor will only
+  // differ from the read descriptor when a pipe is used.
+  int write_descriptor_;
+};
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#if defined(ASIO_HEADER_ONLY)
+# include "asio/detail/impl/eventfd_select_interrupter.ipp"
+#endif // defined(ASIO_HEADER_ONLY)
+
+#endif // defined(ASIO_HAS_EVENTFD)
+
+#endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fd_set_adapter.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fd_set_adapter.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fd_set_adapter.hpp
new file mode 100644
index 000..af85e09
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fd_set_adapter.hpp
@@ -0,0 +1,39 @@
+//
+// detail/fd_set_adapter.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP
+#define ASIO_DETAIL_FD_SET_ADAPTER_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_WINDOWS_RUNTIME)
+
+#include "asio/detail/posix_fd_set_adapter.hpp"
+#include "asio/detail/win_fd_set_adapter.hpp"
+
+namespace asio {
+namespace detail {
+
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+typedef win_fd_set_adapter fd_set_adapter;
+#else
+typedef posix_fd_set_adapter fd_set_adapter;
+#endif
+
+} // namespace detail
+} // namespace asio
+
+#endif // !defined(ASIO_W

[06/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_base.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_base.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_base.hpp
new file mode 100644
index 000..5412162
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_base.hpp
@@ -0,0 +1,520 @@
+//
+// socket_base.hpp
+// ~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_SOCKET_BASE_HPP
+#define ASIO_SOCKET_BASE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/io_control.hpp"
+#include "asio/detail/socket_option.hpp"
+#include "asio/detail/socket_types.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// The socket_base class is used as a base for the basic_stream_socket and
+/// basic_datagram_socket class templates so that we have a common place to
+/// define the shutdown_type and enum.
+class socket_base
+{
+public:
+  /// Different ways a socket may be shutdown.
+  enum shutdown_type
+  {
+#if defined(GENERATING_DOCUMENTATION)
+/// Shutdown the receive side of the socket.
+shutdown_receive = implementation_defined,
+
+/// Shutdown the send side of the socket.
+shutdown_send = implementation_defined,
+
+/// Shutdown both send and receive on the socket.
+shutdown_both = implementation_defined
+#else
+shutdown_receive = ASIO_OS_DEF(SHUT_RD),
+shutdown_send = ASIO_OS_DEF(SHUT_WR),
+shutdown_both = ASIO_OS_DEF(SHUT_RDWR)
+#endif
+  };
+
+  /// Bitmask type for flags that can be passed to send and receive operations.
+  typedef int message_flags;
+
+#if defined(GENERATING_DOCUMENTATION)
+  /// Peek at incoming data without removing it from the input queue.
+  static const int message_peek = implementation_defined;
+
+  /// Process out-of-band data.
+  static const int message_out_of_band = implementation_defined;
+
+  /// Specify that the data should not be subject to routing.
+  static const int message_do_not_route = implementation_defined;
+
+  /// Specifies that the data marks the end of a record.
+  static const int message_end_of_record = implementation_defined;
+#else
+  ASIO_STATIC_CONSTANT(int,
+  message_peek = ASIO_OS_DEF(MSG_PEEK));
+  ASIO_STATIC_CONSTANT(int,
+  message_out_of_band = ASIO_OS_DEF(MSG_OOB));
+  ASIO_STATIC_CONSTANT(int,
+  message_do_not_route = ASIO_OS_DEF(MSG_DONTROUTE));
+  ASIO_STATIC_CONSTANT(int,
+  message_end_of_record = ASIO_OS_DEF(MSG_EOR));
+#endif
+
+  /// Socket option to permit sending of broadcast messages.
+  /**
+   * Implements the SOL_SOCKET/SO_BROADCAST socket option.
+   *
+   * @par Examples
+   * Setting the option:
+   * @code
+   * asio::ip::udp::socket socket(io_service); 
+   * ...
+   * asio::socket_base::broadcast option(true);
+   * socket.set_option(option);
+   * @endcode
+   *
+   * @par
+   * Getting the current option value:
+   * @code
+   * asio::ip::udp::socket socket(io_service); 
+   * ...
+   * asio::socket_base::broadcast option;
+   * socket.get_option(option);
+   * bool is_set = option.value();
+   * @endcode
+   *
+   * @par Concepts:
+   * Socket_Option, Boolean_Socket_Option.
+   */
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined broadcast;
+#else
+  typedef asio::detail::socket_option::boolean<
+ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_BROADCAST)>
+  broadcast;
+#endif
+
+  /// Socket option to enable socket-level debugging.
+  /**
+   * Implements the SOL_SOCKET/SO_DEBUG socket option.
+   *
+   * @par Examples
+   * Setting the option:
+   * @code
+   * asio::ip::tcp::socket socket(io_service); 
+   * ...
+   * asio::socket_base::debug option(true);
+   * socket.set_option(option);
+   * @endcode
+   *
+   * @par
+   * Getting the current option value:
+   * @code
+   * asio::ip::tcp::socket socket(io_service); 
+   * ...
+   * asio::socket_base::debug option;
+   * socket.get_option(option);
+   * bool is_set = option.value();
+   * @endcode
+   *
+   * @par Concepts:
+   * Socket_Option, Boolean_Socket_Option.
+   */
+#if defined(GENERATING_DOCUMENTATION)
+  typedef implementation_defined debug;
+#else
+  typedef asio::detail::socket_option::boolean<
+ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DEBUG)> debug;
+#endif
+
+  /// Socket option to pr
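
For reference, the shutdown_type and message_flags constants declared above are
used roughly as follows; a minimal sketch assuming an already-connected
asio::ip::tcp::socket and the asio 1.10.2 API carried by this patch (the helper
function name is illustrative, not part of the change):

#include <asio.hpp>
#include <cstddef>

// 'sock' is assumed to be a connected TCP socket supplied by the caller.
std::size_t peek_then_half_close(asio::ip::tcp::socket& sock)
{
  char buf[128];

  // message_peek leaves the data queued, so the next receive() sees it again.
  std::size_t n = sock.receive(asio::buffer(buf),
      asio::socket_base::message_peek);

  // Half-close: no further sends from this side; receives remain possible.
  sock.shutdown(asio::socket_base::shutdown_send);
  return n;
}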

[15/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wrapped_handler.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wrapped_handler.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wrapped_handler.hpp
new file mode 100644
index 000..5cefcf4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wrapped_handler.hpp
@@ -0,0 +1,291 @@
+//
+// detail/wrapped_handler.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_WRAPPED_HANDLER_HPP
+#define ASIO_DETAIL_WRAPPED_HANDLER_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/bind_handler.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_cont_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+struct is_continuation_delegated
+{
+  template <typename Dispatcher, typename Handler>
+  bool operator()(Dispatcher&, Handler& handler) const
+  {
+return asio_handler_cont_helpers::is_continuation(handler);
+  }
+};
+
+struct is_continuation_if_running
+{
+  template <typename Dispatcher, typename Handler>
+  bool operator()(Dispatcher& dispatcher, Handler&) const
+  {
+return dispatcher.running_in_this_thread();
+  }
+};
+
+template <typename Dispatcher, typename Handler,
+    typename IsContinuation = is_continuation_delegated>
+class wrapped_handler
+{
+public:
+  typedef void result_type;
+
+  wrapped_handler(Dispatcher dispatcher, Handler& handler)
+: dispatcher_(dispatcher),
+  handler_(ASIO_MOVE_CAST(Handler)(handler))
+  {
+  }
+
+#if defined(ASIO_HAS_MOVE)
+  wrapped_handler(const wrapped_handler& other)
+: dispatcher_(other.dispatcher_),
+  handler_(other.handler_)
+  {
+  }
+
+  wrapped_handler(wrapped_handler&& other)
+: dispatcher_(other.dispatcher_),
+  handler_(ASIO_MOVE_CAST(Handler)(other.handler_))
+  {
+  }
+#endif // defined(ASIO_HAS_MOVE)
+
+  void operator()()
+  {
+dispatcher_.dispatch(ASIO_MOVE_CAST(Handler)(handler_));
+  }
+
+  void operator()() const
+  {
+dispatcher_.dispatch(handler_);
+  }
+
+  template <typename Arg1>
+  void operator()(const Arg1& arg1)
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1));
+  }
+
+  template <typename Arg1>
+  void operator()(const Arg1& arg1) const
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1));
+  }
+
+  template <typename Arg1, typename Arg2>
+  void operator()(const Arg1& arg1, const Arg2& arg2)
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2));
+  }
+
+  template <typename Arg1, typename Arg2>
+  void operator()(const Arg1& arg1, const Arg2& arg2) const
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3)
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) const
+  {
+dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+  const Arg4& arg4)
+  {
+dispatcher_.dispatch(
+detail::bind_handler(handler_, arg1, arg2, arg3, arg4));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+  const Arg4& arg4) const
+  {
+dispatcher_.dispatch(
+detail::bind_handler(handler_, arg1, arg2, arg3, arg4));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4,
+      typename Arg5>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+  const Arg4& arg4, const Arg5& arg5)
+  {
+dispatcher_.dispatch(
+detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5));
+  }
+
+  template <typename Arg1, typename Arg2, typename Arg3, typename Arg4,
+      typename Arg5>
+  void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+  const Arg4& arg4, const Arg5& arg5) const
+  {
+dispatcher_.dispatch(
+detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5));
+  }
+
+//private:
+  Dispatcher dispatcher_;
+  Handler handler_;
+};
+
+template <typename Handler, typename Context>
+class rewrapped_handler
+{
+public:
+  explicit rewrapped_handler(Handler& handler, const Context& context)
+: context_(context),
+  handler_(ASIO_MOVE_CAST(Handler)(handler))
+  {
+  }
+
+  explicit rewrapped_handler(const Handler& handler, const Context& context)
+: context_(context),
+  handler_(handler)
+  {
+  }
+
+#if defined(ASIO_HAS_MOVE)
+  rewrapped_handler(const rewrapp
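
wrapped_handler is what dispatcher wrappers such as io_service::strand::wrap()
return: invoking the wrapper does not run the handler directly, it re-dispatches
it through the stored dispatcher. A rough sketch of the user-facing pattern,
assuming standalone asio 1.10.2 (the counter and lambda are illustrative only):

#include <asio.hpp>
#include <iostream>

int main()
{
  asio::io_service io_service;
  asio::io_service::strand strand(io_service);
  int counter = 0;

  // strand.wrap() returns a detail::wrapped_handler<strand, Handler>; calling
  // it dispatches the lambda through the strand, so handlers wrapped by the
  // same strand never run concurrently with each other.
  for (int i = 0; i < 2; ++i)
    io_service.post(strand.wrap([&counter] { ++counter; }));

  io_service.run();
  std::cout << counter << std::endl;  // prints 2
  return 0;
}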

[21/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_tss_ptr.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_tss_ptr.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_tss_ptr.hpp
new file mode 100644
index 000..23f93ae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_tss_ptr.hpp
@@ -0,0 +1,79 @@
+//
+// detail/posix_tss_ptr.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP
+#define ASIO_DETAIL_POSIX_TSS_PTR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_PTHREADS)
+
+#include <pthread.h>
+#include "asio/detail/noncopyable.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Helper function to create thread-specific storage.
+ASIO_DECL void posix_tss_ptr_create(pthread_key_t& key);
+
+template <typename T>
+class posix_tss_ptr
+  : private noncopyable
+{
+public:
+  // Constructor.
+  posix_tss_ptr()
+  {
+posix_tss_ptr_create(tss_key_);
+  }
+
+  // Destructor.
+  ~posix_tss_ptr()
+  {
+::pthread_key_delete(tss_key_);
+  }
+
+  // Get the value.
+  operator T*() const
+  {
+    return static_cast<T*>(::pthread_getspecific(tss_key_));
+  }
+
+  // Set the value.
+  void operator=(T* value)
+  {
+::pthread_setspecific(tss_key_, value);
+  }
+
+private:
+  // Thread-specific storage to allow unlocked access to determine whether a
+  // thread is a member of the pool.
+  pthread_key_t tss_key_;
+};
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#if defined(ASIO_HEADER_ONLY)
+# include "asio/detail/impl/posix_tss_ptr.ipp"
+#endif // defined(ASIO_HEADER_ONLY)
+
+#endif // defined(ASIO_HAS_PTHREADS)
+
+#endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/push_options.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/push_options.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/push_options.hpp
new file mode 100644
index 000..c5ccd47
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/push_options.hpp
@@ -0,0 +1,138 @@
+//
+// detail/push_options.hpp
+// ~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+// No header guard
+
+#if defined(__COMO__)
+
+// Comeau C++
+
+#elif defined(__DMC__)
+
+// Digital Mars C++
+
+#elif defined(__INTEL_COMPILER) || defined(__ICL) \
+  || defined(__ICC) || defined(__ECC)
+
+// Intel C++
+
+#elif defined(__GNUC__)
+
+// GNU C++
+
+# if defined(__MINGW32__) || defined(__CYGWIN__)
+#  pragma pack (push, 8)
+# endif
+
+# if defined(__OBJC__)
+#  if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1)
+#   if !defined(ASIO_DISABLE_OBJC_WORKAROUND)
+#if !defined(Protocol) && !defined(id)
+# define Protocol cpp_Protocol
+# define id cpp_id
+# define ASIO_OBJC_WORKAROUND
+#endif
+#   endif
+#  endif
+# endif
+
+#elif defined(__KCC)
+
+// Kai C++
+
+#elif defined(__sgi)
+
+// SGI MIPSpro C++
+
+#elif defined(__DECCXX)
+
+// Compaq Tru64 Unix cxx
+
+#elif defined(__ghs)
+
+// Greenhills C++
+
+#elif defined(__BORLANDC__)
+
+// Borland C++
+
+# pragma option push -a8 -b -Ve- -Vx- -w-inl -vi-
+# pragma nopushoptwarn
+# pragma nopackwarning
+# if !defined(__MT__)
+#  error Multithreaded RTL must be selected.
+# endif // !defined(__MT__)
+
+#elif defined(__MWERKS__)
+
+// Metrowerks CodeWarrior
+
+#elif defined(__SUNPRO_CC)
+
+// Sun Workshop Compiler C++
+
+#elif defined(__HP_aCC)
+
+// HP aCC
+
+#elif defined(__MRC__) || defined(__SC__)
+
+// MPW MrCpp or SCpp
+
+#elif defined(__IBMCPP__)
+
+// IBM Visual Age
+
+#elif defined(_MSC_VER)
+
+// Microsoft Visual C++
+/

[36/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp
new file mode 100644
index 000..b0f3f18
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp
@@ -0,0 +1,940 @@
+//
+// basic_raw_socket.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_BASIC_RAW_SOCKET_HPP
+#define ASIO_BASIC_RAW_SOCKET_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include <cstddef>
+#include "asio/basic_socket.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/type_traits.hpp"
+#include "asio/error.hpp"
+#include "asio/raw_socket_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// Provides raw-oriented socket functionality.
+/**
+ * The basic_raw_socket class template provides asynchronous and blocking
+ * raw-oriented socket functionality.
+ *
+ * @par Thread Safety
+ * @e Distinct @e objects: Safe.@n
+ * @e Shared @e objects: Unsafe.
+ */
+template <typename Protocol,
+    typename RawSocketService = raw_socket_service<Protocol> >
+class basic_raw_socket
+  : public basic_socket<Protocol, RawSocketService>
+{
+public:
+  /// (Deprecated: Use native_handle_type.) The native representation of a
+  /// socket.
+  typedef typename RawSocketService::native_handle_type native_type;
+
+  /// The native representation of a socket.
+  typedef typename RawSocketService::native_handle_type native_handle_type;
+
+  /// The protocol type.
+  typedef Protocol protocol_type;
+
+  /// The endpoint type.
+  typedef typename Protocol::endpoint endpoint_type;
+
+  /// Construct a basic_raw_socket without opening it.
+  /**
+   * This constructor creates a raw socket without opening it. The open()
+   * function must be called before data can be sent or received on the socket.
+   *
+   * @param io_service The io_service object that the raw socket will use
+   * to dispatch handlers for any asynchronous operations performed on the
+   * socket.
+   */
+  explicit basic_raw_socket(asio::io_service& io_service)
+    : basic_socket<Protocol, RawSocketService>(io_service)
+  {
+  }
+
+  /// Construct and open a basic_raw_socket.
+  /**
+   * This constructor creates and opens a raw socket.
+   *
+   * @param io_service The io_service object that the raw socket will use
+   * to dispatch handlers for any asynchronous operations performed on the
+   * socket.
+   *
+   * @param protocol An object specifying protocol parameters to be used.
+   *
+   * @throws asio::system_error Thrown on failure.
+   */
+  basic_raw_socket(asio::io_service& io_service,
+  const protocol_type& protocol)
+    : basic_socket<Protocol, RawSocketService>(io_service, protocol)
+  {
+  }
+
+  /// Construct a basic_raw_socket, opening it and binding it to the given
+  /// local endpoint.
+  /**
+   * This constructor creates a raw socket and automatically opens it bound
+   * to the specified endpoint on the local machine. The protocol used is the
+   * protocol associated with the given endpoint.
+   *
+   * @param io_service The io_service object that the raw socket will use
+   * to dispatch handlers for any asynchronous operations performed on the
+   * socket.
+   *
+   * @param endpoint An endpoint on the local machine to which the raw
+   * socket will be bound.
+   *
+   * @throws asio::system_error Thrown on failure.
+   */
+  basic_raw_socket(asio::io_service& io_service,
+  const endpoint_type& endpoint)
+    : basic_socket<Protocol, RawSocketService>(io_service, endpoint)
+  {
+  }
+
+  /// Construct a basic_raw_socket on an existing native socket.
+  /**
+   * This constructor creates a raw socket object to hold an existing
+   * native socket.
+   *
+   * @param io_service The io_service object that the raw socket will use
+   * to dispatch handlers for any asynchronous operations performed on the
+   * socket.
+   *
+   * @param protocol An object specifying protocol parameters to be used.
+   *
+   * @param native_socket The new underlying socket implementation.
+   *
+   * @throws asio::system_error Thrown on failure.
+   */
+  basic_raw_socket(asio::io_service& io_service,
+  const protocol_type& protocol, const native_handle_type& native_socket)
+    : basic_socket<Protocol, RawSocketService>(
+        io_service, protocol, native_socket)
+  {
+  }
+
+#if de
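
basic_raw_socket is normally reached through the asio::ip::icmp typedefs. A
hedged sketch of constructing one and sending a raw datagram (the ICMP header
here is left without a checksum, so this shows the API shape rather than a
working ping, and raw sockets usually require elevated privileges):

#include <asio.hpp>
#include <cstring>

int main()
{
  asio::io_service io_service;

  // asio::ip::icmp::socket is basic_raw_socket<asio::ip::icmp>.
  asio::ip::icmp::socket socket(io_service, asio::ip::icmp::v4());

  asio::ip::icmp::endpoint destination(
      asio::ip::address::from_string("127.0.0.1"), 0);

  unsigned char request[8];
  std::memset(request, 0, sizeof(request));
  request[0] = 8;  // ICMP echo request type; checksum intentionally left zero

  socket.send_to(asio::buffer(request), destination);
  return 0;
}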

[12/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/spawn.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/spawn.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/spawn.hpp
new file mode 100644
index 000..f5a504e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/spawn.hpp
@@ -0,0 +1,336 @@
+//
+// impl/spawn.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_IMPL_SPAWN_HPP
+#define ASIO_IMPL_SPAWN_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/async_result.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_cont_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+#include "asio/detail/noncopyable.hpp"
+#include "asio/detail/shared_ptr.hpp"
+#include "asio/handler_type.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+  template <typename Handler, typename T>
+  class coro_handler
+  {
+  public:
+    coro_handler(basic_yield_context<Handler> ctx)
+  : coro_(ctx.coro_.lock()),
+ca_(ctx.ca_),
+handler_(ctx.handler_),
+ec_(ctx.ec_),
+value_(0)
+{
+}
+
+void operator()(T value)
+{
+  *ec_ = asio::error_code();
+  *value_ = value;
+  (*coro_)();
+}
+
+void operator()(asio::error_code ec, T value)
+{
+  *ec_ = ec;
+  *value_ = value;
+  (*coro_)();
+}
+
+  //private:
+    shared_ptr<typename basic_yield_context<Handler>::callee_type> coro_;
+    typename basic_yield_context<Handler>::caller_type& ca_;
+Handler& handler_;
+asio::error_code* ec_;
+T* value_;
+  };
+
+  template <typename Handler>
+  class coro_handler<Handler, void>
+  {
+  public:
+    coro_handler(basic_yield_context<Handler> ctx)
+  : coro_(ctx.coro_.lock()),
+ca_(ctx.ca_),
+handler_(ctx.handler_),
+ec_(ctx.ec_)
+{
+}
+
+void operator()()
+{
+  *ec_ = asio::error_code();
+  (*coro_)();
+}
+
+void operator()(asio::error_code ec)
+{
+  *ec_ = ec;
+  (*coro_)();
+}
+
+  //private:
+    shared_ptr<typename basic_yield_context<Handler>::callee_type> coro_;
+    typename basic_yield_context<Handler>::caller_type& ca_;
+Handler& handler_;
+asio::error_code* ec_;
+  };
+
+  template <typename Handler, typename T>
+  inline void* asio_handler_allocate(std::size_t size,
+      coro_handler<Handler, T>* this_handler)
+  {
+return asio_handler_alloc_helpers::allocate(
+size, this_handler->handler_);
+  }
+
+  template <typename Handler, typename T>
+  inline void asio_handler_deallocate(void* pointer, std::size_t size,
+      coro_handler<Handler, T>* this_handler)
+  {
+asio_handler_alloc_helpers::deallocate(
+pointer, size, this_handler->handler_);
+  }
+
+  template <typename Handler, typename T>
+  inline bool asio_handler_is_continuation(coro_handler<Handler, T>*)
+  {
+return true;
+  }
+
+  template <typename Function, typename Handler, typename T>
+  inline void asio_handler_invoke(Function& function,
+      coro_handler<Handler, T>* this_handler)
+  {
+asio_handler_invoke_helpers::invoke(
+function, this_handler->handler_);
+  }
+
+  template <typename Function, typename Handler, typename T>
+  inline void asio_handler_invoke(const Function& function,
+      coro_handler<Handler, T>* this_handler)
+  {
+asio_handler_invoke_helpers::invoke(
+function, this_handler->handler_);
+  }
+
+} // namespace detail
+
+#if !defined(GENERATING_DOCUMENTATION)
+
+template <typename Handler, typename ReturnType>
+struct handler_type<basic_yield_context<Handler>, ReturnType()>
+{
+  typedef detail::coro_handler<Handler, void> type;
+};
+
+template <typename Handler, typename ReturnType, typename Arg1>
+struct handler_type<basic_yield_context<Handler>, ReturnType(Arg1)>
+{
+  typedef detail::coro_handler<Handler, Arg1> type;
+};
+
+template <typename Handler, typename ReturnType>
+struct handler_type<basic_yield_context<Handler>,
+    ReturnType(asio::error_code)>
+{
+  typedef detail::coro_handler<Handler, void> type;
+};
+
+template <typename Handler, typename ReturnType, typename Arg2>
+struct handler_type<basic_yield_context<Handler>,
+    ReturnType(asio::error_code, Arg2)>
+{
+  typedef detail::coro_handler<Handler, Arg2> type;
+};
+
+template <typename Handler, typename T>
+class async_result<detail::coro_handler<Handler, T> >
+{
+public:
+  typedef T type;
+
+  explicit async_result(detail::coro_handler<Handler, T>& h)
+: handler_(h),
+  ca_(h.ca_)
+  {
+out_ec_ = h.ec_;
+if (!out_ec_) h.ec_ = &ec_;
+h.value_ = &value_;
+  }
+
+  type get()
+  {
+handler_.coro_.reset(); // Must not hold shared_ptr to coro while 
suspended.
+ca_();
+if (!out_ec_ && ec_) throw asio::system_error(ec_);
+return value_;
+  }
+
+private:
+  detail::coro_handler<Handler, T>& handler_;
+  typename basic_yield_context<Handler>::caller_type& ca_;
+  asio::error_code* out_ec_;
+  asio::error_code ec_;
+  type value_;
+};
+
+template <typename Handler>
+class async_result<detail::coro_handler<Handler, void> >
+{
+public:
+  typedef void type;
+
+  explicit async_res
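
The coro_handler and async_result machinery in this file is what lets a
basic_yield_context be passed wherever a completion handler is expected; user
code only sees asio::spawn. A sketch of that usage, assuming asio 1.10.2 built
with Boost.Coroutine support (the address, port and buffer are placeholders):

#include <asio.hpp>
#include <asio/spawn.hpp>
#include <array>

int main()
{
  asio::io_service io_service;

  asio::spawn(io_service, [&](asio::yield_context yield)
  {
    asio::ip::tcp::socket socket(io_service);
    asio::error_code ec;
    asio::ip::tcp::endpoint endpoint(
        asio::ip::address::from_string("127.0.0.1"), 8020);
    std::array<char, 128> data;

    // Each async_* call suspends the coroutine; coro_handler resumes it when
    // the operation completes. yield[ec] routes errors into ec instead of
    // letting async_result::get() throw asio::system_error.
    socket.async_connect(endpoint, yield[ec]);
    if (!ec)
      socket.async_read_some(asio::buffer(data), yield[ec]);
  });

  io_service.run();
  return 0;
}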

[37/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp
new file mode 100644
index 000..b98d770
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp
@@ -0,0 +1,94 @@
+//
+// async_result.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_ASYNC_RESULT_HPP
+#define ASIO_ASYNC_RESULT_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/handler_type.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// An interface for customising the behaviour of an initiating function.
+/**
+ * This template may be specialised for user-defined handler types.
+ */
+template <typename Handler>
+class async_result
+{
+public:
+  /// The return type of the initiating function.
+  typedef void type;
+
+  /// Construct an async result from a given handler.
+  /**
+   * When using a specalised async_result, the constructor has an opportunity
+   * to initialise some state associated with the handler, which is then
+   * returned from the initiating function.
+   */
+  explicit async_result(Handler&)
+  {
+  }
+
+  /// Obtain the value to be returned from the initiating function.
+  type get()
+  {
+  }
+};
+
+namespace detail {
+
+// Helper template to deduce the true type of a handler, capture a local copy
+// of the handler, and then create an async_result for the handler.
+template <typename Handler, typename Signature>
+struct async_result_init
+{
+  explicit async_result_init(ASIO_MOVE_ARG(Handler) orig_handler)
+: handler(ASIO_MOVE_CAST(Handler)(orig_handler)),
+  result(handler)
+  {
+  }
+
+  typename handler_type<Handler, Signature>::type handler;
+  async_result<typename handler_type<Handler, Signature>::type> result;
+};
+
+template <typename Handler, typename Signature>
+struct async_result_type_helper
+{
+  typedef typename async_result<
+      typename handler_type<Handler, Signature>::type
+    >::type type;
+};
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#if defined(GENERATING_DOCUMENTATION)
+# define ASIO_INITFN_RESULT_TYPE(h, sig) \
+  void_or_deduced
+#elif defined(_MSC_VER) && (_MSC_VER < 1500)
+# define ASIO_INITFN_RESULT_TYPE(h, sig) \
+  typename ::asio::detail::async_result_type_helper<h, sig>::type
+#else
+# define ASIO_INITFN_RESULT_TYPE(h, sig) \
+  typename ::asio::async_result< \
+    typename ::asio::handler_type<h, sig>::type>::type
+#endif
+
+#endif // ASIO_ASYNC_RESULT_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp
new file mode 100644
index 000..a1356b9
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp
@@ -0,0 +1,949 @@
+//
+// basic_datagram_socket.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP
+#define ASIO_BASIC_DATAGRAM_SOCKET_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include <cstddef>
+#include "asio/basic_socket.hpp"
+#include "asio/datagram_socket_service.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/type_traits.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// Provides datagram-oriented socket functionality.
+/**
+ * The basic_datagram_socket class template provides asynchronous and blocking
+ * datagram-oriented socket functionality.
+ *
+ * @par Thread Safety
+ * @e Distinct @e objects: S
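
async_result and handler_type, shown at the top of this message, are the hooks
that let a single initiating function return void for a plain callback, a
std::future for asio::use_future, or a value for a yield_context. A sketch of an
initiating function written against that protocol (the operation is faked by
posting the handler straight back; the function name is illustrative):

#include <asio.hpp>

template <typename Handler>
ASIO_INITFN_RESULT_TYPE(Handler, void(asio::error_code))
async_do_nothing(asio::io_service& io_service, ASIO_MOVE_ARG(Handler) handler)
{
  // Deduce the real handler type, take a copy, and build its async_result
  // before starting the operation, as asio's own initiating functions do.
  asio::detail::async_result_init<Handler, void(asio::error_code)> init(
      ASIO_MOVE_CAST(Handler)(handler));

  auto h = init.handler;
  io_service.post([h]() mutable { h(asio::error_code()); });

  return init.result.get();
}

int main()
{
  asio::io_service io_service;
  async_do_nothing(io_service, [](const asio::error_code&) {
    // With a plain callback the deduced return type is simply void.
  });
  io_service.run();
  return 0;
}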

[34/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
new file mode 100644
index 000..f69f483
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
@@ -0,0 +1,1136 @@
+//
+// basic_socket_acceptor.hpp
+// ~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_BASIC_SOCKET_ACCEPTOR_HPP
+#define ASIO_BASIC_SOCKET_ACCEPTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/basic_io_object.hpp"
+#include "asio/basic_socket.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/type_traits.hpp"
+#include "asio/error.hpp"
+#include "asio/socket_acceptor_service.hpp"
+#include "asio/socket_base.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// Provides the ability to accept new connections.
+/**
+ * The basic_socket_acceptor class template is used for accepting new socket
+ * connections.
+ *
+ * @par Thread Safety
+ * @e Distinct @e objects: Safe.@n
+ * @e Shared @e objects: Unsafe.
+ *
+ * @par Example
+ * Opening a socket acceptor with the SO_REUSEADDR option enabled:
+ * @code
+ * asio::ip::tcp::acceptor acceptor(io_service);
+ * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port);
+ * acceptor.open(endpoint.protocol());
+ * acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true));
+ * acceptor.bind(endpoint);
+ * acceptor.listen();
+ * @endcode
+ */
+template <typename Protocol,
+    typename SocketAcceptorService = socket_acceptor_service<Protocol> >
+class basic_socket_acceptor
+  : public basic_io_object<SocketAcceptorService>,
+public socket_base
+{
+public:
+  /// (Deprecated: Use native_handle_type.) The native representation of an
+  /// acceptor.
+  typedef typename SocketAcceptorService::native_handle_type native_type;
+
+  /// The native representation of an acceptor.
+  typedef typename SocketAcceptorService::native_handle_type 
native_handle_type;
+
+  /// The protocol type.
+  typedef Protocol protocol_type;
+
+  /// The endpoint type.
+  typedef typename Protocol::endpoint endpoint_type;
+
+  /// Construct an acceptor without opening it.
+  /**
+   * This constructor creates an acceptor without opening it to listen for new
+   * connections. The open() function must be called before the acceptor can
+   * accept new socket connections.
+   *
+   * @param io_service The io_service object that the acceptor will use to
+   * dispatch handlers for any asynchronous operations performed on the
+   * acceptor.
+   */
+  explicit basic_socket_acceptor(asio::io_service& io_service)
+    : basic_io_object<SocketAcceptorService>(io_service)
+  {
+  }
+
+  /// Construct an open acceptor.
+  /**
+   * This constructor creates an acceptor and automatically opens it.
+   *
+   * @param io_service The io_service object that the acceptor will use to
+   * dispatch handlers for any asynchronous operations performed on the
+   * acceptor.
+   *
+   * @param protocol An object specifying protocol parameters to be used.
+   *
+   * @throws asio::system_error Thrown on failure.
+   */
+  basic_socket_acceptor(asio::io_service& io_service,
+  const protocol_type& protocol)
+    : basic_io_object<SocketAcceptorService>(io_service)
+  {
+asio::error_code ec;
+this->get_service().open(this->get_implementation(), protocol, ec);
+asio::detail::throw_error(ec, "open");
+  }
+
+  /// Construct an acceptor opened on the given endpoint.
+  /**
+   * This constructor creates an acceptor and automatically opens it to listen
+   * for new connections on the specified endpoint.
+   *
+   * @param io_service The io_service object that the acceptor will use to
+   * dispatch handlers for any asynchronous operations performed on the
+   * acceptor.
+   *
+   * @param endpoint An endpoint on the local machine on which the acceptor
+   * will listen for new connections.
+   *
+   * @param reuse_addr Whether the constructor should set the socket option
+   * socket_base::reuse_address.
+   *
+   * @throws asio::system_error Thrown on failure.
+   *
+   * @note This constructor is equivalent to the following code:
+   * @code
+   * basic_socket_acceptor acceptor(io_service);
+   * acceptor.open(endpoint.protocol());
+   * if (
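
The @note block above is cut off by the digest; the conventional remainder of
that equivalence in asio (a sketch, not recovered from this patch) continues
along these lines:

if (reuse_addr)
  acceptor.set_option(asio::socket_base::reuse_address(true));
acceptor.bind(endpoint);
acceptor.listen(asio::socket_base::max_connections);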

[03/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/rfc2818_verification.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/rfc2818_verification.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/rfc2818_verification.hpp
new file mode 100644
index 000..acadfaa
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/rfc2818_verification.hpp
@@ -0,0 +1,100 @@
+//
+// ssl/rfc2818_verification.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_SSL_RFC2818_VERIFICATION_HPP
+#define ASIO_SSL_RFC2818_VERIFICATION_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_ENABLE_OLD_SSL)
+# include <string>
+# include "asio/ssl/detail/openssl_types.hpp"
+# include "asio/ssl/verify_context.hpp"
+#endif // !defined(ASIO_ENABLE_OLD_SSL)
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace ssl {
+
+#if !defined(ASIO_ENABLE_OLD_SSL)
+
+/// Verifies a certificate against a hostname according to the rules described
+/// in RFC 2818.
+/**
+ * @par Example
+ * The following example shows how to synchronously open a secure connection to
+ * a given host name:
+ * @code
+ * using asio::ip::tcp;
+ * namespace ssl = asio::ssl;
+ * typedef ssl::stream<tcp::socket> ssl_socket;
+ *
+ * // Create a context that uses the default paths for finding CA certificates.
+ * ssl::context ctx(ssl::context::sslv23);
+ * ctx.set_default_verify_paths();
+ *
+ * // Open a socket and connect it to the remote host.
+ * asio::io_service io_service;
+ * ssl_socket sock(io_service, ctx);
+ * tcp::resolver resolver(io_service);
+ * tcp::resolver::query query("host.name", "https");
+ * asio::connect(sock.lowest_layer(), resolver.resolve(query));
+ * sock.lowest_layer().set_option(tcp::no_delay(true));
+ *
+ * // Perform SSL handshake and verify the remote host's certificate.
+ * sock.set_verify_mode(ssl::verify_peer);
+ * sock.set_verify_callback(ssl::rfc2818_verification("host.name"));
+ * sock.handshake(ssl_socket::client);
+ *
+ * // ... read and write as normal ...
+ * @endcode
+ */
+class rfc2818_verification
+{
+public:
+  /// The type of the function object's result.
+  typedef bool result_type;
+
+  /// Constructor.
+  explicit rfc2818_verification(const std::string& host)
+: host_(host)
+  {
+  }
+
+  /// Perform certificate verification.
+  ASIO_DECL bool operator()(bool preverified, verify_context& ctx) const;
+
+private:
+  // Helper function to check a host name against a pattern.
+  ASIO_DECL static bool match_pattern(const char* pattern,
+  std::size_t pattern_length, const char* host);
+
+  // Helper function to check a host name against an IPv4 address
+  // The host name to be checked.
+  std::string host_;
+};
+
+#endif // defined(ASIO_ENABLE_OLD_SSL)
+
+} // namespace ssl
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#if defined(ASIO_HEADER_ONLY)
+# include "asio/ssl/impl/rfc2818_verification.ipp"
+#endif // defined(ASIO_HEADER_ONLY)
+
+#endif // ASIO_SSL_RFC2818_VERIFICATION_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream.hpp
new file mode 100644
index 000..5e3af01
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream.hpp
@@ -0,0 +1,756 @@
+//
+// ssl/stream.hpp
+// ~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_SSL_STREAM_HPP
+#define ASIO_SSL_STREAM_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_ENABLE_OLD_SSL)
+# include 

[35/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp
new file mode 100644
index 000..2dd71ce
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp
@@ -0,0 +1,384 @@
+//
+// basic_signal_set.hpp
+// 
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_BASIC_SIGNAL_SET_HPP
+#define ASIO_BASIC_SIGNAL_SET_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#include "asio/basic_io_object.hpp"
+#include "asio/detail/handler_type_requirements.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+#include "asio/signal_set_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+
+/// Provides signal functionality.
+/**
+ * The basic_signal_set class template provides the ability to perform an
+ * asynchronous wait for one or more signals to occur.
+ *
+ * Most applications will use the asio::signal_set typedef.
+ *
+ * @par Thread Safety
+ * @e Distinct @e objects: Safe.@n
+ * @e Shared @e objects: Unsafe.
+ *
+ * @par Example
+ * Performing an asynchronous wait:
+ * @code
+ * void handler(
+ * const asio::error_code& error,
+ * int signal_number)
+ * {
+ *   if (!error)
+ *   {
+ * // A signal occurred.
+ *   }
+ * }
+ *
+ * ...
+ *
+ * // Construct a signal set registered for process termination.
+ * asio::signal_set signals(io_service, SIGINT, SIGTERM);
+ *
+ * // Start an asynchronous wait for one of the signals to occur.
+ * signals.async_wait(handler);
+ * @endcode
+ *
+ * @par Queueing of signal notifications
+ *
+ * If a signal is registered with a signal_set, and the signal occurs when
+ * there are no waiting handlers, then the signal notification is queued. The
+ * next async_wait operation on that signal_set will dequeue the notification.
+ * If multiple notifications are queued, subsequent async_wait operations
+ * dequeue them one at a time. Signal notifications are dequeued in order of
+ * ascending signal number.
+ *
+ * If a signal number is removed from a signal_set (using the @c remove or @c
+ * erase member functions) then any queued notifications for that signal are
+ * discarded.
+ *
+ * @par Multiple registration of signals
+ *
+ * The same signal number may be registered with different signal_set objects.
+ * When the signal occurs, one handler is called for each signal_set object.
+ *
+ * Note that multiple registration only works for signals that are registered
+ * using Asio. The application must not also register a signal handler using
+ * functions such as @c signal() or @c sigaction().
+ *
+ * @par Signal masking on POSIX platforms
+ *
+ * POSIX allows signals to be blocked using functions such as @c sigprocmask()
+ * and @c pthread_sigmask(). For signals to be delivered, programs must ensure
+ * that any signals registered using signal_set objects are unblocked in at
+ * least one thread.
+ */
+template <typename SignalSetService = signal_set_service>
+class basic_signal_set
+  : public basic_io_object<SignalSetService>
+{
+public:
+  /// Construct a signal set without adding any signals.
+  /**
+   * This constructor creates a signal set without registering for any signals.
+   *
+   * @param io_service The io_service object that the signal set will use to
+   * dispatch handlers for any asynchronous operations performed on the set.
+   */
+  explicit basic_signal_set(asio::io_service& io_service)
+    : basic_io_object<SignalSetService>(io_service)
+  {
+  }
+
+  /// Construct a signal set and add one signal.
+  /**
+   * This constructor creates a signal set and registers for one signal.
+   *
+   * @param io_service The io_service object that the signal set will use to
+   * dispatch handlers for any asynchronous operations performed on the set.
+   *
+   * @param signal_number_1 The signal number to be added.
+   *
+   * @note This constructor is equivalent to performing:
+   * @code asio::signal_set signals(io_service);
+   * signals.add(signal_number_1); @endcode
+   */
+  basic_signal_set(asio::io_service& io_service, int signal_number_1)
+    : basic_io_object<SignalSetService>(io_service)
+  {
+asio::error_code ec;
+this->service.add(this->implementation, signal_number_1, ec);
+asio::detail::throw_error(ec,
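
The queueing behaviour described above implies the usual pattern of re-arming
async_wait from inside the handler so queued notifications are drained one at a
time. A sketch, assuming SIGINT and SIGTERM as the registered signals:

#include <asio.hpp>
#include <csignal>
#include <functional>
#include <iostream>

int main()
{
  asio::io_service io_service;
  asio::signal_set signals(io_service, SIGINT, SIGTERM);

  // The handler re-registers itself, so each queued notification is dequeued
  // by a fresh async_wait.
  std::function<void(const asio::error_code&, int)> handler =
      [&](const asio::error_code& ec, int signal_number)
      {
        if (ec) return;
        std::cout << "got signal " << signal_number << std::endl;
        signals.async_wait(handler);
      };

  signals.async_wait(handler);
  io_service.run();
  return 0;
}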

[38/53] [abbrv] [partial] hadoop git commit: Revert "Revert "Merge branch 'trunk' into HDFS-7240"" After testing it was confirmed that these changes work as expected.

2018-04-02 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/logging_test.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/logging_test.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/logging_test.cc
new file mode 100644
index 000..d487bf5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/logging_test.cc
@@ -0,0 +1,374 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+
+#include 
+
+using namespace hdfs;
+
+struct log_state {
+  int trace_count;
+  int debug_count;
+  int info_count;
+  int warning_count;
+  int error_count;
+
+  int origin_unknown;
+  int origin_rpc;
+  int origin_blockreader;
+  int origin_filehandle;
+  int origin_filesystem;
+
+  std::string msg;
+
+  log_state() {
+reset();
+  }
+
+  void reset() {
+trace_count = 0;
+debug_count = 0;
+info_count = 0;
+warning_count = 0;
+error_count = 0;
+
+origin_unknown = 0;
+origin_rpc = 0;
+origin_blockreader = 0;
+origin_filehandle = 0;
+origin_filesystem = 0;
+
+msg = "";
+  }
+};
+log_state log_state_instance;
+
+void process_log_msg(LogData *data) {
+  if(data->msg)
+log_state_instance.msg = data->msg;
+
+  switch(data->level) {
+case HDFSPP_LOG_LEVEL_TRACE:
+  log_state_instance.trace_count++;
+  break;
+case HDFSPP_LOG_LEVEL_DEBUG:
+  log_state_instance.debug_count++;
+  break;
+case HDFSPP_LOG_LEVEL_INFO:
+  log_state_instance.info_count++;
+  break;
+case HDFSPP_LOG_LEVEL_WARN:
+  log_state_instance.warning_count++;
+  break;
+case HDFSPP_LOG_LEVEL_ERROR:
+  log_state_instance.error_count++;
+  break;
+default:
+  //should never happen
+  std::cout << "foo" << std::endl;
+  ASSERT_FALSE(true);
+  }
+
+  switch(data->component) {
+case HDFSPP_LOG_COMPONENT_UNKNOWN:
+  log_state_instance.origin_unknown++;
+  break;
+case HDFSPP_LOG_COMPONENT_RPC:
+  log_state_instance.origin_rpc++;
+  break;
+case HDFSPP_LOG_COMPONENT_BLOCKREADER:
+  log_state_instance.origin_blockreader++;
+  break;
+case HDFSPP_LOG_COMPONENT_FILEHANDLE:
+  log_state_instance.origin_filehandle++;
+  break;
+case HDFSPP_LOG_COMPONENT_FILESYSTEM:
+  log_state_instance.origin_filesystem++;
+  break;
+default:
+  std::cout << "bar" << std::endl;
+  ASSERT_FALSE(true);
+  }
+
+}
+
+void reset_log_counters() {
+  log_state_instance.reset();
+}
+
+void assert_nothing_logged() {
+  if(log_state_instance.trace_count || log_state_instance.debug_count ||
+ log_state_instance.info_count || log_state_instance.warning_count ||
+ log_state_instance.error_count) {
+ASSERT_FALSE(true);
+  }
+}
+
+void assert_trace_logged() { ASSERT_TRUE(log_state_instance.trace_count > 0); }
+void assert_debug_logged() { ASSERT_TRUE(log_state_instance.debug_count > 0); }
+void assert_info_logged() { ASSERT_TRUE(log_state_instance.info_count > 0); }
+void assert_warning_logged() { ASSERT_TRUE(log_state_instance.warning_count > 
0); }
+void assert_error_logged() { ASSERT_TRUE(log_state_instance.error_count > 0); }
+
+void assert_no_trace_logged() { ASSERT_EQ(log_state_instance.trace_count, 0); }
+void assert_no_debug_logged() { ASSERT_EQ(log_state_instance.debug_count, 0); }
+void assert_no_info_logged() { ASSERT_EQ(log_state_instance.info_count, 0); }
+void assert_no_warning_logged() { ASSERT_EQ(log_state_instance.warning_count, 
0); }
+void assert_no_error_logged() { ASSERT_EQ(log_state_instance.error_count, 0); }
+
+void assert_unknown_logged() { ASSERT_TRUE(log_state_instance.origin_unknown > 
0); }
+void assert_rpc_logged() { ASSERT_TRUE(log_state_instance.origin_rpc > 0); }
+void assert_blockreader_logged() { 
ASSERT_TRUE(log_state_instance.origin_blockreader > 0); }
+void assert_filehandle_logged() { 
ASSERT_TRUE(log_state_instance.origin_filehandle > 0); }
+void assert_filesystem_logged() { 
ASSERT_TRUE(l
