svn commit: r19992 - /release/hbase/KEYS

2017-06-10 Thread stack
Author: stack
Date: Sun Jun 11 04:38:09 2017
New Revision: 19992

Log:
Add new St.Ack key

Modified:
release/hbase/KEYS

Modified: release/hbase/KEYS
==
--- release/hbase/KEYS (original)
+++ release/hbase/KEYS Sun Jun 11 04:38:09 2017
@@ -921,3 +921,60 @@ MvDc/AWKqytjEqWa4DtKJwzV4+3bSXu9W3q1A/bs
 wOsH/YmOwlg0nDruIbanNk/zYHVQyb2sMw/KYVsyVWRWMnR8s9uKnag=
 =7aLw
 -----END PGP PUBLIC KEY BLOCK-----
+pub   4096R/8ACC93D2 2016-10-19
+uid   [ultimate] Michael Stack 
+sig 38ACC93D2 2016-10-19  Michael Stack 
+sub   4096R/E87CDF51 2016-10-19
+sig  8ACC93D2 2016-10-19  Michael Stack 
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2
+
+mQINBFgH4+sBEADJlaqxy+zbVaIT31UFiFbEou2/yg67KK2LrXQ+anoqrj0KWq7b
+dGNSzMyWvBnGN0XBDIbNYqqz9DAlJwsWR0s8C7Gni3y+/5nCwxGjoajQa9lDFMnn
+vJiYv8039o75zucgzf+5cZ2K+Tba9wpwquqYNROsmIuW5uM/KHdGmQOCWIloRmkp
+Xi7VPdlnXR8wCXW8u5udikVsLH/Pj6YleiAgmSa/8YyVbhjqQxYHfWX/r5zgK92Y
+cvqgfG71ImJ5uetPYC4O+n0vfo8b5ZVml2HZ2Hw7j5n8LzviDyBV2PU57TZBur/F
+rPaFI0Y5nqMEg11YNMNGUamg+6RQzgRxXrT1YWD9ODI9aRLTCotJ55YrKYtQ04XM
+5PfHS56/9Z1NQoIuFO6rWSU/hX7qZj4pax+7EeY1Qdz3jZjZWJWORAXe/HQbnlgp
+ZNxIb1kLIwZKA1UGJ6fHa7i0nikBQTgxrrIYaxctxpoOIdXhsMJGczz8zpPYnF5k
+RuhB6NkR2Z7S1ddOz2nbl1rBPd6e5Wh+jhTqFHXVUJzbpNqiL3hKG60MF150TRyP
+Zg3iU3wHRyqlHbM0D0HlEoF3+d3fZ6CTC/7nKtmrT3K5k7rLxrWRU96mm5O1T611
+fVKB/FjTZdjEB6eWbj/zRx2Mqruzqj/2Jw6qkdV0DNrXosinUUM9jmONYwARAQAB
+tCBNaWNoYWVsIFN0YWNrIDxzdGFja0BkdWJvY2UubmV0PokCOQQTAQgAIwUCWAfj
+6wIbAwcLCQgHAwIBBhUIAgkKCwQWAgMBAh4BAheAAAoJEJgWx/yKzJPSMIsP/2e6
+dcN3j2eMrrtInoU7qcSRBEEMVGcDxe3Ez01XaPqEk1+i6ajdNk2xX+UwOiVR/ZX1
+661CKI7qWt/SZFuBIqQTkPrg/eulUfaOP47dhkcQW8dCOh763taF7qnwu2oqtMtC
+Jy9cccS00MnZTdQuzVGgK0IGhMLe4dY90Oq01b74t/T+BGhbtLIkZCZ/Y1HaQ1kW
+1Pmi1Aip3K6o0BTVHsr6iod26XHkDRzHJ4/J5Hgj+J3jXnr1fK/fFTisi2ksr9sB
+qwsXgjhg2YBaq814U6ZJ1GmJ4y0od9XTdvBevBEQpaNZ5rHjYC2ylAxFAC8xxOhK
+qUHqeKaReuI06TSwGXt1LvPed+M9Hnxkih1+34fXWzHEojqvNHJUEkvjFAgiWyik
+mWreJT+2uILABMboHZWWbujBeRl2JTNZrsnlk5J9Ne4j4kBRXrIreZDcPiRDFkZr
+8AGaKZSCr/Fr6DKL3y4G6nSF8xvdeLuwUAxPOLa0O1wFsaggI5sV+euVID5s15Qt
+IfD5HzXwEoNV9RajeIdkZDDtIGjJ+XFPFCqCU0rGDG4e3DuHG2jfr2hN4FScS9g6
+8KRT8FaoLI1Ky4E7DiRXDZU6j01+iVV4RfIPkOZxut8e0U3PmLU9txexrajHNeET
+KGHF+gqMa6uXj5tRSn5cDhRm2CKxax3YtmflF1LUuQINBFgH4+sBEADPNjQ9MOaD
+YK0+dAl5p2YHUR76wQpf+5fEKLbgxEsTcKHsTXh6CJ1FHnp91+2DN+GQRqW2z860
+3xCwAUvwAwQg/hMGGsbq8VEgxNBAaT0EQTE4HLuhtstRAsBNAUcNlWMWFZp8kqu7
+xSU7Jfukdf7Gl66DzE3Vn0ZmcagARvnCgQW0QQfDhj9K0/Dy0Cid/D/KiYbYFxxW
++mr+/SJ5FbWwrEc1xXkbilo1Bgp55aGSQr//C6At7b/ppwA91HaUwYbvEaXgE6tV
+gnYGYQGXJ7XU/6rjaI+U30fNlhUEFZ05HnV5fdzmmSLauoXoM96EOEktXMqFWXL8
+1l9/+6UO184dULhRKCHfbQPmhGUNhuZcc5WW/4RExclmemXasnWW7jrEShg7L6gq
+IIzjr6owMfT6d0ktW1+50u2afewRGVzOyPuPFRaM1dy+kRil0lM7kbmpqpL1GIGt
+vPne0wDY8+qida8C+zNGW26ryKPV2aUwF8FGLS/dFkP6PJJV/PAiSwt3z8B4mseJ
+qIDpwEyy2cdhN/5mLyKU6RbjqpECd/1vP+0czL2daJTwGOltGrxGUWXW8FiN9vbL
+2JKGdIf1FIpvTp6Z/maHNQZOluHp2/vKc1HaM5wgoYIVNT2t7N9w960U8Ra051MO
+VkA1++Mv9zLMN41hpMGbTQAbwvBZozPSZQARAQABiQIfBBgBCAAJBQJYB+PrAhsM
+AAoJEJgWx/yKzJPSL4sP+wXtwSsizqlURcYqwpN5m8ox8hczbwUh9uaRv95M8lSb
+N/oNB+w2IVhmsD/18eMtrLYI4j0cmfQ0zbftTEA24dW+eajvhPYQltOY+n0qinMa
+pv062CopX0WzrtcH4mS8WBUugnwH2Xp/MNk1tIGJIjK8H5IX+7TrTvoRnT9YL08B
+z+ElMkldlnJ/89vA4CtHb1aTWHnvA1WAqbQSvh+Z9lYsiMXjcY9cKy8rtAFgCMbJ
+ykaPhGajhbJtFPaK9DFsNm2Lb2JEe+h/hskTMMSNehBacxaGY21tbDOofrCMzKg7
+rakYvDBHRsfJUiwGnqNi5yUom0811/5XO6YCoR89hIKGsp5rnY50IVnvuCtXqrhL
+BGUl+A74a7e3Yc44zFCybpcQ6ve+bJidnhYmF7m1AayzJkFurWVm8vNrdHIRuzdX
+90GHeMekUjLZChXCAiNk0gttmzbPZqLyRkwws4ciWBWHCI6BD7Q59IXnvDN+sopm
+7+Jwp6xExq5TMUnvmQ4XoeyrWnC67dBYy3+ZbHI/1zkDDL6z+75jCo1oTPpR4Bqa
+IP9wzT5o/UOKSxqqYmsgSoSEJFwO5adAkLyW8Ci8yUytTkZDU2bfHHig5VcCrFSo
+bFBjjz8VFd1cF4F3MIrVi3UN/Knw9EIWcPqJeB2eDm6TIN3UmJSyFObAGQZmZW1N
+=yX4c
+-----END PGP PUBLIC KEY BLOCK-----




svn commit: r19991 - /dev/hbase/hbase-1.1.11RC0/

2017-06-10 Thread ndimiduk
Author: ndimiduk
Date: Sun Jun 11 02:25:19 2017
New Revision: 19991

Log:
HBase 1.1.11RC0 artifacts

Added:
dev/hbase/hbase-1.1.11RC0/
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz   (with props)
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.asc
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.md5
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.mds
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz   (with props)
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.asc
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.md5
dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.mds

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.asc
==
--- dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.asc Sun Jun 11 02:25:19 
2017
@@ -0,0 +1,16 @@
+-----BEGIN PGP SIGNATURE-----
+
+iQIcBAABCgAGBQJZPKNiAAoJEK2QOQccNIm9mVwP/jR2zznQfKDS2K9B/CIyJUou
+P1/9F7UfChv+mwiDgsuv5fuxQXoBQpRcfvoEtTNjme6MsB7An0As+aUgMmrK4Kyf
+kgFMQct+9oLdt7btvQ/zZMTeCNu3IECHfhKUwL2SX3esF1s9zHkZNdF6q9D1FGka
+ei8IE9uMQLaYXbSzVYAxCiDnJDXYaRkoxMuK7c02TOakHsafwKZ89UERJvWTVtvN
+mC6LW18lEem3yzEzL5abU023vPUKSUdGGRW4mM1zCkuWvjFnPVBeuv+A82bDihlc
+M2xnzxKB6KcnTio9g63e9MOHzq1eFPD0ZrVrCzMI+VnZLTwHKhGNKTA+8DI5oq3N
+jwhlEgQehbs45SSFYFjtj6feCjsPQLnz84zqgQPyHcSPt0a7CsVihKzpyEZGUyYT
++UaN285NCT8yA82JIvM8fYEsbLI9DkWRV0o63/sASsUUYJAWHHhpuOi3WlCkwQaL
+G9q6VIA/X7H7IyiEDWGfwVAWxLL/hkiJK0eGdc4Zix1ga5IrR4E6seqtTwsLeZ9a
+fGnQR/V7n/0unumGZATs+4uhAUq+b+3ei7gOVsTvI4peX1yJ1mTS9Z70FmskmxVt
+2S9gMOxIUYztdgjbuz8ffgqOJ6LO57hBbbmbmsCPkMspsCuczB2Wxmls4IwyHh7i
+NQoFnn92Twp9kGL6X2nR
+=61Ys
+-----END PGP SIGNATURE-----

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.md5
==
--- dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.md5 (added)
+++ dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.md5 Sun Jun 11 02:25:19 
2017
@@ -0,0 +1 @@
+hbase-1.1.11-bin.tar.gz: 87 0E F5 E8 D1 35 AF 6A  A3 E2 96 83 31 F5 84 93

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.mds
==
--- dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-bin.tar.gz.mds Sun Jun 11 02:25:19 
2017
@@ -0,0 +1,17 @@
+hbase-1.1.11-bin.tar.gz:MD5 = 87 0E F5 E8 D1 35 AF 6A  A3 E2 96 83 31 F5 84
+  93
+hbase-1.1.11-bin.tar.gz:   SHA1 = 64C8 3952 9EE4 76D7 0F61  8649 0D18 79BD 1F07
+  0028
+hbase-1.1.11-bin.tar.gz: RMD160 = C54F 46D2 4F12 337F EB05  F9A8 9C27 0E99 B75F
+  EE99
+hbase-1.1.11-bin.tar.gz: SHA224 = 1D59C48B E2B49E20 1A31690B 3660604E A8E0C9B5
+  5810B5B1 C6335455
+hbase-1.1.11-bin.tar.gz: SHA256 = 95ED3072 99A13D8B 8AC688F7 962F1A4C 98988925
+  8D9B443C 225573E6 5A46E0A8
+hbase-1.1.11-bin.tar.gz: SHA384 = 8B4F3697 8EF8CA0C 727020D2 F8E2553E D84E7E61
+  4713127F E370CD0C AAEEC71C 53EED9D7 62A674D5
+  8F1658EE F34A345C
+hbase-1.1.11-bin.tar.gz: SHA512 = 52582AC4 58C26C42 A704F84E 86F527B2 5229C1FF
+  0465868E E56DFC95 2AD8173F 6E624282 F9934682
+  F5B2E89C AECAFB16 7D33A2F0 CB78FBF2 2054E45F
+  B555234E

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.asc
==
--- dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.asc (added)
+++ dev/hbase/hbase-1.1.11RC0/hbase-1.1.11-src.tar.gz.asc Sun Jun 11 02:25:19 
2017
@@ -0,0 +1,16 @@
+-----BEGIN PGP SIGNATURE-----
+
+iQIcBAABCgAGBQJZPKN0AAoJEK2QOQccNIm9dZsP/iDESLZd1Kr2h+pz0ao22HK6
+reia16RdQIqDPI4NXJuex3ipK7RK1wjkWqASpTSpB3C0psoH22vWGqxbXboGBVKD
+96ckqo6uSRe0dQ8JTc6RwuCCJJAa92eX7B9CcGjc+Qc9+MnESoXNKmHqhtwTotLY
+BMQ7sd8G361w1saDMvIyLrGhZSAnMSUDvIn1MZUsTdjnsDxFrZqcRIBQCO3tnhHv
+N1O0I7KLvXqZY6ghdsEhs/MhvkXRK4gIj5+khpdLZLS4oBzQvCgN2MOzIAPLaY7t

[hbase] Git Push Summary

2017-06-10 Thread ndimiduk
Repository: hbase
Updated Tags:  refs/tags/1.1.11RC0 [created] d318fbeef


[1/2] hbase git commit: bump version to 1.1.11

2017-06-10 Thread ndimiduk
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 2a6990123 -> 0dab4ad2a


bump version to 1.1.11


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a823b41
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a823b41
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a823b41

Branch: refs/heads/branch-1.1
Commit: 7a823b419b9f579a079a912d6500c37986b223b8
Parents: 2a69901
Author: Nick Dimiduk 
Authored: Sat Jun 10 17:48:05 2017 -0700
Committer: Nick Dimiduk 
Committed: Sat Jun 10 17:48:05 2017 -0700

--
 hbase-annotations/pom.xml| 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-prefix-tree/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 pom.xml  | 2 +-
 22 files changed, 23 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index e8fb765..c348084 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index d34d867..b5d610d 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
   hbase-assembly

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-checkstyle/pom.xml
--
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 6bab268..7d71d84 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -24,14 +24,14 @@
 4.0.0
 org.apache.hbase
 hbase-checkstyle
-1.1.11-SNAPSHOT
+1.1.11
 Apache HBase - Checkstyle
 Module to hold Checkstyle properties for HBase.
 
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index ebea94d..5400922 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -24,7 +24,7 @@
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 48d173b..d706981 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index de3e40e..6704aa9 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
   
   hbase-examples

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a823b41/hbase-hadoop-compat/pom.xml
--
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index ab318c1..70e5174 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
 
 hbase
 org.apache.hbase
-1.1.11-SNAPSHOT
+1.1.11
 ..
 
 


[2/2] hbase git commit: updating docs from master

2017-06-10 Thread ndimiduk
updating docs from master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0dab4ad2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0dab4ad2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0dab4ad2

Branch: refs/heads/branch-1.1
Commit: 0dab4ad2a03fb64530582329b773fa337301a7a1
Parents: 7a823b4
Author: Nick Dimiduk 
Authored: Sat Jun 10 17:49:35 2017 -0700
Committer: Nick Dimiduk 
Committed: Sat Jun 10 17:49:35 2017 -0700

--
 src/main/asciidoc/_chapters/external_apis.adoc |  9 ++-
 src/main/asciidoc/_chapters/faq.adoc   |  3 +
 src/main/asciidoc/_chapters/hbase_mob.adoc |  4 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc   | 80 -
 src/main/asciidoc/_chapters/orca.adoc  |  6 +-
 src/main/asciidoc/_chapters/upgrading.adoc |  2 +-
 6 files changed, 96 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0dab4ad2/src/main/asciidoc/_chapters/external_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/external_apis.adoc 
b/src/main/asciidoc/_chapters/external_apis.adoc
index 556c4e0..2f85461 100644
--- a/src/main/asciidoc/_chapters/external_apis.adoc
+++ b/src/main/asciidoc/_chapters/external_apis.adoc
@@ -225,14 +225,17 @@ creation or mutation, and `DELETE` for deletion.
 |Description
 |Example
 
-|/_table_/_row_/_column:qualifier_/_timestamp_
+|/_table_/_row_
 |GET
-|Get the value of a single row. Values are Base-64 encoded.
+|Get all columns of a single row. Values are Base-64 encoded. This requires 
the "Accept" request header with a type that can hold multiple columns (like 
xml, json or protobuf).
 |curl -vi -X GET \
   -H "Accept: text/xml" \
  "http://example.com:8000/users/row1"
 
-curl -vi -X GET \
+|/_table_/_row_/_column:qualifier_/_timestamp_
+|GET
+|Get the value of a single column. Values are Base-64 encoded.
+|curl -vi -X GET \
   -H "Accept: text/xml" \
  "http://example.com:8000/users/row1/cf:a/1458586888395"
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0dab4ad2/src/main/asciidoc/_chapters/faq.adoc
--
diff --git a/src/main/asciidoc/_chapters/faq.adoc 
b/src/main/asciidoc/_chapters/faq.adoc
index 7bffe0e..9034d4b 100644
--- a/src/main/asciidoc/_chapters/faq.adoc
+++ b/src/main/asciidoc/_chapters/faq.adoc
@@ -44,6 +44,9 @@ How can I find examples of NoSQL/HBase?::
 What is the history of HBase?::
   See <>.
 
+Why are the cells above 10MB not recommended for HBase?::
+  Large cells don't fit well into HBase's approach to buffering data. First, 
the large cells bypass the MemStoreLAB when they are written. Then, they cannot 
be cached in the L2 block cache during read operations. Instead, HBase has to 
allocate on-heap memory for them each time. This can have a significant impact 
on the garbage collector within the RegionServer process.
+
 === Upgrading
 How do I upgrade Maven-managed projects from HBase 0.94 to HBase 0.96+?::
   In HBase 0.96, the project moved to a modular structure. Adjust your 
project's dependencies to rely upon the `hbase-client` module or another module 
as appropriate, rather than a single JAR. You can model your Maven dependency 
after one of the following, depending on your targeted version of HBase. See 
Section 3.5, “Upgrading from 0.94.x to 0.96.x” or Section 3.3, “Upgrading 
from 0.96.x to 0.98.x” for more information.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0dab4ad2/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
index bdf077a..5da0343 100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -36,7 +36,7 @@ read and write paths are optimized for values smaller than 
100KB in size. When
 HBase deals with large numbers of objects over this threshold, referred to here
 as medium objects, or MOBs, performance is degraded due to write amplification
 caused by splits and compactions. When using MOBs, ideally your objects will 
be between
-100KB and 10MB. HBase ***FIX_VERSION_NUMBER*** adds support
+100KB and 10MB (see the <>). HBase ***FIX_VERSION_NUMBER*** adds support
 for better managing large numbers of MOBs while maintaining performance,
 consistency, and low operational overhead. MOB support is provided by the work
 done in link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339]. To
@@ -155,7 +155,7 @@ family as the second argument. and take a compaction type 
as the third argument.
 
 
 hbase> compact 

hbase git commit: HBASE-18137 Replication gets stuck for empty WALs

2017-06-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 96e48c3df -> c1289960d


HBASE-18137 Replication gets stuck for empty WALs

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1289960
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1289960
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1289960

Branch: refs/heads/branch-1.2
Commit: c1289960dd7ebf94c615d00ef9c77c27737496e1
Parents: 96e48c3
Author: Vincent 
Authored: Wed Jun 7 14:48:45 2017 -0700
Committer: Andrew Purtell 
Committed: Sat Jun 10 12:47:18 2017 -0700

--
 .../regionserver/ReplicationSource.java | 16 ++--
 .../hbase/replication/TestReplicationBase.java  |  1 +
 .../replication/TestReplicationSmallTests.java  | 83 
 3 files changed, 94 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c1289960/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 2285a5e..a59c3c8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -512,9 +512,9 @@ public class ReplicationSource extends Thread
   terminate("Couldn't get the position of this recovered queue " + 
peerClusterZnode, e);
 }
   }
+  int sleepMultiplier = 1;
   // Loop until we close down
   while (isWorkerActive()) {
-int sleepMultiplier = 1;
 // Sleep until replication is enabled again
 if (!isPeerEnabled()) {
   if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
@@ -591,7 +591,7 @@ public class ReplicationSource extends Thread
 
 if (considerDumping &&
 sleepMultiplier == maxRetriesMultiplier &&
-processEndOfFile()) {
+processEndOfFile(false)) {
   continue;
 }
   }
@@ -717,7 +717,7 @@ public class ReplicationSource extends Thread
   }
   // If we didn't get anything and the queue has an object, it means we
   // hit the end of the file for sure
-  return seenEntries == 0 && processEndOfFile();
+  return seenEntries == 0 && processEndOfFile(false);
 }
 
 /**
@@ -846,11 +846,12 @@ public class ReplicationSource extends Thread
   // which throws a NPE if we open a file before any data node has the 
most recent block
   // Just sleep and retry. Will require re-reading compressed WALs for 
compressionContext.
   LOG.warn("Got NPE opening reader, will retry.");
-} else if (sleepMultiplier >= maxRetriesMultiplier) {
+} else if (sleepMultiplier >= maxRetriesMultiplier
+&& conf.getBoolean("replication.source.eof.autorecovery", false)) {
   // TODO Need a better way to determine if a file is really gone but
   // TODO without scanning all logs dir
   LOG.warn("Waited too long for this file, considering dumping");
-  return !processEndOfFile();
+  return !processEndOfFile(true);
 }
   }
   return true;
@@ -990,7 +991,7 @@ public class ReplicationSource extends Thread
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 
"DE_MIGHT_IGNORE",
 justification = "Yeah, this is how it works")
-protected boolean processEndOfFile() {
+protected boolean processEndOfFile(boolean dumpOnlyIfZeroLength) {
   // We presume this means the file we're reading is closed.
   if (this.queue.size() != 0) {
 // -1 means the wal wasn't closed cleanly.
@@ -1025,6 +1026,9 @@ public class ReplicationSource extends Thread
   LOG.trace("Reached the end of log " + this.currentPath + ", stats: " 
+ getStats()
   + ", and the length of the file is " + (stat == null ? "N/A" : 
stat.getLen()));
 }
+if (dumpOnlyIfZeroLength && stat.getLen() != 0) {
+  return false;
+}
 this.currentPath = null;
 this.repLogReader.finishCurrentFile();
 this.reader = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1289960/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java

[3/4] hbase git commit: HBASE-18137 Replication gets stuck for empty WALs

2017-06-10 Thread apurtell
HBASE-18137 Replication gets stuck for empty WALs

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6782dfca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6782dfca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6782dfca

Branch: refs/heads/branch-1.3
Commit: 6782dfca4f3a2f5e02cc60a7c04d8d5d95ebc36e
Parents: 6a216c7
Author: Vincent 
Authored: Wed Jun 7 14:48:45 2017 -0700
Committer: Andrew Purtell 
Committed: Sat Jun 10 12:26:12 2017 -0700

--
 .../regionserver/ReplicationSource.java | 16 ++--
 .../hbase/replication/TestReplicationBase.java  |  1 +
 .../replication/TestReplicationSmallTests.java  | 83 
 3 files changed, 94 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6782dfca/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 65f581a..2285292 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -542,9 +542,9 @@ public class ReplicationSource extends Thread
   terminate("Couldn't get the position of this recovered queue " + 
peerClusterZnode, e);
 }
   }
+  int sleepMultiplier = 1;
   // Loop until we close down
   while (isWorkerActive()) {
-int sleepMultiplier = 1;
 // Sleep until replication is enabled again
 if (!isPeerEnabled()) {
   if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
@@ -622,7 +622,7 @@ public class ReplicationSource extends Thread
 
 if (considerDumping &&
 sleepMultiplier == maxRetriesMultiplier &&
-processEndOfFile()) {
+processEndOfFile(false)) {
   continue;
 }
   }
@@ -749,7 +749,7 @@ public class ReplicationSource extends Thread
   }
   // If we didn't get anything and the queue has an object, it means we
   // hit the end of the file for sure
-  return seenEntries == 0 && processEndOfFile();
+  return seenEntries == 0 && processEndOfFile(false);
 }
 
 /**
@@ -930,11 +930,12 @@ public class ReplicationSource extends Thread
   // which throws a NPE if we open a file before any data node has the 
most recent block
   // Just sleep and retry. Will require re-reading compressed WALs for 
compressionContext.
   LOG.warn("Got NPE opening reader, will retry.");
-} else if (sleepMultiplier >= maxRetriesMultiplier) {
+} else if (sleepMultiplier >= maxRetriesMultiplier
+&& conf.getBoolean("replication.source.eof.autorecovery", false)) {
   // TODO Need a better way to determine if a file is really gone but
   // TODO without scanning all logs dir
   LOG.warn("Waited too long for this file, considering dumping");
-  return !processEndOfFile();
+  return !processEndOfFile(true);
 }
   }
   return true;
@@ -1100,7 +1101,7 @@ public class ReplicationSource extends Thread
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 
"DE_MIGHT_IGNORE",
 justification = "Yeah, this is how it works")
-protected boolean processEndOfFile() {
+protected boolean processEndOfFile(boolean dumpOnlyIfZeroLength) {
   // We presume this means the file we're reading is closed.
   if (this.queue.size() != 0) {
 // -1 means the wal wasn't closed cleanly.
@@ -1135,6 +1136,9 @@ public class ReplicationSource extends Thread
   LOG.trace("Reached the end of log " + this.currentPath + ", stats: " 
+ getStats()
   + ", and the length of the file is " + (stat == null ? "N/A" : 
stat.getLen()));
 }
+if (dumpOnlyIfZeroLength && stat.getLen() != 0) {
+  return false;
+}
 this.currentPath = null;
 this.repLogReader.finishCurrentFile();
 this.reader = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6782dfca/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
 

[4/4] hbase git commit: HBASE-18137 Replication gets stuck for empty WALs

2017-06-10 Thread apurtell
HBASE-18137 Replication gets stuck for empty WALs

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/385b7924
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/385b7924
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/385b7924

Branch: refs/heads/branch-2
Commit: 385b792446ea1b0c58b7365904d677ba48eec930
Parents: eca1ec3
Author: Vincent 
Authored: Fri Jun 9 18:47:14 2017 -0700
Committer: Andrew Purtell 
Committed: Sat Jun 10 12:45:40 2017 -0700

--
 .../ReplicationSourceShipperThread.java |  2 +-
 .../ReplicationSourceWALReaderThread.java   | 30 
 .../hbase/replication/TestReplicationBase.java  |  1 +
 .../replication/TestReplicationSmallTests.java  | 80 
 4 files changed, 112 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/385b7924/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
index d1a8ac2..6807da2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
@@ -303,7 +303,7 @@ public class ReplicationSourceShipperThread extends Thread {
   }
 
   public Path getCurrentPath() {
-return this.currentPath;
+return this.entryReader.getCurrentPath();
   }
 
   public long getCurrentPosition() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/385b7924/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
index ad08866..c1af6e6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication.regionserver;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -189,6 +190,7 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
   sleepMultiplier++;
 } else {
   LOG.error("Failed to read stream of replication entries", e);
+  handleEofException(e);
 }
 Threads.sleep(sleepForRetries * sleepMultiplier);
   } catch (InterruptedException e) {
@@ -198,6 +200,34 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
 }
   }
 
+  // if we get an EOF due to a zero-length log, and there are other logs in 
queue
+  // (highly likely we've closed the current log), we've hit the max retries, 
and autorecovery is
+  // enabled, then dump the log
+  private void handleEofException(Exception e) {
+if (e.getCause() instanceof EOFException && logQueue.size() > 1
+&& conf.getBoolean("replication.source.eof.autorecovery", false)) {
+  try {
+if (fs.getFileStatus(logQueue.peek()).getLen() == 0) {
+  LOG.warn("Forcing removal of 0 length log in queue: " + 
logQueue.peek());
+  logQueue.remove();
+  currentPosition = 0;
+}
+  } catch (IOException ioe) {
+LOG.warn("Couldn't get file length information about log " + 
logQueue.peek());
+  }
+}
+  }
+
+  public Path getCurrentPath() {
+// if we've read some WAL entries, get the Path we read from
+WALEntryBatch batchQueueHead = entryBatchQueue.peek();
+if (batchQueueHead != null) {
+  return batchQueueHead.lastWalPath;
+}
+// otherwise, we must be currently reading from the head of the log queue
+return logQueue.peek();
+  }
+
   //returns false if we've already exceeded the global quota
   private boolean checkQuota() {
 // try not to go over total quota

http://git-wip-us.apache.org/repos/asf/hbase/blob/385b7924/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff 

[1/4] hbase git commit: HBASE-18137 Replication gets stuck for empty WALs

2017-06-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6860ddca9 -> 650ef5cf5
  refs/heads/branch-1.3 6a216c787 -> 6782dfca4
  refs/heads/branch-2 eca1ec335 -> 385b79244
  refs/heads/master ea64dbef7 -> 384e308e9


HBASE-18137 Replication gets stuck for empty WALs

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/384e308e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/384e308e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/384e308e

Branch: refs/heads/master
Commit: 384e308e9f2387422e76ceb1432d6b2b85a973cf
Parents: ea64dbe
Author: Vincent 
Authored: Fri Jun 9 18:47:14 2017 -0700
Committer: Andrew Purtell 
Committed: Sat Jun 10 10:30:40 2017 -0700

--
 .../ReplicationSourceShipperThread.java |  2 +-
 .../ReplicationSourceWALReaderThread.java   | 30 
 .../hbase/replication/TestReplicationBase.java  |  1 +
 .../replication/TestReplicationSmallTests.java  | 80 
 4 files changed, 112 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/384e308e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
index d1a8ac2..6807da2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.java
@@ -303,7 +303,7 @@ public class ReplicationSourceShipperThread extends Thread {
   }
 
   public Path getCurrentPath() {
-return this.currentPath;
+return this.entryReader.getCurrentPath();
   }
 
   public long getCurrentPosition() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/384e308e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
index ad08866..c1af6e6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication.regionserver;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -189,6 +190,7 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
   sleepMultiplier++;
 } else {
   LOG.error("Failed to read stream of replication entries", e);
+  handleEofException(e);
 }
 Threads.sleep(sleepForRetries * sleepMultiplier);
   } catch (InterruptedException e) {
@@ -198,6 +200,34 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
 }
   }
 
+  // if we get an EOF due to a zero-length log, and there are other logs in 
queue
+  // (highly likely we've closed the current log), we've hit the max retries, 
and autorecovery is
+  // enabled, then dump the log
+  private void handleEofException(Exception e) {
+if (e.getCause() instanceof EOFException && logQueue.size() > 1
+&& conf.getBoolean("replication.source.eof.autorecovery", false)) {
+  try {
+if (fs.getFileStatus(logQueue.peek()).getLen() == 0) {
+  LOG.warn("Forcing removal of 0 length log in queue: " + 
logQueue.peek());
+  logQueue.remove();
+  currentPosition = 0;
+}
+  } catch (IOException ioe) {
+LOG.warn("Couldn't get file length information about log " + 
logQueue.peek());
+  }
+}
+  }
+
+  public Path getCurrentPath() {
+// if we've read some WAL entries, get the Path we read from
+WALEntryBatch batchQueueHead = entryBatchQueue.peek();
+if (batchQueueHead != null) {
+  return batchQueueHead.lastWalPath;
+}
+// otherwise, we must be currently reading from the head of the log queue
+return logQueue.peek();
+  }
+
   //returns false if we've already exceeded the global quota
   private boolean checkQuota() {
 // try not to go over total quota


[2/4] hbase git commit: HBASE-18137 Replication gets stuck for empty WALs

2017-06-10 Thread apurtell
HBASE-18137 Replication gets stuck for empty WALs

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/650ef5cf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/650ef5cf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/650ef5cf

Branch: refs/heads/branch-1
Commit: 650ef5cf59ee7f6e4c219a9043b66a814da52f19
Parents: 6860ddc
Author: Vincent 
Authored: Fri Jun 9 18:36:23 2017 -0700
Committer: Andrew Purtell 
Committed: Sat Jun 10 11:29:51 2017 -0700

--
 .../regionserver/ReplicationSource.java |  2 +-
 .../ReplicationSourceWALReaderThread.java   | 30 +++
 .../hbase/replication/TestReplicationBase.java  |  1 +
 .../replication/TestReplicationSmallTests.java  | 82 
 4 files changed, 114 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/650ef5cf/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 6954ea2..8378b9b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -931,7 +931,7 @@ public class ReplicationSource extends Thread implements 
ReplicationSourceInterf
 }
 
 public Path getCurrentPath() {
-  return this.currentPath;
+  return this.entryReader.getCurrentPath();
 }
 
 public long getCurrentPosition() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/650ef5cf/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
index 6f1c641..40828b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication.regionserver;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -188,6 +189,7 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
   sleepMultiplier++;
 } else {
   LOG.error("Failed to read stream of replication entries", e);
+  handleEofException(e);
 }
 Threads.sleep(sleepForRetries * sleepMultiplier);
   } catch (InterruptedException e) {
@@ -197,6 +199,34 @@ public class ReplicationSourceWALReaderThread extends 
Thread {
 }
   }
 
+  // if we get an EOF due to a zero-length log, and there are other logs in 
queue
+  // (highly likely we've closed the current log), we've hit the max retries, 
and autorecovery is
+  // enabled, then dump the log
+  private void handleEofException(Exception e) {
+if (e.getCause() instanceof EOFException && logQueue.size() > 1
+&& conf.getBoolean("replication.source.eof.autorecovery", false)) {
+  try {
+if (fs.getFileStatus(logQueue.peek()).getLen() == 0) {
+  LOG.warn("Forcing removal of 0 length log in queue: " + 
logQueue.peek());
+  logQueue.remove();
+  currentPosition = 0;
+}
+  } catch (IOException ioe) {
+LOG.warn("Couldn't get file length information about log " + 
logQueue.peek());
+  }
+}
+  }
+
+  public Path getCurrentPath() {
+// if we've read some WAL entries, get the Path we read from
+WALEntryBatch batchQueueHead = entryBatchQueue.peek();
+if (batchQueueHead != null) {
+  return batchQueueHead.lastWalPath;
+}
+// otherwise, we must be currently reading from the head of the log queue
+return logQueue.peek();
+  }
+
   //returns false if we've already exceeded the global quota
   private boolean checkQuota() {
 // try not to go over total quota

http://git-wip-us.apache.org/repos/asf/hbase/blob/650ef5cf/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff --git 

hbase-site git commit: INFRA-10751 Empty commit

2017-06-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 476c54ede -> a108d2508


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/a108d250
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/a108d250
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/a108d250

Branch: refs/heads/asf-site
Commit: a108d2508918c9c414064c2569abf4f20879d518
Parents: 476c54e
Author: jenkins 
Authored: Sat Jun 10 15:00:10 2017 +
Committer: jenkins 
Committed: Sat Jun 10 15:00:10 2017 +

--

--




[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index cf7404f..97b9f09 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1666,6 +1666,8 @@
 
 addFamilyPart(Cell)
 - Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder
 
+addFamilyStoreFile(String,
 String) - Method in class org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.StoreFileReference
+
 addFamilyVersion(byte[],
 long) - Method in class org.apache.hadoop.hbase.client.Delete
 
 Delete all columns of the specified family with a timestamp 
equal to
@@ -10795,6 +10797,8 @@
 
 chore()
 - Method in class org.apache.hadoop.hbase.quotas.QuotaObserverChore
 
+chore()
 - Method in class org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
+
 chore()
 - Method in class org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 
 chore
 - Variable in class org.apache.hadoop.hbase.quotas.TableQuotaSnapshotStore
@@ -15305,6 +15309,10 @@
 
 Computes total FileSystem size for the given Region.
 
+computeSnapshotSizes(MultimapTableName,
 String) - Method in class org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
+
+Computes the size of each snapshot provided given the 
current files referenced by the table.
+
 computeTrailerSizeByVersion()
 - Static method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
 
 ConcatenatedListsT - Class in org.apache.hadoop.hbase.util
@@ -15510,6 +15518,8 @@
 
 conf
 - Variable in class org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas
 
+conf
 - Variable in class org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
+
 conf
 - Static variable in class org.apache.hadoop.hbase.quotas.TimeBasedLimiter
 
 conf
 - Variable in class org.apache.hadoop.hbase.regionserver.AbstractMemStore
@@ -15978,6 +15988,8 @@
 
 conn
 - Variable in class org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas
 
+conn
 - Variable in class org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
+
 conn
 - Variable in class org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 
 conn
 - Variable in class org.apache.hadoop.hbase.quotas.TableQuotaSnapshotStore
@@ -18766,6 +18778,10 @@
 
 Creates Get operation to retrieve start code from backup 
system table
 
+createGetNamespaceSnapshotSize(String)
 - Static method in class org.apache.hadoop.hbase.quotas.QuotaTableUtil
+
+Creates a Get to fetch the namespace's total 
snapshot size.
+
 createGroupKey(byte[][])
 - Method in class org.apache.hadoop.hbase.mapred.GroupingTableMap
 
 Create a key by concatenating multiple column values.
@@ -19198,12 +19214,26 @@
 
 Creates Put to store incremental backup table set
 
+createPutForNamespaceSnapshotSize(String,
 long) - Static method in class org.apache.hadoop.hbase.quotas.QuotaTableUtil
+
+Creates a Put for the namespace's total 
snapshot size.
+
 createPutForPreparedBulkload(TableName,
 byte[], byte[], ListPairPath, Path) - Static method 
in class org.apache.hadoop.hbase.backup.impl.BackupSystemTable
 
 createPutForRegionServerLastLogRollResult(String,
 Long, String) - Method in class 
org.apache.hadoop.hbase.backup.impl.BackupSystemTable
 
 Creates Put to store RS last log result
 
+createPutForSnapshotSize(TableName,
 String, long) - Static method in class 
org.apache.hadoop.hbase.quotas.QuotaTableUtil
+
+Creates a Put to persist 
the current size of the snapshot with respect to
+ the given table.
+
+createPutForSpaceSnapshot(TableName,
 SpaceQuotaSnapshot) - Static method in class 
org.apache.hadoop.hbase.quotas.QuotaTableUtil
+
+Creates a Put to store 
the given snapshot for the given tableName in
+ the quota table.
+
 createPutForStartBackupSession()
 - Method in class org.apache.hadoop.hbase.backup.impl.BackupSystemTable
 
 createPutForStartCode(String,
 String) - Method in class org.apache.hadoop.hbase.backup.impl.BackupSystemTable
@@ -19228,11 +19258,6 @@
 
 Creates put list for list of WAL files
 
-createPutSpaceSnapshot(TableName,
 SpaceQuotaSnapshot) - Static method in class 
org.apache.hadoop.hbase.quotas.QuotaTableUtil
-
-Creates a Put to store 
the given snapshot for the given tableName in
- the quota table.
-
 createQuarantinePath(Path)
 - Method in class org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker
 
 Given a path, generates a new path to where we move a 
corrupted hfile (bad
@@ -19483,6 +19508,8 @@
 
 Creates Scan operation to load last RS log roll 
results
 
+createScanForSnapshotSizes(TableName)
 - Static method in class org.apache.hadoop.hbase.quotas.QuotaTableUtil
+
 createScanFromConfiguration(Configuration)
 - Static method in class org.apache.hadoop.hbase.mapreduce.TableInputFormat
 
 Sets up a Scan instance, applying 
settings from the configuration property
@@ -26862,6 +26889,10 @@
 
 equals(Object)
 - Method in class 

[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/Store.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Store.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Store.html
index b770602..27bfdcd 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Store.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Store.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":38,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":38,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":38,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":18,"i44":6,"i45":18,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":38,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":38,"i72":6,"i73":6,"i74":6,"i75":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":38,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":38,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":38,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":18,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":38,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":38,"i73":6,"i74":6,"i75":6,"i76":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -318,30 +318,34 @@ extends 
 
 long
-getLastCompactSize()
+getHFilesSize()
 
 
 long
-getMajorCompactedCellsCount()
+getLastCompactSize()
 
 
 long
-getMajorCompactedCellsSize()
+getMajorCompactedCellsCount()
 
 
 long
-getMaxMemstoreTS()
+getMajorCompactedCellsSize()
 
 
 long
-getMaxSequenceId()
+getMaxMemstoreTS()
 
 
 long
-getMaxStoreFileAge()
+getMaxSequenceId()
 
 
 long
+getMaxStoreFileAge()
+
+
+long
 getMemStoreSize()
 Deprecated.
 Since 2.0 and will be 
removed in 3.0. Use getSizeOfMemStore()
 instead.
@@ -351,27 +355,27 @@ extends 
 
 
-
+
 long
 getMinStoreFileAge()
 
-
+
 long
 getNumHFiles()
 
-
+
 long
 getNumReferenceFiles()
 
-
+
 HRegionInfo
 getRegionInfo()
 
-
+
 ScanInfo
 getScanInfo()
 
-
+
 KeyValueScanner
 getScanner(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]targetCols,
@@ -379,7 +383,7 @@ extends Return a scanner for both the memstore and the HStore 
files.
 
 
-
+
 default http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(booleancacheBlocks,
booleanisGet,
@@ -392,7 +396,7 @@ extends Get all scanners with no filtering based on TTL (that 
happens further down the line).
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(booleancacheBlocks,
booleanusePread,
@@ -406,7 +410,7 @@ extends Get all scanners with no filtering based on TTL (that 
happens further down the line).
 
 
-
+
 default http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilefiles,
booleancacheBlocks,
@@ -422,7 +426,7 @@ extends 
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilefiles,
booleancacheBlocks,
@@ -439,27 +443,27 @@ extends 
 
 
-
+
 long
 getSize()
 
-
+
 MemstoreSize
 getSizeOfMemStore()
 
-
+
 MemstoreSize
 getSizeOfSnapshot()
 
-
+
 MemstoreSize
 getSizeToFlush()
 
-
+
 long
 getSmallestReadPoint()
 
-
+
 long
 getSnapshotSize()
 Deprecated.
@@ -470,43 +474,43 @@ extends 
 
 
-
+
 byte[]
 getSplitPoint()
 Determines if Store should be split
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFile
 getStorefiles()
 
-
+
 int
 getStorefilesCount()
 
-
+
 long
 getStorefilesIndexSize()
 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
index acb426d..20455ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
@@ -34,9 +34,9 @@
 026@org.jamon.annotations.Argument(name 
= "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
 028@org.jamon.annotations.Argument(name 
= "bcn", type = "String"),
-029@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-030@org.jamon.annotations.Argument(name 
= "bcv", type = "String"),
-031@org.jamon.annotations.Argument(name 
= "filter", type = "String")})
+029@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+030@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+031@org.jamon.annotations.Argument(name 
= "bcv", type = "String")})
 032public class RSStatusTmpl
 033  extends 
org.jamon.AbstractTemplateProxy
 034{
@@ -94,57 +94,57 @@
 086  return m_bcn__IsNotDefault;
 087}
 088private boolean 
m_bcn__IsNotDefault;
-089// 22, 1
-090public void setFormat(String 
format)
+089// 21, 1
+090public void setFilter(String 
filter)
 091{
-092  // 22, 1
-093  m_format = format;
-094  m_format__IsNotDefault = true;
+092  // 21, 1
+093  m_filter = filter;
+094  m_filter__IsNotDefault = true;
 095}
-096public String getFormat()
+096public String getFilter()
 097{
-098  return m_format;
+098  return m_filter;
 099}
-100private String m_format;
-101public boolean 
getFormat__IsNotDefault()
+100private String m_filter;
+101public boolean 
getFilter__IsNotDefault()
 102{
-103  return m_format__IsNotDefault;
+103  return m_filter__IsNotDefault;
 104}
-105private boolean 
m_format__IsNotDefault;
-106// 24, 1
-107public void setBcv(String bcv)
+105private boolean 
m_filter__IsNotDefault;
+106// 22, 1
+107public void setFormat(String 
format)
 108{
-109  // 24, 1
-110  m_bcv = bcv;
-111  m_bcv__IsNotDefault = true;
+109  // 22, 1
+110  m_format = format;
+111  m_format__IsNotDefault = true;
 112}
-113public String getBcv()
+113public String getFormat()
 114{
-115  return m_bcv;
+115  return m_format;
 116}
-117private String m_bcv;
-118public boolean 
getBcv__IsNotDefault()
+117private String m_format;
+118public boolean 
getFormat__IsNotDefault()
 119{
-120  return m_bcv__IsNotDefault;
+120  return m_format__IsNotDefault;
 121}
-122private boolean 
m_bcv__IsNotDefault;
-123// 21, 1
-124public void setFilter(String 
filter)
+122private boolean 
m_format__IsNotDefault;
+123// 24, 1
+124public void setBcv(String bcv)
 125{
-126  // 21, 1
-127  m_filter = filter;
-128  m_filter__IsNotDefault = true;
+126  // 24, 1
+127  m_bcv = bcv;
+128  m_bcv__IsNotDefault = true;
 129}
-130public String getFilter()
+130public String getBcv()
 131{
-132  return m_filter;
+132  return m_bcv;
 133}
-134private String m_filter;
-135public boolean 
getFilter__IsNotDefault()
+134private String m_bcv;
+135public boolean 
getBcv__IsNotDefault()
 136{
-137  return m_filter__IsNotDefault;
+137  return m_bcv__IsNotDefault;
 138}
-139private boolean 
m_filter__IsNotDefault;
+139private boolean 
m_bcv__IsNotDefault;
 140  }
 141  @Override
 142  protected 
org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -163,24 +163,24 @@
 155return this;
 156  }
 157  
-158  protected String format;
-159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String 
p_format)
+158  protected String filter;
+159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
 160  {
-161
(getImplData()).setFormat(p_format);
+161
(getImplData()).setFilter(p_filter);
 162return this;
 163  }
 164  
-165  protected String bcv;
-166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
+165  protected String format;
+166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String 
p_format)
 167  {
-168(getImplData()).setBcv(p_bcv);
+168
(getImplData()).setFormat(p_format);
 169return this;
 170  }
 171  
-172  protected String filter;
-173  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
+172  

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
index 1e72f7d..f4cde49 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
@@ -1241,69 +1241,73 @@
 StripeStoreFileManager.getSplitPoint(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilesfs)
 
 
+private long
+HStore.getStorefilesSize(http://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true;
 title="class or interface in java.util.function">PredicateStoreFilepredicate)
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFile
 DefaultStoreFileManager.getUnneededFiles(longmaxTs,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilefilesCompacting)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFile
 StoreFileManager.getUnneededFiles(longmaxTs,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilefilesCompacting)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFile
 StripeStoreFileManager.getUnneededFiles(longmaxTs,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilefilesCompacting)
 
-
+
 static boolean
 StoreUtils.hasReferences(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilefiles)
 Determines whether any files in the collection are 
references.
 
 
-
+
 private static void
 StripeStoreFileManager.insertFileIntoStripe(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListStoreFilestripe,
 StoreFilesf)
 Inserts a file in the correct place (by seqnum) in a stripe 
copy.
 
 
-
+
 void
 DefaultStoreFileManager.insertNewFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilesfs)
 
-
+
 void
 StoreFileManager.insertNewFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilesfs)
 Adds new files, either for from MemStore flush or bulk 
insert, into the structure.
 
 
-
+
 void
 StripeStoreFileManager.insertNewFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilesfs)
 
-
+
 void
 DefaultStoreFileManager.loadFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilestoreFiles)
 
-
+
 void
 StoreFileManager.loadFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilestoreFiles)
 Loads the initial store files into empty 
StoreFileManager.
 
 
-
+
 void
 StripeStoreFileManager.loadFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilestoreFiles)
 
-
+
 private void
 StripeStoreFileManager.loadUnclassifiedStoreFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilestoreFiles)
 Loads initial store files that were picked up from some 
physical location pertaining to
  this store (presumably).
 
 
-
+
 private void
 HStore.logCompactionEndMessage(CompactionRequestcr,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFilesfs,
@@ -1312,45 +1316,45 @@
 Log a very elaborate compaction completion message.
 
 
-
+
 private void
 DefaultStoreFileManager.markCompactedAway(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilecompactedFiles)
 
-
+
 private void
 

[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/476c54ed
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/476c54ed
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/476c54ed

Branch: refs/heads/asf-site
Commit: 476c54ede973db0e1cdc92c5cddd7e437cc71afc
Parents: 6ea4056
Author: jenkins 
Authored: Sat Jun 10 14:59:32 2017 +
Committer: jenkins 
Committed: Sat Jun 10 14:59:32 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 17074 +
 checkstyle.rss  |30 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 4 +
 devapidocs/allclasses-noframe.html  | 4 +
 devapidocs/constant-values.html |95 +-
 devapidocs/index-all.html   |   294 +-
 .../org/apache/hadoop/hbase/ScheduledChore.html | 2 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |50 +-
 .../hadoop/hbase/class-use/ScheduledChore.html  | 7 +
 .../hadoop/hbase/class-use/Stoppable.html   | 7 +
 .../hadoop/hbase/class-use/TableName.html   |   225 +-
 .../class-use/InterfaceAudience.Private.html|29 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../hadoop/hbase/client/class-use/Admin.html|40 +-
 .../hbase/client/class-use/Connection.html  |48 +-
 .../hadoop/hbase/client/class-use/Get.html  |15 +-
 .../hadoop/hbase/client/class-use/Put.html  |22 +-
 .../hadoop/hbase/client/class-use/Scan.html |11 +
 .../hadoop/hbase/client/class-use/Table.html|24 +
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../apache/hadoop/hbase/client/package-use.html |21 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/ipc/NettyRpcConnection.html|19 +-
 .../apache/hadoop/hbase/ipc/class-use/Call.html | 7 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../master/HMaster.InitializationMonitor.html   |20 +-
 .../hbase/master/HMaster.RedirectServlet.html   |12 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   457 +-
 .../hadoop/hbase/master/MetricsMaster.html  |58 +-
 .../hbase/master/MetricsMasterQuotaSource.html  |   186 +-
 .../master/MetricsMasterQuotaSourceImpl.html|   146 +-
 .../hadoop/hbase/master/class-use/HMaster.html  | 4 +
 .../hbase/master/class-use/MetricsMaster.html   |15 +
 .../hadoop/hbase/master/package-tree.html   | 2 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |14 +-
 .../org/apache/hadoop/hbase/package-use.html|28 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../quotas/FileSystemUtilizationChore.html  |50 +-
 .../quotas/NamespaceQuotaSnapshotStore.html |11 +-
 .../QuotaObserverChore.TablesWithQuotas.html|38 +-
 .../hadoop/hbase/quotas/QuotaObserverChore.html |38 +-
 .../hadoop/hbase/quotas/QuotaSnapshotStore.html | 5 +-
 .../QuotaTableUtil.NamespaceQuotasVisitor.html  | 4 +-
 .../quotas/QuotaTableUtil.QuotasVisitor.html| 2 +-
 .../QuotaTableUtil.TableQuotasVisitor.html  | 4 +-
 .../QuotaTableUtil.UserQuotasVisitor.html   | 8 +-
 .../hadoop/hbase/quotas/QuotaTableUtil.html |   479 +-
 .../apache/hadoop/hbase/quotas/QuotaUtil.html   | 4 +-
 .../quotas/RegionServerSpaceQuotaManager.html   | 4 +-
 ...shotQuotaObserverChore.SnapshotWithSize.html |   394 +
 ...otQuotaObserverChore.StoreFileReference.html |   405 +
 .../quotas/SnapshotQuotaObserverChore.html  |   883 +
 .../hbase/quotas/SpaceLimitingException.html| 4 +-
 .../SpaceQuotaSnapshot.SpaceQuotaStatus.html|28 +-
 .../hadoop/hbase/quotas/SpaceQuotaSnapshot.html |30 +-
 .../hbase/quotas/TableQuotaSnapshotStore.html   |82 +-
 ...shotQuotaObserverChore.SnapshotWithSize.html |   203 +
 ...otQuotaObserverChore.StoreFileReference.html |   201 +
 .../class-use/SnapshotQuotaObserverChore.html   |   165 +
 

[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMaster.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMaster.html
index a9223ea..e159f19 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMaster.html
@@ -164,7 +164,28 @@
 156  }
 157};
 158  }
-159}
+159
+160  /**
+161   * Sets the execution time of a period 
of the {@code SnapshotQuotaObserverChore}.
+162   */
+163  public void 
incrementSnapshotObserverTime(final long executionTime) {
+164
masterQuotaSource.incrementSnapshotObserverChoreTime(executionTime);
+165  }
+166
+167  /**
+168   * Sets the execution time to compute 
the size of a single snapshot.
+169   */
+170  public void 
incrementSnapshotSizeComputationTime(final long executionTime) {
+171
masterQuotaSource.incrementSnapshotObserverSnapshotComputationTime(executionTime);
+172  }
+173
+174  /**
+175   * Sets the execution time to fetch the 
mapping of snapshots to originating table.
+176   */
+177  public void 
incrementSnapshotFetchTime(long executionTime) {
+178
masterQuotaSource.incrementSnapshotObserverSnapshotFetchTime(executionTime);
+179  }
+180}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
index 2d63c04..54c92d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
@@ -47,51 +47,78 @@
 039  String QUOTA_OBSERVER_CHORE_TIME_NAME = 
"quotaObserverChoreTime";
 040  String QUOTA_OBSERVER_CHORE_TIME_DESC 
=
 041  "Histogram for the time in millis 
for the QuotaObserverChore";
-042  String TABLE_QUOTA_USAGE_NAME = 
"tableSpaceQuotaOverview";
-043  String TABLE_QUOTA_USAGE_DESC = "A JSON 
summary of the usage of all tables with space quotas";
-044  String NS_QUOTA_USAGE_NAME = 
"namespaceSpaceQuotaOverview";
-045  String NS_QUOTA_USAGE_DESC = "A JSON 
summary of the usage of all namespaces with space quotas";
-046
-047  /**
-048   * Updates the metric tracking the 
number of space quotas defined in the system.
-049   *
-050   * @param numSpaceQuotas The number of 
space quotas defined
-051   */
-052  void updateNumSpaceQuotas(long 
numSpaceQuotas);
-053
-054  /**
-055   * Updates the metric tracking the 
number of tables the master has computed to be in
-056   * violation of their space quota.
-057   *
-058   * @param numTablesInViolation The 
number of tables violating a space quota
-059   */
-060  void 
updateNumTablesInSpaceQuotaViolation(long numTablesInViolation);
-061
-062  /**
-063   * Updates the metric tracking the 
number of namespaces the master has computed to be in
-064   * violation of their space quota.
-065   *
-066   * @param numNamespacesInViolation The 
number of namespaces violating a space quota
-067   */
-068  void 
updateNumNamespacesInSpaceQuotaViolation(long numNamespacesInViolation);
-069
-070  /**
-071   * Updates the metric tracking the 
number of region size reports the master is currently
-072   * retaining in memory.
-073   *
-074   * @param numCurrentRegionSizeReports 
The number of region size reports the master is holding in
-075   *memory
+042  String 
SNAPSHOT_OBSERVER_CHORE_TIME_NAME = "snapshotQuotaObserverChoreTime";
+043  String 
SNAPSHOT_OBSERVER_CHORE_TIME_DESC =
+044  "Histogram for the time in millis 
for the SnapshotQuotaObserverChore";
+045  String 
SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME = 
"snapshotObserverSizeComputationTime";
+046  String 
SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC =
+047  "Histogram for the time in millis 
to compute the size of each snapshot";
+048  String 
SNAPSHOT_OBSERVER_FETCH_TIME_NAME = "snapshotObserverSnapshotFetchTime";
+049  String 
SNAPSHOT_OBSERVER_FETCH_TIME_DESC =
+050  "Histogram for the time in millis 
to fetch all snapshots from HBase";
+051  String TABLE_QUOTA_USAGE_NAME = 
"tableSpaceQuotaOverview";
+052  String TABLE_QUOTA_USAGE_DESC = "A JSON 
summary of the usage of all tables with space quotas";
+053  String NS_QUOTA_USAGE_NAME = 
"namespaceSpaceQuotaOverview";
+054  String NS_QUOTA_USAGE_DESC = "A JSON 
summary of the usage of all namespaces with space quotas";
+055
+056  /**
+057   * Updates the metric tracking the 
number of space quotas defined in the system.
+058   *
+059   * @param numSpaceQuotas The number of 
space quotas defined

[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index ff8a519..09d3bdf 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-2231
+2232
 0
 0
-14590
+14594
 
 Files
 
@@ -3817,7 +3817,7 @@
 org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 0
 0
-5
+7
 
 org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java
 0
@@ -3832,7 +3832,7 @@
 org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java
 0
 0
-3
+2
 
 org/apache/hadoop/hbase/quotas/NoopOperationQuota.java
 0
@@ -3882,7 +3882,7 @@
 org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
 0
 0
-3
+5
 
 org/apache/hadoop/hbase/quotas/QuotaUtil.java
 0
@@ -3904,3301 +3904,3306 @@
 0
 4
 
+org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java
+0
+0
+2
+
 org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
 0
 0
-3
-
+2
+
 org/apache/hadoop/hbase/quotas/ThrottleSettings.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/quotas/ThrottlingException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/quotas/UserQuotaState.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
 0
 0
 16
-
+
 org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/CellArrayMap.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/CellChunkMap.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/CellFlatMap.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/regionserver/CellSet.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/CellSink.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/Chunk.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/CompactSplit.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischargeHandler.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/CompactionTool.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java
 0
 0
 31
-
+
 org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushRequestListener.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushRequester.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/HMobStore.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/regionserver/HRegion.java
 0
 0
 207
-
+
 org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 0
 0
 47
-
+
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
 0
 137
-
+
 org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/HStore.java
 0
 0
 52
-
+
 org/apache/hadoop/hbase/regionserver/HStoreFile.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
 0
 0
 15
-
+
 org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
 0
 0
 2
-
+
 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 80447fa..6faced1 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HMaster
+public class HMaster
 extends HRegionServer
 implements MasterServices
 HMaster is the "master server" for HBase. An HBase cluster 
has one active
@@ -424,22 +424,26 @@ implements snapshotManager
 
 
+private SnapshotQuotaObserverChore
+snapshotQuotaChore
+
+
 private SpaceQuotaSnapshotNotifier
 spaceQuotaSnapshotNotifier
 
-
+
 private SplitOrMergeTracker
 splitOrMergeTracker
 
-
+
 private long
 splitPlanCount
 
-
+
 private TableStateManager
 tableStateManager
 
-
+
 private MasterWalManager
 walManager
 
@@ -1400,7 +1404,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -1409,7 +1413,7 @@ implements 
 
 MASTER
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
 
 See Also:
 Constant
 Field Values
@@ -1422,7 +1426,7 @@ implements 
 
 activeMasterManager
-private finalActiveMasterManager activeMasterManager
+private finalActiveMasterManager activeMasterManager
 
 
 
@@ -1431,7 +1435,7 @@ implements 
 
 regionServerTracker
-RegionServerTracker regionServerTracker
+RegionServerTracker regionServerTracker
 
 
 
@@ -1440,7 +1444,7 @@ implements 
 
 drainingServerTracker
-privateDrainingServerTracker 
drainingServerTracker
+privateDrainingServerTracker 
drainingServerTracker
 
 
 
@@ -1449,7 +1453,7 @@ implements 
 
 loadBalancerTracker
-LoadBalancerTracker loadBalancerTracker
+LoadBalancerTracker loadBalancerTracker
 
 
 
@@ -1458,7 +1462,7 @@ implements 
 
 splitOrMergeTracker
-privateSplitOrMergeTracker splitOrMergeTracker
+privateSplitOrMergeTracker splitOrMergeTracker
 
 
 
@@ -1467,7 +1471,7 @@ implements 
 
 regionNormalizerTracker
-privateRegionNormalizerTracker 
regionNormalizerTracker
+privateRegionNormalizerTracker 
regionNormalizerTracker
 
 
 
@@ -1476,7 +1480,7 @@ implements 
 
 maintenanceModeTracker
-privateMasterMaintenanceModeTracker maintenanceModeTracker
+privateMasterMaintenanceModeTracker maintenanceModeTracker
 
 
 
@@ -1485,7 +1489,7 @@ implements 
 
 clusterSchemaService
-privateClusterSchemaService clusterSchemaService
+privateClusterSchemaService clusterSchemaService
 
 
 
@@ -1494,7 +1498,7 @@ implements 
 
 metricsMaster
-finalMetricsMaster metricsMaster
+finalMetricsMaster metricsMaster
 
 
 
@@ -1503,7 +1507,7 @@ implements 
 
 fileSystemManager
-privateMasterFileSystem fileSystemManager
+privateMasterFileSystem fileSystemManager
 
 
 
@@ -1512,7 +1516,7 @@ implements 
 
 walManager
-privateMasterWalManager walManager
+privateMasterWalManager walManager
 
 
 
@@ -1521,7 +1525,7 @@ implements 
 
 serverManager
-private volatileServerManager serverManager
+private volatileServerManager serverManager
 
 
 
@@ -1530,7 +1534,7 @@ implements 
 
 assignmentManager
-privateAssignmentManager assignmentManager
+privateAssignmentManager assignmentManager
 
 
 
@@ -1539,7 +1543,7 @@ implements 
 
 replicationManager
-privateReplicationManager replicationManager
+privateReplicationManager replicationManager
 
 
 
@@ -1548,7 +1552,7 @@ implements 
 
 rsFatals
-MemoryBoundedLogMessageBuffer rsFatals
+MemoryBoundedLogMessageBuffer rsFatals
 
 
 
@@ -1557,7 +1561,7 @@ implements 
 
 activeMaster
-private volatileboolean activeMaster
+private volatileboolean activeMaster
 
 
 
@@ -1566,7 +1570,7 @@ implements 
 
 initialized
-private finalProcedureEvent initialized
+private finalProcedureEvent initialized
 
 
 
@@ -1575,7 +1579,7 @@ implements 
 
 serviceStarted
-volatileboolean serviceStarted
+volatileboolean serviceStarted
 
 
 
@@ -1584,7 +1588,7 @@ implements 
 
 serverCrashProcessingEnabled
-private finalProcedureEvent serverCrashProcessingEnabled
+private finalProcedureEvent serverCrashProcessingEnabled
 
 
 
@@ -1593,7 +1597,7 @@ implements 
 
 maxBlancingTime
-private finalint maxBlancingTime
+private finalint maxBlancingTime
 
 
 
@@ -1602,7 +1606,7 @@ implements 
 
 maxRitPercent
-private finaldouble maxRitPercent
+private finaldouble maxRitPercent
 
 
 
@@ -1611,7 +1615,7 @@ implements 
 
 lockManager
-private finalLockManager lockManager
+private finalLockManager lockManager
 
 
 
@@ -1620,7 +1624,7 @@ implements 
 
 balancer

[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 96b824f..d262a1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":9,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":42,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":42,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":41,"i89":41,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":9,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":42,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":42,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":41,"i91":41,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HStore
+public class HStore
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements Store
 A Store holds a column family in a Region.  Its a memstore 
and a set of zero
@@ -714,72 +714,76 @@ implements getFlushedOutputFileSize()
 
 
+long
+getHFilesSize()
+
+
 HRegion
 getHRegion()
 
-
+
 long
 getLastCompactSize()
 
-
+
 long
 getMajorCompactedCellsCount()
 
-
+
 long
 getMajorCompactedCellsSize()
 
-
+
 long
 getMaxMemstoreTS()
 
-
+
 long
 getMaxSequenceId()
 
-
+
 long
 getMaxStoreFileAge()
 
-
+
 long
 getMemstoreFlushSize()
 
-
+
 long
 getMemStoreSize()
 Deprecated.
 
 
-
+
 long
 getMinStoreFileAge()
 
-
+
 long
 getNumHFiles()
 
-
+
 long
 getNumReferenceFiles()
 
-
+
 protected OffPeakHours
 getOffPeakHours()
 
-
+
 HRegionFileSystem
 getRegionFileSystem()
 
-
+
 HRegionInfo
 getRegionInfo()
 
-
+
 ScanInfo
 getScanInfo()
 
-
+
 KeyValueScanner
 getScanner(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]targetCols,
@@ -787,7 +791,7 @@ implements Return a scanner for both the memstore and the HStore 
files.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(booleancacheBlocks,
booleanusePread,
@@ -801,7 +805,7 @@ implements Get all scanners with no filtering based on TTL (that 
happens further down the line).
 
 
-
+
 

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
index ffd3511..9d902a1 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -222,23 +222,41 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 void
+incrementSnapshotFetchTime(longexecutionTime)
+Sets the execution time to fetch the mapping of snapshots 
to originating table.
+
+
+
+void
+incrementSnapshotObserverTime(longexecutionTime)
+Sets the execution time of a period of the 
SnapshotQuotaObserverChore.
+
+
+
+void
+incrementSnapshotSizeComputationTime(longexecutionTime)
+Sets the execution time to compute the size of a single 
snapshot.
+
+
+
+void
 setNumNamespacesInSpaceQuotaViolation(longnumNamespacesInViolation)
 Sets the number of namespaces in violation of a space 
quota.
 
 
-
+
 void
 setNumRegionSizeReports(longnumRegionReports)
 Sets the number of region size reports the master currently 
has in memory.
 
 
-
+
 void
 setNumSpaceQuotas(longnumSpaceQuotas)
 Sets the number of space quotas defined.
 
 
-
+
 void
 setNumTableInSpaceQuotaViolation(longnumTablesInViolation)
 Sets the number of table in violation of a space 
quota.
@@ -464,7 +482,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 convertToProcedureMetrics
 public staticProcedureMetricsconvertToProcedureMetrics(OperationMetricsmetrics)
@@ -478,6 +496,36 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  required conversion.
 
 
+
+
+
+
+
+incrementSnapshotObserverTime
+publicvoidincrementSnapshotObserverTime(longexecutionTime)
+Sets the execution time of a period of the 
SnapshotQuotaObserverChore.
+
+
+
+
+
+
+
+incrementSnapshotSizeComputationTime
+publicvoidincrementSnapshotSizeComputationTime(longexecutionTime)
+Sets the execution time to compute the size of a single 
snapshot.
+
+
+
+
+
+
+
+incrementSnapshotFetchTime
+publicvoidincrementSnapshotFetchTime(longexecutionTime)
+Sets the execution time to fetch the mapping of snapshots 
to originating table.
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html 
b/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
index d1358b1..14b9b22 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -196,6 +196,30 @@ extends 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_CHORE_TIME_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_CHORE_TIME_NAME
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_FETCH_TIME_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_FETCH_TIME_NAME
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 

[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
new file mode 100644
index 000..3e745ba
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
@@ -0,0 +1,883 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SnapshotQuotaObserverChore (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.quotas
+Class 
SnapshotQuotaObserverChore
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.ScheduledChore
+
+
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable
+
+
+
+@InterfaceAudience.Private
+public class SnapshotQuotaObserverChore
+extends ScheduledChore
+A Master-invoked Chore that computes the size 
of each snapshot which was created from
+ a table which has a space quota.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+(package private) static class
+SnapshotQuotaObserverChore.SnapshotWithSize
+A struct encapsulating the name of a snapshot and its 
"size" on the filesystem.
+
+
+
+(package private) static class
+SnapshotQuotaObserverChore.StoreFileReference
+A reference to a collection of files in the archive 
directory for a single region.
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.conf.Configuration
+conf
+
+
+private Connection
+conn
+
+
+private org.apache.hadoop.fs.FileSystem
+fs
+
+
+private static 
org.apache.commons.logging.Log
+LOG
+
+
+private MetricsMaster
+metrics
+
+
+(package private) static long
+SNAPSHOT_QUOTA_CHORE_DELAY_DEFAULT
+
+
+(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_QUOTA_CHORE_DELAY_KEY
+
+
+(package private) static int
+SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT
+
+
+(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_QUOTA_CHORE_PERIOD_KEY
+
+
+(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT
+
+
+(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+SnapshotQuotaObserverChore(Connectionconn,
+  org.apache.hadoop.conf.Configurationconf,
+  org.apache.hadoop.fs.FileSystemfs,
+  Stoppablestopper,
+  MetricsMastermetrics)
+
+
+SnapshotQuotaObserverChore(HMastermaster,
+  MetricsMastermetrics)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+(package private) void
+_chore()
+
+
+protected void
+chore()
+The task to execute on each scheduled execution of the 
Chore
+
+
+
+(package private) 

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 0c3fe3b..d262744 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -149,3339 +149,3348 @@
 141import 
org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
 142import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 143import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-144import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146import 
org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-148import 
org.apache.hadoop.hbase.regionserver.HStore;
-149import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151import 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154import 
org.apache.hadoop.hbase.replication.ReplicationException;
-155import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-156import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-161import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-162import 
org.apache.hadoop.hbase.security.UserProvider;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170import 
org.apache.hadoop.hbase.util.Addressing;
-171import 
org.apache.hadoop.hbase.util.Bytes;
-172import 
org.apache.hadoop.hbase.util.CompressionTest;
-173import 
org.apache.hadoop.hbase.util.EncryptionTest;
-174import 
org.apache.hadoop.hbase.util.FSUtils;
-175import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176import 
org.apache.hadoop.hbase.util.HasThread;
-177import 
org.apache.hadoop.hbase.util.IdLock;
-178import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179import 
org.apache.hadoop.hbase.util.Pair;
-180import 
org.apache.hadoop.hbase.util.Threads;
-181import 
org.apache.hadoop.hbase.util.VersionInfo;
-182import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-183import 
org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188import 
org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189import 
org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193import 
org.apache.zookeeper.KeeperException;
-194import org.eclipse.jetty.server.Server;
-195import 
org.eclipse.jetty.server.ServerConnector;
-196import 
org.eclipse.jetty.servlet.ServletHolder;
-197import 
org.eclipse.jetty.webapp.WebAppContext;
-198
-199import 
com.google.common.annotations.VisibleForTesting;
-200import com.google.common.collect.Lists;
-201import com.google.common.collect.Maps;
-202import com.google.protobuf.Descriptors;
-203import com.google.protobuf.Service;
-204
-205/**
-206 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-207 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-208 * run the cluster.  All others park 
themselves in their constructor until
-209 * master or cluster shutdown or until 
the active master loses its lease in
-210 * 

[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 0c3fe3b..d262744 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -149,3339 +149,3348 @@
 141import 
org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
 142import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 143import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-144import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146import 
org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-148import 
org.apache.hadoop.hbase.regionserver.HStore;
-149import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151import 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154import 
org.apache.hadoop.hbase.replication.ReplicationException;
-155import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-156import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-161import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-162import 
org.apache.hadoop.hbase.security.UserProvider;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170import 
org.apache.hadoop.hbase.util.Addressing;
-171import 
org.apache.hadoop.hbase.util.Bytes;
-172import 
org.apache.hadoop.hbase.util.CompressionTest;
-173import 
org.apache.hadoop.hbase.util.EncryptionTest;
-174import 
org.apache.hadoop.hbase.util.FSUtils;
-175import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176import 
org.apache.hadoop.hbase.util.HasThread;
-177import 
org.apache.hadoop.hbase.util.IdLock;
-178import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179import 
org.apache.hadoop.hbase.util.Pair;
-180import 
org.apache.hadoop.hbase.util.Threads;
-181import 
org.apache.hadoop.hbase.util.VersionInfo;
-182import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-183import 
org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188import 
org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189import 
org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193import 
org.apache.zookeeper.KeeperException;
-194import org.eclipse.jetty.server.Server;
-195import 
org.eclipse.jetty.server.ServerConnector;
-196import 
org.eclipse.jetty.servlet.ServletHolder;
-197import 
org.eclipse.jetty.webapp.WebAppContext;
-198
-199import 
com.google.common.annotations.VisibleForTesting;
-200import com.google.common.collect.Lists;
-201import com.google.common.collect.Maps;
-202import com.google.protobuf.Descriptors;
-203import com.google.protobuf.Service;
-204
-205/**
-206 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-207 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-208 * run the cluster.  All others park 
themselves in their constructor until
-209 * master or cluster shutdown or until 
the active master loses its lease in
-210 * zookeeper.  Thereafter, all 

[47/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/ScheduledChore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ScheduledChore.html 
b/devapidocs/org/apache/hadoop/hbase/ScheduledChore.html
index 93752d9..ade39c4 100644
--- a/devapidocs/org/apache/hadoop/hbase/ScheduledChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/ScheduledChore.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 Direct Known Subclasses:
-BalancerChore, CatalogJanitor, CleanerChore, ClusterStatusChore, ClusterStatusPublisher, CompactedHFilesDischarger, ExpiredMobFileCleanerChore, FileSystemUtilizationChore, HealthCheckChore, HeapMemoryManager.HeapMemoryTunerChore,
 HRegionServer.CompactionChecker, HRegionServer.MovedRegionsCleaner, 
 HRegionServer.PeriodicMemstoreFlusher,
 MobCompactionChore, QuotaCache.QuotaRefresherChore, QuotaObserverChore, RegionNormalizerChore, Repli
 cationMetaCleaner, ReplicationZKNodeCleanerChore, SpaceQuotaRefresherChore, 
SplitLogManager.TimeoutMonitor, StorefileRefresherChore
+BalancerChore, CatalogJanitor, CleanerChore, ClusterStatusChore, ClusterStatusPublisher, CompactedHFilesDischarger, ExpiredMobFileCleanerChore, FileSystemUtilizationChore, HealthCheckChore, HeapMemoryManager.HeapMemoryTunerChore,
 HRegionServer.CompactionChecker, HRegionServer.MovedRegionsCleaner, 
 HRegionServer.PeriodicMemstoreFlusher,
 MobCompactionChore, QuotaCache.QuotaRefresherChore, QuotaObserverChore, RegionNormalizerChore, Repli
 cationMetaCleaner, ReplicationZKNodeCleanerChore, SnapshotQuotaObserverChore, SpaceQuotaRefresherChore, 
SplitLogManager.TimeoutMonitor, StorefileRefresherChore
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index 746a472..2f84b3e 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -166,10 +166,10 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.backup.BackupType
-org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
+org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupInfo.BackupState
+org.apache.hadoop.hbase.backup.BackupType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 3b80f88..bd79f29 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -177,65 +177,69 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-org.apache.hadoop.hbase.regionserver
+org.apache.hadoop.hbase.quotas
 
 
 
-org.apache.hadoop.hbase.regionserver.handler
+org.apache.hadoop.hbase.regionserver
 
 
 
-org.apache.hadoop.hbase.regionserver.querymatcher
+org.apache.hadoop.hbase.regionserver.handler
 
 
 
-org.apache.hadoop.hbase.regionserver.wal
+org.apache.hadoop.hbase.regionserver.querymatcher
 
 
 
+org.apache.hadoop.hbase.regionserver.wal
+
+
+
 org.apache.hadoop.hbase.replication
 
 Multi Cluster Replication
 
 
-
+
 org.apache.hadoop.hbase.replication.regionserver
 
 
-
+
 org.apache.hadoop.hbase.rest
 
 HBase REST
 
 
-
+
 org.apache.hadoop.hbase.rest.model
 
 
-
+
 org.apache.hadoop.hbase.security.access
 
 
-
+
 org.apache.hadoop.hbase.security.visibility
 
 
-
+
 org.apache.hadoop.hbase.thrift
 
 Provides an HBase http://incubator.apache.org/thrift/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.util
 
 
-
+
 org.apache.hadoop.hbase.util.test
 
 
-
+
 org.apache.hadoop.hbase.wal
 
 
@@ -4453,6 +4457,26 @@ service.
 
 
 
+
+
+
+Uses of Cell in org.apache.hadoop.hbase.quotas
+
+Methods in org.apache.hadoop.hbase.quotas
 with parameters of type Cell
+
+Modifier and Type
+Method and Description
+
+
+
+(package private) static long
+QuotaTableUtil.parseSnapshotSize(Cellc)
+Parses the snapshot size from the given Cell's value.
+
+
+
+
+
 
 
 


[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
index 78816d5..ca6eaae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
@@ -169,599 +169,601 @@
 161// The current "view" of region space 
use. Used henceforth.
 162final MapHRegionInfo,Long 
reportedRegionSpaceUse = quotaManager.snapshotRegionSizes();
 163if (LOG.isTraceEnabled()) {
-164  LOG.trace("Using " + 
reportedRegionSpaceUse.size() + " region space use reports");
-165}
-166
-167// Remove the "old" region reports
-168pruneOldRegionReports();
-169
-170// Create the stores to track table 
and namespace snapshots
-171
initializeSnapshotStores(reportedRegionSpaceUse);
-172// Report the number of (non-expired) 
region size reports
-173if (metrics != null) {
-174  
metrics.setNumRegionSizeReports(reportedRegionSpaceUse.size());
-175}
-176
-177// Filter out tables for which we 
don't have adequate regionspace reports yet.
-178// Important that we do this after we 
instantiate the stores above
-179// This gives us a set of Tables 
which may or may not be violating their quota.
-180// To be safe, we want to make sure 
that these are not in violation.
-181SetTableName tablesInLimbo = 
tablesWithQuotas.filterInsufficientlyReportedTables(
-182tableSnapshotStore);
-183
-184if (LOG.isTraceEnabled()) {
-185  LOG.trace("Filtered insufficiently 
reported tables, left with " +
-186  reportedRegionSpaceUse.size() + 
" regions reported");
-187}
-188
-189for (TableName tableInLimbo : 
tablesInLimbo) {
-190  final SpaceQuotaSnapshot 
currentSnapshot = tableSnapshotStore.getCurrentState(tableInLimbo);
-191  if 
(currentSnapshot.getQuotaStatus().isInViolation()) {
-192if (LOG.isTraceEnabled()) {
-193  LOG.trace("Moving " + 
tableInLimbo + " out of violation because fewer region sizes were"
-194  + " reported than 
required.");
-195}
-196SpaceQuotaSnapshot targetSnapshot 
= new SpaceQuotaSnapshot(
-197
SpaceQuotaStatus.notInViolation(), currentSnapshot.getUsage(),
-198
currentSnapshot.getLimit());
-199
this.snapshotNotifier.transitionTable(tableInLimbo, targetSnapshot);
-200// Update it in the Table 
QuotaStore so that memory is consistent with no violation.
-201
tableSnapshotStore.setCurrentState(tableInLimbo, targetSnapshot);
-202  }
-203}
-204
-205// Transition each table to/from 
quota violation based on the current and target state.
-206// Only table quotas are enacted.
-207final SetTableName 
tablesWithTableQuotas = tablesWithQuotas.getTableQuotaTables();
-208
processTablesWithQuotas(tablesWithTableQuotas);
-209
-210// For each Namespace quota, 
transition each table in the namespace in or out of violation
-211// only if a table quota violation 
policy has not already been applied.
-212final SetString 
namespacesWithQuotas = tablesWithQuotas.getNamespacesWithQuotas();
-213final 
MultimapString,TableName tablesByNamespace = 
tablesWithQuotas.getTablesByNamespace();
-214
processNamespacesWithQuotas(namespacesWithQuotas, tablesByNamespace);
-215  }
-216
-217  void 
initializeSnapshotStores(MapHRegionInfo,Long regionSizes) {
-218MapHRegionInfo,Long 
immutableRegionSpaceUse = Collections.unmodifiableMap(regionSizes);
-219if (tableSnapshotStore == null) {
-220  tableSnapshotStore = new 
TableQuotaSnapshotStore(conn, this, immutableRegionSpaceUse);
-221} else {
-222  
tableSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-223}
-224if (namespaceSnapshotStore == null) 
{
-225  namespaceSnapshotStore = new 
NamespaceQuotaSnapshotStore(
-226  conn, this, 
immutableRegionSpaceUse);
-227} else {
-228  
namespaceSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-229}
-230  }
-231
-232  /**
-233   * Processes each {@code TableName} 
which has a quota defined and moves it in or out of
-234   * violation based on the space use.
-235   *
-236   * @param tablesWithTableQuotas The 
HBase tables which have quotas defined
-237   */
-238  void processTablesWithQuotas(final 
SetTableName tablesWithTableQuotas) throws IOException {
-239long numTablesInViolation = 0L;
-240for (TableName table : 
tablesWithTableQuotas) {
-241  final SpaceQuota spaceQuota = 
tableSnapshotStore.getSpaceQuota(table);
-242  if (spaceQuota == null) {
-243if (LOG.isDebugEnabled()) {
-244  LOG.debug("Unexpectedly did not 
find a space quota 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
@@ -53,606 +53,717 @@
 045import 
org.apache.hadoop.hbase.client.ResultScanner;
 046import 
org.apache.hadoop.hbase.client.Scan;
 047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.CompareFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-052import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import 
org.apache.hadoop.hbase.filter.RowFilter;
-054import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the 
quota table.
-073 * pre
-074 * ROW-KEY  FAM/QUAL
DATA
-075 *   n.lt;namespacegt; q:s  
   lt;global-quotasgt;
-076 *   t.lt;namespacegt; u:p  
  lt;namespace-quota policygt;
-077 *   t.lt;tablegt; q:s  
   lt;global-quotasgt;
-078 *   t.lt;tablegt; u:p  
  lt;table-quota policygt;
-079 *   u.lt;usergt;  q:s  
   lt;global-quotasgt;
-080 *   u.lt;usergt;  
q:s.lt;tablegt; lt;table-quotasgt;
-081 *   u.lt;usergt;  
q:s.lt;nsgt;:   lt;namespace-quotasgt;
-082 * /pre
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName 
QUOTA_TABLE_NAME =
-091  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String 
QUOTA_POLICY_COLUMN =
-099  Bytes.toString(QUOTA_FAMILY_USAGE) 
+ ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] 
QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] 
QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] 
QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* 
=
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas 
getTableQuota(final Connection connection, final TableName table)
-108  throws IOException {
-109return getQuotas(connection, 
getTableRowKey(table));
-110  }
-111
-112  public static Quotas 
getNamespaceQuota(final Connection connection, final String namespace)
-113  throws IOException {
-114return getQuotas(connection, 
getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final 
Connection connection, final String user)
-118  throws IOException {
-119return 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
--
diff --git 
a/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html 
b/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
index bc9ad26..4c691a0 100644
--- a/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
+++ b/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
@@ -223,18 +223,30 @@ service.
 
 
 private MetricHistogram
-MetricsSnapshotSourceImpl.snapshotRestoreTimeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverSizeComputationTimeHisto
 
 
 private MetricHistogram
-MetricsSnapshotSourceImpl.snapshotTimeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverSnapshotFetchTimeHisto
 
 
 private MetricHistogram
-MetricsMasterFilesystemSourceImpl.splitSizeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverTimeHisto
 
 
 private MetricHistogram
+MetricsSnapshotSourceImpl.snapshotRestoreTimeHisto
+
+
+private MetricHistogram
+MetricsSnapshotSourceImpl.snapshotTimeHisto
+
+
+private MetricHistogram
+MetricsMasterFilesystemSourceImpl.splitSizeHisto
+
+
+private MetricHistogram
 MetricsMasterFilesystemSourceImpl.splitTimeHisto
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index 1e62252..1903c18 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -3268,6 +3268,7 @@
 org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore
 org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner
 org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleanerChore
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
 org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 org.apache.hadoop.hbase.master.SplitLogManager.TimeoutMonitor
 org.apache.hadoop.hbase.regionserver.StorefileRefresherChore
@@ -3393,6 +3394,8 @@
 org.apache.hadoop.hbase.snapshot.SnapshotManifestV2
 org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.ManifestBuilder (implements 
org.apache.hadoop.hbase.snapshot.SnapshotManifest.RegionVisitorTRegion,TFamily)
 org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.StoreFileReference
 org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil
 org.apache.hadoop.hbase.util.SortedListE (implements java.util.http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListE, java.util.http://docs.oracle.com/javase/8/docs/api/java/util/RandomAccess.html?is-external=true;
 title="class or interface in java.util">RandomAccess)
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot
@@ -5129,6 +5132,7 @@
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
 org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position
+org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipperThread.WorkerState
 org.apache.hadoop.hbase.security.visibility.expression.Operator
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 org.apache.hadoop.hbase.security.access.AccessController.OpType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 58cbf79..a976616 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"30817b922ed72ba5630d8cea3d26bba9fef346e4";
+011  public static final String revision = 
"ea64dbef7f5239ab2162d0bd3dccded60e20ecda";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Fri 
Jun  9 14:39:05 UTC 2017";
+013  public static final String date = "Sat 
Jun 10 14:39:04 UTC 2017";
 014  public static final String url = 
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "0bf0d03062d078ec9d8e0f407e783cfa";
+015  public static final String srcChecksum 
= "0347fbff381e8e942b1429ce992adda0";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 89e07b5..9299946 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -127,9 +127,12 @@
 org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore
 org.apache.hadoop.hbase.quotas.QuotaCache.QuotaRefresherChore
 org.apache.hadoop.hbase.quotas.QuotaObserverChore
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
 org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 
 
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.StoreFileReference
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory
@@ -198,13 +201,13 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.quotas.QuotaSnapshotStore.ViolationState
-org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
-org.apache.hadoop.hbase.quotas.ThrottleType
-org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
+org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
+org.apache.hadoop.hbase.quotas.QuotaSnapshotStore.ViolationState
+org.apache.hadoop.hbase.quotas.ThrottleType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
index a4f61de..9ef8639 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
@@ -157,11 +157,17 @@
 
 
 
+SnapshotQuotaObserverChore
+A Master-invoked Chore that computes the size 
of each snapshot which was created from
+ a table which has a space quota.
+
+
+
 SpaceQuotaSnapshot
 A point-in-time view of a space quota on a table.
 
 
-
+
 SpaceQuotaSnapshotNotifier
 An interface which abstract away the action taken to enable 
or disable
  a space quota violation policy across the HBase cluster.
@@ -329,6 +335,16 @@
 
 
 
+SnapshotQuotaObserverChore.SnapshotWithSize
+A struct encapsulating the name of a snapshot and its 
"size" on the filesystem.
+
+
+
+SnapshotQuotaObserverChore.StoreFileReference
+A reference to a collection of files in the archive 
directory for a single region.
+
+
+
 SpaceLimitingException
 An Exception that is thrown when a space quota is in 
violation.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
index e2832b3..51c5c1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
@@ -472,7 +472,7 @@ extends HStore
-add,
 add,
 addChangedReaderObserver,
 areWritesEnabled,
 assertBulkLoadHFileOk,
 bulkLoadHFile,
 bulkLoadHFile,
 cancelRequestedCompaction,
 canSplit,
 close,
 closeAndArchiveCompactedFiles,
 compact,
 compact,
 compactRecentForTestingAssumingDefaultPolicy,
 completeCompaction,
 createFlushContext,
 createWriterInTmp,
 createWriterInTmp,
 createWriterInTmp,
 deleteChangedReaderObserver,
 deregisterChildren,
 determineTTLFromFamily,
 flushCache, getAvgStoreFileAge,
 getBlockingFileCount,
 getBytesPerChecksum,
 getCacheConfig,
 getChecksumType,
 getCloseCheckInterval,
 getColumnFamilyName,
 getCompactedCellsCount,
 getCompactedCellsSize,
 getCompactionCheckMultiplier,
 getCompactionPressure,
 getCompactionProgress,
 getCompactPriority,
 getComparator,
 getCoprocessorHost,
 getDataBlockEncoder,
 getFamily,
 getFileSystem,
 getFlushableSize,
 getFlushedCellsCount,
 getFlushedCellsSize,
 getFlushedOutputFileSize,
 getHRegion,
 

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
@@ -53,606 +53,717 @@
 045import 
org.apache.hadoop.hbase.client.ResultScanner;
 046import 
org.apache.hadoop.hbase.client.Scan;
 047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.CompareFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-052import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import 
org.apache.hadoop.hbase.filter.RowFilter;
-054import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the 
quota table.
-073 * pre
-074 * ROW-KEY  FAM/QUAL
DATA
-075 *   n.lt;namespacegt; q:s  
   lt;global-quotasgt;
-076 *   t.lt;namespacegt; u:p  
  lt;namespace-quota policygt;
-077 *   t.lt;tablegt; q:s  
   lt;global-quotasgt;
-078 *   t.lt;tablegt; u:p  
  lt;table-quota policygt;
-079 *   u.lt;usergt;  q:s  
   lt;global-quotasgt;
-080 *   u.lt;usergt;  
q:s.lt;tablegt; lt;table-quotasgt;
-081 *   u.lt;usergt;  
q:s.lt;nsgt;:   lt;namespace-quotasgt;
-082 * /pre
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName 
QUOTA_TABLE_NAME =
-091  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String 
QUOTA_POLICY_COLUMN =
-099  Bytes.toString(QUOTA_FAMILY_USAGE) 
+ ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] 
QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] 
QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] 
QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* 
=
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas 
getTableQuota(final Connection connection, final TableName table)
-108  throws IOException {
-109return getQuotas(connection, 
getTableRowKey(table));
-110  }
-111
-112  public static Quotas 
getNamespaceQuota(final Connection connection, final String namespace)
-113  throws IOException {
-114return getQuotas(connection, 
getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final 
Connection connection, final String user)
-118  throws IOException {
-119return 

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html
@@ -53,606 +53,717 @@
 045import 
org.apache.hadoop.hbase.client.ResultScanner;
 046import 
org.apache.hadoop.hbase.client.Scan;
 047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.CompareFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-052import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import 
org.apache.hadoop.hbase.filter.RowFilter;
-054import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the 
quota table.
-073 * pre
-074 * ROW-KEY  FAM/QUAL
DATA
-075 *   n.lt;namespacegt; q:s  
   lt;global-quotasgt;
-076 *   t.lt;namespacegt; u:p  
  lt;namespace-quota policygt;
-077 *   t.lt;tablegt; q:s  
   lt;global-quotasgt;
-078 *   t.lt;tablegt; u:p  
  lt;table-quota policygt;
-079 *   u.lt;usergt;  q:s  
   lt;global-quotasgt;
-080 *   u.lt;usergt;  
q:s.lt;tablegt; lt;table-quotasgt;
-081 *   u.lt;usergt;  
q:s.lt;nsgt;:   lt;namespace-quotasgt;
-082 * /pre
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName 
QUOTA_TABLE_NAME =
-091  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String 
QUOTA_POLICY_COLUMN =
-099  Bytes.toString(QUOTA_FAMILY_USAGE) 
+ ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] 
QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] 
QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] 
QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* 
=
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas 
getTableQuota(final Connection connection, final TableName table)
-108  throws IOException {
-109return getQuotas(connection, 
getTableRowKey(table));
-110  }
-111
-112  public static Quotas 
getNamespaceQuota(final Connection connection, final String namespace)
-113  throws IOException {
-114return getQuotas(connection, 
getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final 
Connection connection, final String user)
-118  throws IOException {
-119 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
index 681e137..d167295 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class ReplicationSourceManager.AdoptAbandonedQueuesWorker
+class ReplicationSourceManager.AdoptAbandonedQueuesWorker
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
 title="class or interface in java.lang">Thread
 
 
@@ -228,7 +228,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 AdoptAbandonedQueuesWorker
-publicAdoptAbandonedQueuesWorker()
+publicAdoptAbandonedQueuesWorker()
 
 
 
@@ -245,7 +245,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 run
-publicvoidrun()
+publicvoidrun()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--;
 title="class or interface in java.lang">runin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
index 227c6cb..91e8ca3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class ReplicationSourceManager.NodeFailoverWorker
+class ReplicationSourceManager.NodeFailoverWorker
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
 title="class or interface in java.lang">Thread
 Class responsible to setup new ReplicationSources to take 
care of the
  queues from dead region servers.
@@ -259,7 +259,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 rsZnode
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String rsZnode
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String rsZnode
 
 
 
@@ -268,7 +268,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 rq
-private finalReplicationQueues 
rq
+private finalReplicationQueues 
rq
 
 
 
@@ -277,7 +277,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 rp
-private finalReplicationPeers 
rp
+private finalReplicationPeers 
rp
 
 
 
@@ -286,7 +286,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 clusterId
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true;
 title="class or interface in java.util">UUID clusterId
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true;
 title="class or interface in java.util">UUID clusterId
 
 
 
@@ -303,7 +303,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 NodeFailoverWorker
-publicNodeFailoverWorker(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrsZnode)
+publicNodeFailoverWorker(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrsZnode)
 
 Parameters:
 rsZnode - 
@@ -316,7 +316,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 NodeFailoverWorker
-publicNodeFailoverWorker(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrsZnode,
+publicNodeFailoverWorker(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrsZnode,
  

[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
index b2b602a..d08d46c 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":9,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -115,19 +115,21 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class QuotaTableUtil
+public class QuotaTableUtil
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Helper class to interact with the quota table.
- 
- ROW-KEY  FAM/QUALDATA
-   n.namespace q:s global-quotas
-   t.namespace u:pnamespace-quota policy
-   t.table q:s global-quotas
-   t.table u:ptable-quota policy
-   u.user  q:s global-quotas
-   u.user  q:s.table table-quotas
-   u.user  q:s.ns:   namespace-quotas
- 
+ 
+   ROW-KEYFAM/QUALDATA
+   
n.namespaceq:sglobal-quotas
+   n.namespaceu:pnamespace-quota 
policy
+   
n.namespaceu:sSpaceQuotaSnapshot
+   t.tableq:sglobal-quotas
+   t.tableu:ptable-quota 
policy
+   t.tableu:ss.snapshot 
nameSpaceQuotaSnapshot
+   u.userq:sglobal-quotas
+   
u.userq:s.tabletable-quotas
+   
u.userq:s.nsnamespace-quotas
+ 
 
 
 
@@ -210,16 +212,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 QUOTA_QUALIFIER_SETTINGS_PREFIX
 
 
+protected static byte[]
+QUOTA_SNAPSHOT_SIZE_QUALIFIER
+
+
 static TableName
 QUOTA_TABLE_NAME
 System table for quotas
 
 
-
+
 protected static byte[]
 QUOTA_TABLE_ROW_KEY_PREFIX
 
-
+
 protected static byte[]
 QUOTA_USER_ROW_KEY_PREFIX
 
@@ -256,24 +262,50 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-static Put
-createPutSpaceSnapshot(TableNametableName,
-  SpaceQuotaSnapshotsnapshot)
+(package private) static Get
+createGetNamespaceSnapshotSize(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace)
+Creates a Get to fetch the namespace's total 
snapshot size.
+
+
+
+(package private) static Put
+createPutForNamespaceSnapshotSize(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace,
+ longsize)
+Creates a Put for the namespace's total 
snapshot size.
+
+
+
+(package private) static Put
+createPutForSnapshotSize(TableNametableName,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringsnapshot,
+longsize)
+Creates a Put to persist the current 
size of the snapshot with respect to
+ the given table.
+
+
+
+(package private) static Put
+createPutForSpaceSnapshot(TableNametableName,
+ SpaceQuotaSnapshotsnapshot)
 Creates a Put to store the given 
snapshot for the given tableName in
  the quota table.
 
 
-
+
+(package private) static Scan
+createScanForSnapshotSizes(TableNametable)
+
+
 protected static Result
 doGet(Connectionconnection,
  Getget)
 
-
+
 protected static Result[]
 doGet(Connectionconnection,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgets)
 
-
+
 static void
 extractQuotaSnapshot(Resultresult,
 

[10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
index d9136a8..6b8f5c5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
@@ -65,12 +65,12 @@
 057import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
 058import 
org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 059import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
-060import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReaderThread.WALEntryBatch;
-061import 
org.apache.hadoop.hbase.util.Bytes;
-062import 
org.apache.hadoop.hbase.util.Pair;
-063import 
org.apache.hadoop.hbase.util.Threads;
-064import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-065import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+060import 
org.apache.hadoop.hbase.util.Bytes;
+061import 
org.apache.hadoop.hbase.util.Pair;
+062import 
org.apache.hadoop.hbase.util.Threads;
+063import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+064import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+065
 066
 067/**
 068 * Class that handles the source of a 
replication stream.
@@ -456,7 +456,7 @@
 448this.sourceRunning = false;
 449
CollectionReplicationSourceShipperThread workers = 
workerThreads.values();
 450for (ReplicationSourceShipperThread 
worker : workers) {
-451  worker.setWorkerRunning(false);
+451  worker.stopWorker();
 452  worker.entryReader.interrupt();
 453  worker.interrupt();
 454}
@@ -548,24 +548,20 @@
 540return sb.toString();
 541  }
 542
-543  /**
-544   * Get Replication Source Metrics
-545   * @return sourceMetrics
-546   */
-547  @Override
-548  public MetricsSource getSourceMetrics() 
{
-549return this.metrics;
-550  }
-551
-552  @Override
-553  public void 
postShipEdits(ListEntry entries, int batchSize) {
-554if (throttler.isEnabled()) {
-555  throttler.addPushSize(batchSize);
-556}
-557
totalReplicatedEdits.addAndGet(entries.size());
-558
totalBufferUsed.addAndGet(-batchSize);
-559  }
-560}
+543  @Override
+544  public MetricsSource getSourceMetrics() 
{
+545return this.metrics;
+546  }
+547
+548  @Override
+549  public void 
postShipEdits(ListEntry entries, int batchSize) {
+550if (throttler.isEnabled()) {
+551  throttler.addPushSize(batchSize);
+552}
+553
totalReplicatedEdits.addAndGet(entries.size());
+554
totalBufferUsed.addAndGet(-batchSize);
+555  }
+556}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
index d9136a8..6b8f5c5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
@@ -65,12 +65,12 @@
 057import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
 058import 
org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 059import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
-060import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReaderThread.WALEntryBatch;
-061import 
org.apache.hadoop.hbase.util.Bytes;
-062import 
org.apache.hadoop.hbase.util.Pair;
-063import 
org.apache.hadoop.hbase.util.Threads;
-064import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-065import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+060import 
org.apache.hadoop.hbase.util.Bytes;
+061import 
org.apache.hadoop.hbase.util.Pair;
+062import 
org.apache.hadoop.hbase.util.Threads;
+063import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+064import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+065
 066
 067/**
 068 * Class that handles the source of a 
replication stream.
@@ -456,7 +456,7 @@
 448this.sourceRunning = false;
 449
CollectionReplicationSourceShipperThread workers = 
workerThreads.values();
 450for (ReplicationSourceShipperThread 
worker : workers) {
-451  worker.setWorkerRunning(false);
+451  worker.stopWorker();
 452  worker.entryReader.interrupt();
 453  worker.interrupt();
 454}
@@ -548,24 +548,20 @@
 540return sb.toString();
 

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.WorkerState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.WorkerState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.WorkerState.html
new file mode 100644
index 000..20bb545
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.WorkerState.html
@@ -0,0 +1,431 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019package 
org.apache.hadoop.hbase.replication.regionserver;
+020
+021import java.io.IOException;
+022import java.util.List;
+023import java.util.Map;
+024import 
java.util.concurrent.PriorityBlockingQueue;
+025import java.util.concurrent.TimeUnit;
+026
+027import org.apache.commons.logging.Log;
+028import 
org.apache.commons.logging.LogFactory;
+029import 
org.apache.hadoop.conf.Configuration;
+030import org.apache.hadoop.fs.Path;
+031import org.apache.hadoop.hbase.Cell;
+032import 
org.apache.hadoop.hbase.CellUtil;
+033import 
org.apache.hadoop.hbase.MetaTableAccessor;
+034import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+035import 
org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+036import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+037import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReaderThread.WALEntryBatch;
+038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
+039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+040import 
org.apache.hadoop.hbase.util.Bytes;
+041import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+042import 
org.apache.hadoop.hbase.util.Threads;
+043import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+044
+045import 
com.google.common.cache.CacheBuilder;
+046import 
com.google.common.cache.CacheLoader;
+047import 
com.google.common.cache.LoadingCache;
+048
+049/**
+050 * This thread reads entries from a queue 
and ships them. Entries are placed onto the queue by
+051 * ReplicationSourceWALReaderThread
+052 */
+053@InterfaceAudience.Private
+054public class 
ReplicationSourceShipperThread extends Thread {
+055  private static final Log LOG = 
LogFactory.getLog(ReplicationSourceShipperThread.class);
+056
+057  // Hold the state of a replication 
worker thread
+058  public enum WorkerState {
+059RUNNING,
+060STOPPED,
+061FINISHED,  // The worker is done 
processing a recovered queue
+062  }
+063
+064  protected final Configuration conf;
+065  protected final String walGroupId;
+066  protected final 
PriorityBlockingQueuePath queue;
+067  protected final 
ReplicationSourceInterface source;
+068
+069  // Last position in the log that we 
sent to ZooKeeper
+070  protected long lastLoggedPosition = 
-1;
+071  // Path of the current log
+072  protected volatile Path currentPath;
+073  // Current state of the worker thread
+074  private WorkerState state;
+075  protected 
ReplicationSourceWALReaderThread entryReader;
+076
+077  // How long should we sleep for each 
retry
+078  protected final long sleepForRetries;
+079  // Maximum number of retries before 
taking bold actions
+080  protected final int 
maxRetriesMultiplier;
+081
+082  // Use guava cache to set ttl for each 
key
+083  private final LoadingCacheString, 
Boolean canSkipWaitingSet = CacheBuilder.newBuilder()
+084  .expireAfterAccess(1, 
TimeUnit.DAYS).build(
+085  new CacheLoaderString, 
Boolean() {
+086@Override
+087public Boolean load(String key) 
throws Exception {
+088  return false;
+089}
+090  }
+091  );
+092
+093  public 
ReplicationSourceShipperThread(Configuration conf, String walGroupId,
+094  PriorityBlockingQueuePath 
queue, ReplicationSourceInterface source) {
+095this.conf = conf;

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
index 509b93c..3c6f9b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.html
@@ -53,606 +53,717 @@
 045import 
org.apache.hadoop.hbase.client.ResultScanner;
 046import 
org.apache.hadoop.hbase.client.Scan;
 047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.CompareFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-052import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import 
org.apache.hadoop.hbase.filter.RowFilter;
-054import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the 
quota table.
-073 * pre
-074 * ROW-KEY  FAM/QUAL
DATA
-075 *   n.lt;namespacegt; q:s  
   lt;global-quotasgt;
-076 *   t.lt;namespacegt; u:p  
  lt;namespace-quota policygt;
-077 *   t.lt;tablegt; q:s  
   lt;global-quotasgt;
-078 *   t.lt;tablegt; u:p  
  lt;table-quota policygt;
-079 *   u.lt;usergt;  q:s  
   lt;global-quotasgt;
-080 *   u.lt;usergt;  
q:s.lt;tablegt; lt;table-quotasgt;
-081 *   u.lt;usergt;  
q:s.lt;nsgt;:   lt;namespace-quotasgt;
-082 * /pre
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName 
QUOTA_TABLE_NAME =
-091  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String 
QUOTA_POLICY_COLUMN =
-099  Bytes.toString(QUOTA_FAMILY_USAGE) 
+ ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] 
QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] 
QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] 
QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* 
=
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas 
getTableQuota(final Connection connection, final TableName table)
-108  throws IOException {
-109return getQuotas(connection, 
getTableRowKey(table));
-110  }
-111
-112  public static Quotas 
getNamespaceQuota(final Connection connection, final String namespace)
-113  throws IOException {
-114return getQuotas(connection, 
getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final 
Connection connection, final String user)
-118  throws IOException {
-119return getQuotas(connection, 
getUserRowKey(user));
-120  }
-121
-122  public static Quotas getUserQuota(final 

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index 4b85756..4262d4d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -48,2512 +48,2526 @@
 040import 
java.util.concurrent.atomic.AtomicBoolean;
 041import 
java.util.concurrent.locks.ReentrantLock;
 042import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-043
-044import org.apache.commons.logging.Log;
-045import 
org.apache.commons.logging.LogFactory;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import org.apache.hadoop.hbase.Cell;
-050import 
org.apache.hadoop.hbase.CellComparator;
-051import 
org.apache.hadoop.hbase.CellUtil;
-052import 
org.apache.hadoop.hbase.CompoundConfiguration;
-053import 
org.apache.hadoop.hbase.HColumnDescriptor;
-054import 
org.apache.hadoop.hbase.HConstants;
-055import 
org.apache.hadoop.hbase.HRegionInfo;
-056import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-057import 
org.apache.hadoop.hbase.TableName;
-058import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-059import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import 
org.apache.hadoop.hbase.client.Scan;
-061import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-062import 
org.apache.hadoop.hbase.io.compress.Compression;
-063import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-065import 
org.apache.hadoop.hbase.io.hfile.HFile;
-066import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-067import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-068import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-071import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-072import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-073import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-074import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-075import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-076import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-077import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-078import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-079import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-080import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-081import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-082import 
org.apache.hadoop.hbase.security.User;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-085import 
org.apache.hadoop.hbase.util.Bytes;
-086import 
org.apache.hadoop.hbase.util.ChecksumType;
-087import 
org.apache.hadoop.hbase.util.ClassSize;
-088import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-089import 
org.apache.hadoop.hbase.util.Pair;
-090import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-091import 
org.apache.hadoop.util.StringUtils;
-092import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-093
-094import 
com.google.common.annotations.VisibleForTesting;
-095import 
com.google.common.base.Preconditions;
-096import 
com.google.common.collect.ImmutableCollection;
-097import 
com.google.common.collect.ImmutableList;
-098import com.google.common.collect.Lists;
-099import com.google.common.collect.Sets;
-100
-101/**
-102 * A Store holds a column family in a 
Region.  Its a memstore and a set of zero
-103 * or more StoreFiles, which stretch 
backwards over time.
-104 *
-105 * pThere's no reason to consider 
append-logging at this level; all logging
-106 * and locking is handled at the HRegion 
level.  Store just provides
-107 * services to manage sets of StoreFiles. 
 One of the most important of those
-108 * services is compaction services where 
files are aggregated once they pass
-109 * a configurable threshold.
-110 *
-111 * pLocking and transactions are 
handled at a higher level.  This API should
-112 * not be called directly but by an 
HRegion manager.
-113 */
-114@InterfaceAudience.Private
-115public class HStore implements Store {
-116  public static final String 
MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class";
-117  public static final String 
COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY =
-118  
"hbase.server.compactchecker.interval.multiplier";
-119  public 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index da61859..4fa61f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -366,598 +366,607 @@
 358return this.oldsources;
 359  }
 360
-361  @VisibleForTesting
-362  ListString getAllQueues() {
-363return 
replicationQueues.getAllQueues();
-364  }
-365
-366  void preLogRoll(Path newLog) throws 
IOException {
-367recordLog(newLog);
-368String logName = newLog.getName();
-369String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-370synchronized (latestPaths) {
-371  IteratorPath iterator = 
latestPaths.iterator();
-372  while (iterator.hasNext()) {
-373Path path = iterator.next();
-374if 
(path.getName().contains(logPrefix)) {
-375  iterator.remove();
-376  break;
-377}
-378  }
-379  this.latestPaths.add(newLog);
-380}
-381  }
-382
-383  /**
-384   * Check and enqueue the given log to 
the correct source. If there's still no source for the
-385   * group to which the given log 
belongs, create one
-386   * @param logPath the log path to check 
and enqueue
-387   * @throws IOException
-388   */
-389  private void recordLog(Path logPath) 
throws IOException {
-390String logName = logPath.getName();
-391String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-392// update replication queues on ZK
-393// synchronize on replicationPeers to 
avoid adding source for the to-be-removed peer
-394synchronized (replicationPeers) {
-395  for (String id : 
replicationPeers.getConnectedPeerIds()) {
-396try {
-397  
this.replicationQueues.addLog(id, logName);
-398} catch (ReplicationException e) 
{
-399  throw new IOException("Cannot 
add log to replication queue"
-400  + " when creating a new 
source, queueId=" + id + ", filename=" + logName, e);
-401}
-402  }
-403}
-404// update walsById map
-405synchronized (walsById) {
-406  for (Map.EntryString, 
MapString, SortedSetString entry : 
this.walsById.entrySet()) {
-407String peerId = entry.getKey();
-408MapString, 
SortedSetString walsByPrefix = entry.getValue();
-409boolean existingPrefix = false;
-410for (Map.EntryString, 
SortedSetString walsEntry : walsByPrefix.entrySet()) {
-411  SortedSetString wals = 
walsEntry.getValue();
-412  if (this.sources.isEmpty()) {
-413// If there's no slaves, 
don't need to keep the old wals since
-414// we only consider the last 
one when a new slave comes in
-415wals.clear();
-416  }
-417  if 
(logPrefix.equals(walsEntry.getKey())) {
-418wals.add(logName);
-419existingPrefix = true;
-420  }
-421}
-422if (!existingPrefix) {
-423  // The new log belongs to a new 
group, add it into this peer
-424  LOG.debug("Start tracking logs 
for wal group " + logPrefix + " for peer " + peerId);
-425  SortedSetString wals = 
new TreeSet();
-426  wals.add(logName);
-427  walsByPrefix.put(logPrefix, 
wals);
-428}
-429  }
-430}
-431  }
-432
-433  void postLogRoll(Path newLog) throws 
IOException {
-434// This only updates the sources we 
own, not the recovered ones
-435for (ReplicationSourceInterface 
source : this.sources) {
-436  source.enqueueLog(newLog);
-437}
-438  }
-439
-440  @VisibleForTesting
-441  public AtomicLong getTotalBufferUsed() 
{
-442return totalBufferUsed;
-443  }
-444
-445  /**
-446   * Factory method to create a 
replication source
-447   * @param conf the configuration to 
use
-448   * @param fs the file system to use
-449   * @param manager the manager to use
-450   * @param server the server object for 
this region server
-451   * @param peerId the id of the peer 
cluster
-452   * @return the created source
-453   * @throws IOException
-454   */
-455  protected ReplicationSourceInterface 
getReplicationSource(final Configuration conf,
-456  final FileSystem fs, final 
ReplicationSourceManager manager,
-457  final ReplicationQueues 
replicationQueues, final ReplicationPeers replicationPeers,
-458  final Server server, final String 
peerId, final UUID clusterId,
-459  final ReplicationPeerConfig 
peerConfig, final ReplicationPeer replicationPeer)
-460  throws 

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
@@ -53,606 +53,717 @@
 045import 
org.apache.hadoop.hbase.client.ResultScanner;
 046import 
org.apache.hadoop.hbase.client.Scan;
 047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.CompareFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-052import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import 
org.apache.hadoop.hbase.filter.RowFilter;
-054import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the 
quota table.
-073 * pre
-074 * ROW-KEY  FAM/QUAL
DATA
-075 *   n.lt;namespacegt; q:s  
   lt;global-quotasgt;
-076 *   t.lt;namespacegt; u:p  
  lt;namespace-quota policygt;
-077 *   t.lt;tablegt; q:s  
   lt;global-quotasgt;
-078 *   t.lt;tablegt; u:p  
  lt;table-quota policygt;
-079 *   u.lt;usergt;  q:s  
   lt;global-quotasgt;
-080 *   u.lt;usergt;  
q:s.lt;tablegt; lt;table-quotasgt;
-081 *   u.lt;usergt;  
q:s.lt;nsgt;:   lt;namespace-quotasgt;
-082 * /pre
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName 
QUOTA_TABLE_NAME =
-091  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String 
QUOTA_POLICY_COLUMN =
-099  Bytes.toString(QUOTA_FAMILY_USAGE) 
+ ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] 
QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] 
QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] 
QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* 
=
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas 
getTableQuota(final Connection connection, final TableName table)
-108  throws IOException {
-109return getQuotas(connection, 
getTableRowKey(table));
-110  }
-111
-112  public static Quotas 
getNamespaceQuota(final Connection connection, final String namespace)
-113  throws IOException {
-114return getQuotas(connection, 
getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final 
Connection connection, final String user)
-118  throws IOException {
-119return getQuotas(connection, 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
index 78816d5..ca6eaae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
@@ -169,599 +169,601 @@
 161// The current "view" of region space 
use. Used henceforth.
 162final MapHRegionInfo,Long 
reportedRegionSpaceUse = quotaManager.snapshotRegionSizes();
 163if (LOG.isTraceEnabled()) {
-164  LOG.trace("Using " + 
reportedRegionSpaceUse.size() + " region space use reports");
-165}
-166
-167// Remove the "old" region reports
-168pruneOldRegionReports();
-169
-170// Create the stores to track table 
and namespace snapshots
-171
initializeSnapshotStores(reportedRegionSpaceUse);
-172// Report the number of (non-expired) 
region size reports
-173if (metrics != null) {
-174  
metrics.setNumRegionSizeReports(reportedRegionSpaceUse.size());
-175}
-176
-177// Filter out tables for which we 
don't have adequate regionspace reports yet.
-178// Important that we do this after we 
instantiate the stores above
-179// This gives us a set of Tables 
which may or may not be violating their quota.
-180// To be safe, we want to make sure 
that these are not in violation.
-181SetTableName tablesInLimbo = 
tablesWithQuotas.filterInsufficientlyReportedTables(
-182tableSnapshotStore);
-183
-184if (LOG.isTraceEnabled()) {
-185  LOG.trace("Filtered insufficiently 
reported tables, left with " +
-186  reportedRegionSpaceUse.size() + 
" regions reported");
-187}
-188
-189for (TableName tableInLimbo : 
tablesInLimbo) {
-190  final SpaceQuotaSnapshot 
currentSnapshot = tableSnapshotStore.getCurrentState(tableInLimbo);
-191  if 
(currentSnapshot.getQuotaStatus().isInViolation()) {
-192if (LOG.isTraceEnabled()) {
-193  LOG.trace("Moving " + 
tableInLimbo + " out of violation because fewer region sizes were"
-194  + " reported than 
required.");
-195}
-196SpaceQuotaSnapshot targetSnapshot 
= new SpaceQuotaSnapshot(
-197
SpaceQuotaStatus.notInViolation(), currentSnapshot.getUsage(),
-198
currentSnapshot.getLimit());
-199
this.snapshotNotifier.transitionTable(tableInLimbo, targetSnapshot);
-200// Update it in the Table 
QuotaStore so that memory is consistent with no violation.
-201
tableSnapshotStore.setCurrentState(tableInLimbo, targetSnapshot);
-202  }
-203}
-204
-205// Transition each table to/from 
quota violation based on the current and target state.
-206// Only table quotas are enacted.
-207final SetTableName 
tablesWithTableQuotas = tablesWithQuotas.getTableQuotaTables();
-208
processTablesWithQuotas(tablesWithTableQuotas);
-209
-210// For each Namespace quota, 
transition each table in the namespace in or out of violation
-211// only if a table quota violation 
policy has not already been applied.
-212final SetString 
namespacesWithQuotas = tablesWithQuotas.getNamespacesWithQuotas();
-213final 
MultimapString,TableName tablesByNamespace = 
tablesWithQuotas.getTablesByNamespace();
-214
processNamespacesWithQuotas(namespacesWithQuotas, tablesByNamespace);
-215  }
-216
-217  void 
initializeSnapshotStores(MapHRegionInfo,Long regionSizes) {
-218MapHRegionInfo,Long 
immutableRegionSpaceUse = Collections.unmodifiableMap(regionSizes);
-219if (tableSnapshotStore == null) {
-220  tableSnapshotStore = new 
TableQuotaSnapshotStore(conn, this, immutableRegionSpaceUse);
-221} else {
-222  
tableSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-223}
-224if (namespaceSnapshotStore == null) 
{
-225  namespaceSnapshotStore = new 
NamespaceQuotaSnapshotStore(
-226  conn, this, 
immutableRegionSpaceUse);
-227} else {
-228  
namespaceSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-229}
-230  }
-231
-232  /**
-233   * Processes each {@code TableName} 
which has a quota defined and moves it in or out of
-234   * violation based on the space use.
-235   *
-236   * @param tablesWithTableQuotas The 
HBase tables which have quotas defined
-237   */
-238  void processTablesWithQuotas(final 
SetTableName tablesWithTableQuotas) throws IOException {
-239long numTablesInViolation = 0L;
-240for (TableName table : 
tablesWithTableQuotas) {
-241  final SpaceQuota spaceQuota = 
tableSnapshotStore.getSpaceQuota(table);
-242  if (spaceQuota == null) {
-243if 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
index 3a57bbd..b408e5f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
@@ -30,182 +30,184 @@
 022import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 023import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 024import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-025
-026/**
-027 * A point-in-time view of a space quota 
on a table.
-028 */
-029@InterfaceAudience.Private
-030public class SpaceQuotaSnapshot {
-031  private static final SpaceQuotaSnapshot 
NO_SUCH_SNAPSHOT = new SpaceQuotaSnapshot(
-032  SpaceQuotaStatus.notInViolation(), 
0, Long.MAX_VALUE);
-033  private final SpaceQuotaStatus 
quotaStatus;
-034  private final long usage;
-035  private final long limit;
-036
-037  /**
-038   * Encapsulates the state of a quota on 
a table. The quota may or may not be in violation.
-039   * If the quota is not in violation, 
the violation may be null. If the quota is in violation,
-040   * there is guaranteed to be a non-null 
violation policy.
-041   */
-042  @InterfaceAudience.Private
-043  public static class SpaceQuotaStatus 
{
-044private static final SpaceQuotaStatus 
NOT_IN_VIOLATION = new SpaceQuotaStatus(null, false);
-045final SpaceViolationPolicy policy;
-046final boolean inViolation;
-047
-048/**
-049 * Constructs a {@code 
SpaceQuotaSnapshot} which is in violation of the provided {@code policy}.
-050 *
-051 * Use {@link #notInViolation()} to 
obtain an instance of this class for the cases when the
-052 * quota is not in violation.
-053 *
-054 * @param policy The non-null policy 
being violated.
-055 */
-056public 
SpaceQuotaStatus(SpaceViolationPolicy policy) {
-057  // If the caller is instantiating a 
status, the policy must be non-null
-058  this 
(Objects.requireNonNull(policy), true);
-059}
-060
-061private 
SpaceQuotaStatus(SpaceViolationPolicy policy, boolean inViolation) {
-062  this.policy = policy;
-063  this.inViolation = inViolation;
-064}
-065
-066/**
-067 * Returns the violation policy, 
which may be null. It is guaranteed to be non-null if
-068 * {@link #isInViolation()} is {@code 
true}, but may be null otherwise.
-069 */
-070public SpaceViolationPolicy 
getPolicy() {
-071  return policy;
-072}
-073
-074/**
-075 * @return {@code true} if the quota 
is being violated, {@code false} otherwise.
-076 */
-077public boolean isInViolation() {
-078  return inViolation;
-079}
-080
-081/**
-082 * Returns a singleton referring to a 
quota which is not in violation.
-083 */
-084public static SpaceQuotaStatus 
notInViolation() {
-085  return NOT_IN_VIOLATION;
-086}
-087
-088@Override
-089public int hashCode() {
-090  return new 
HashCodeBuilder().append(policy == null ? 0 : policy.hashCode())
-091  
.append(inViolation).toHashCode();
-092}
-093
-094@Override
-095public boolean equals(Object o) {
-096  if (o instanceof SpaceQuotaStatus) 
{
-097SpaceQuotaStatus other = 
(SpaceQuotaStatus) o;
-098return Objects.equals(policy, 
other.policy)  inViolation == other.inViolation;
-099  }
-100  return false;
-101}
-102
-103@Override
-104public String toString() {
-105  StringBuilder sb = new 
StringBuilder(getClass().getSimpleName());
-106  
sb.append("[policy=").append(policy);
-107  sb.append(", 
inViolation=").append(inViolation).append("]");
-108  return sb.toString();
-109}
-110
-111public static 
QuotaProtos.SpaceQuotaStatus toProto(SpaceQuotaStatus status) {
-112  
QuotaProtos.SpaceQuotaStatus.Builder builder = 
QuotaProtos.SpaceQuotaStatus.newBuilder();
-113  
builder.setInViolation(status.inViolation);
-114  if (status.isInViolation()) {
-115
builder.setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(status.getPolicy()));
-116  }
-117  return builder.build();
-118}
-119
-120public static SpaceQuotaStatus 
toStatus(QuotaProtos.SpaceQuotaStatus proto) {
-121  if (proto.getInViolation()) {
-122return new 
SpaceQuotaStatus(ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
-123  } else {
-124return NOT_IN_VIOLATION;
-125  }
-126}
-127  }
-128
-129  public 
SpaceQuotaSnapshot(SpaceQuotaStatus quotaStatus, long usage, long limit) {
-130this.quotaStatus = 
Objects.requireNonNull(quotaStatus);
-131this.usage = usage;
-132this.limit 

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
index da61859..4fa61f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
@@ -366,598 +366,607 @@
 358return this.oldsources;
 359  }
 360
-361  @VisibleForTesting
-362  ListString getAllQueues() {
-363return 
replicationQueues.getAllQueues();
-364  }
-365
-366  void preLogRoll(Path newLog) throws 
IOException {
-367recordLog(newLog);
-368String logName = newLog.getName();
-369String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-370synchronized (latestPaths) {
-371  IteratorPath iterator = 
latestPaths.iterator();
-372  while (iterator.hasNext()) {
-373Path path = iterator.next();
-374if 
(path.getName().contains(logPrefix)) {
-375  iterator.remove();
-376  break;
-377}
-378  }
-379  this.latestPaths.add(newLog);
-380}
-381  }
-382
-383  /**
-384   * Check and enqueue the given log to 
the correct source. If there's still no source for the
-385   * group to which the given log 
belongs, create one
-386   * @param logPath the log path to check 
and enqueue
-387   * @throws IOException
-388   */
-389  private void recordLog(Path logPath) 
throws IOException {
-390String logName = logPath.getName();
-391String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-392// update replication queues on ZK
-393// synchronize on replicationPeers to 
avoid adding source for the to-be-removed peer
-394synchronized (replicationPeers) {
-395  for (String id : 
replicationPeers.getConnectedPeerIds()) {
-396try {
-397  
this.replicationQueues.addLog(id, logName);
-398} catch (ReplicationException e) 
{
-399  throw new IOException("Cannot 
add log to replication queue"
-400  + " when creating a new 
source, queueId=" + id + ", filename=" + logName, e);
-401}
-402  }
-403}
-404// update walsById map
-405synchronized (walsById) {
-406  for (Map.EntryString, 
MapString, SortedSetString entry : 
this.walsById.entrySet()) {
-407String peerId = entry.getKey();
-408MapString, 
SortedSetString walsByPrefix = entry.getValue();
-409boolean existingPrefix = false;
-410for (Map.EntryString, 
SortedSetString walsEntry : walsByPrefix.entrySet()) {
-411  SortedSetString wals = 
walsEntry.getValue();
-412  if (this.sources.isEmpty()) {
-413// If there's no slaves, 
don't need to keep the old wals since
-414// we only consider the last 
one when a new slave comes in
-415wals.clear();
-416  }
-417  if 
(logPrefix.equals(walsEntry.getKey())) {
-418wals.add(logName);
-419existingPrefix = true;
-420  }
-421}
-422if (!existingPrefix) {
-423  // The new log belongs to a new 
group, add it into this peer
-424  LOG.debug("Start tracking logs 
for wal group " + logPrefix + " for peer " + peerId);
-425  SortedSetString wals = 
new TreeSet();
-426  wals.add(logName);
-427  walsByPrefix.put(logPrefix, 
wals);
-428}
-429  }
-430}
-431  }
-432
-433  void postLogRoll(Path newLog) throws 
IOException {
-434// This only updates the sources we 
own, not the recovered ones
-435for (ReplicationSourceInterface 
source : this.sources) {
-436  source.enqueueLog(newLog);
-437}
-438  }
-439
-440  @VisibleForTesting
-441  public AtomicLong getTotalBufferUsed() 
{
-442return totalBufferUsed;
-443  }
-444
-445  /**
-446   * Factory method to create a 
replication source
-447   * @param conf the configuration to 
use
-448   * @param fs the file system to use
-449   * @param manager the manager to use
-450   * @param server the server object for 
this region server
-451   * @param peerId the id of the peer 
cluster
-452   * @return the created source
-453   * @throws IOException
-454   */
-455  protected ReplicationSourceInterface 
getReplicationSource(final Configuration conf,
-456  final FileSystem fs, final 
ReplicationSourceManager manager,
-457  final ReplicationQueues 
replicationQueues, final ReplicationPeers replicationPeers,
-458  final Server server, final String 
peerId, 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6ea4056e5 -> 476c54ede


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-spark/dependency-convergence.html
--
diff --git a/hbase-spark/dependency-convergence.html 
b/hbase-spark/dependency-convergence.html
index 3e06cc6..7807f9d 100644
--- a/hbase-spark/dependency-convergence.html
+++ b/hbase-spark/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Spark



[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Store.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Store.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Store.html
index a9e679e..a56c63f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Store.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Store.html
@@ -410,149 +410,154 @@
 402  long getStorefilesSize();
 403
 404  /**
-405   * @return The size of the store file 
indexes, in bytes.
+405   * @return The size of only the store 
files which are HFiles, in bytes.
 406   */
-407  long getStorefilesIndexSize();
+407  long getHFilesSize();
 408
 409  /**
-410   * Returns the total size of all index 
blocks in the data block indexes, including the root level,
-411   * intermediate levels, and the leaf 
level for multi-level indexes, or just the root level for
-412   * single-level indexes.
-413   * @return the total size of block 
indexes in the store
-414   */
-415  long getTotalStaticIndexSize();
-416
-417  /**
-418   * Returns the total byte size of all 
Bloom filter bit arrays. For compound Bloom filters even the
-419   * Bloom blocks currently not loaded 
into the block cache are counted.
-420   * @return the total size of all Bloom 
filters in the store
-421   */
-422  long getTotalStaticBloomSize();
-423
-424  // Test-helper methods
-425
-426  /**
-427   * Used for tests.
-428   * @return cache configuration for this 
Store.
-429   */
-430  CacheConfig getCacheConfig();
-431
-432  /**
-433   * @return the parent region info 
hosting this store
+410   * @return The size of the store file 
indexes, in bytes.
+411   */
+412  long getStorefilesIndexSize();
+413
+414  /**
+415   * Returns the total size of all index 
blocks in the data block indexes, including the root level,
+416   * intermediate levels, and the leaf 
level for multi-level indexes, or just the root level for
+417   * single-level indexes.
+418   * @return the total size of block 
indexes in the store
+419   */
+420  long getTotalStaticIndexSize();
+421
+422  /**
+423   * Returns the total byte size of all 
Bloom filter bit arrays. For compound Bloom filters even the
+424   * Bloom blocks currently not loaded 
into the block cache are counted.
+425   * @return the total size of all Bloom 
filters in the store
+426   */
+427  long getTotalStaticBloomSize();
+428
+429  // Test-helper methods
+430
+431  /**
+432   * Used for tests.
+433   * @return cache configuration for this 
Store.
 434   */
-435  HRegionInfo getRegionInfo();
+435  CacheConfig getCacheConfig();
 436
-437  RegionCoprocessorHost 
getCoprocessorHost();
-438
-439  boolean areWritesEnabled();
-440
-441  /**
-442   * @return The smallest mvcc readPoint 
across all the scanners in this
-443   * region. Writes older than this 
readPoint, are included  in every
-444   * read operation.
-445   */
-446  long getSmallestReadPoint();
-447
-448  String getColumnFamilyName();
-449
-450  TableName getTableName();
-451
-452  /**
-453   * @return The number of cells flushed 
to disk
-454   */
-455  long getFlushedCellsCount();
+437  /**
+438   * @return the parent region info 
hosting this store
+439   */
+440  HRegionInfo getRegionInfo();
+441
+442  RegionCoprocessorHost 
getCoprocessorHost();
+443
+444  boolean areWritesEnabled();
+445
+446  /**
+447   * @return The smallest mvcc readPoint 
across all the scanners in this
+448   * region. Writes older than this 
readPoint, are included  in every
+449   * read operation.
+450   */
+451  long getSmallestReadPoint();
+452
+453  String getColumnFamilyName();
+454
+455  TableName getTableName();
 456
 457  /**
-458   * @return The total size of data 
flushed to disk, in bytes
+458   * @return The number of cells flushed 
to disk
 459   */
-460  long getFlushedCellsSize();
+460  long getFlushedCellsCount();
 461
 462  /**
-463   * @return The total size of out output 
files on disk, in bytes
+463   * @return The total size of data 
flushed to disk, in bytes
 464   */
-465  long getFlushedOutputFileSize();
+465  long getFlushedCellsSize();
 466
 467  /**
-468   * @return The number of cells 
processed during minor compactions
+468   * @return The total size of out output 
files on disk, in bytes
 469   */
-470  long getCompactedCellsCount();
+470  long getFlushedOutputFileSize();
 471
 472  /**
-473   * @return The total amount of data 
processed during minor compactions, in bytes
+473   * @return The number of cells 
processed during minor compactions
 474   */
-475  long getCompactedCellsSize();
+475  long getCompactedCellsCount();
 476
 477  /**
-478   * @return The number of cells 
processed during major compactions
+478   * @return The total amount of data 
processed during minor compactions, in bytes
 479   */
-480  long getMajorCompactedCellsCount();
+480  long 

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 0c3fe3b..d262744 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -149,3339 +149,3348 @@
 141import 
org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
 142import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 143import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-144import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146import 
org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-148import 
org.apache.hadoop.hbase.regionserver.HStore;
-149import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151import 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154import 
org.apache.hadoop.hbase.replication.ReplicationException;
-155import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-156import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-161import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-162import 
org.apache.hadoop.hbase.security.UserProvider;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170import 
org.apache.hadoop.hbase.util.Addressing;
-171import 
org.apache.hadoop.hbase.util.Bytes;
-172import 
org.apache.hadoop.hbase.util.CompressionTest;
-173import 
org.apache.hadoop.hbase.util.EncryptionTest;
-174import 
org.apache.hadoop.hbase.util.FSUtils;
-175import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176import 
org.apache.hadoop.hbase.util.HasThread;
-177import 
org.apache.hadoop.hbase.util.IdLock;
-178import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179import 
org.apache.hadoop.hbase.util.Pair;
-180import 
org.apache.hadoop.hbase.util.Threads;
-181import 
org.apache.hadoop.hbase.util.VersionInfo;
-182import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-183import 
org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188import 
org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189import 
org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193import 
org.apache.zookeeper.KeeperException;
-194import org.eclipse.jetty.server.Server;
-195import 
org.eclipse.jetty.server.ServerConnector;
-196import 
org.eclipse.jetty.servlet.ServletHolder;
-197import 
org.eclipse.jetty.webapp.WebAppContext;
-198
-199import 
com.google.common.annotations.VisibleForTesting;
-200import com.google.common.collect.Lists;
-201import com.google.common.collect.Maps;
-202import com.google.protobuf.Descriptors;
-203import com.google.protobuf.Service;
-204
-205/**
-206 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-207 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-208 * run the cluster.  All others park 
themselves in their constructor until
-209 * master or cluster shutdown or until 
the active master loses its lease in
-210 * zookeeper.  Thereafter, all running 
master jostle to take over master role.
-211 *
-212 * pThe Master can be 

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html
new file mode 100644
index 000..b7ca7d9
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html
@@ -0,0 +1,615 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one or more
+003 * contributor license agreements.  See 
the NOTICE file distributed with
+004 * this work for additional information 
regarding copyright ownership.
+005 * The ASF licenses this file to you 
under the Apache License, Version 2.0
+006 * (the "License"); you may not use this 
file except in compliance with
+007 * the License.  You may obtain a copy of 
the License at
+008 *
+009 * 
http://www.apache.org/licenses/LICENSE-2.0
+010 *
+011 * Unless required by applicable law or 
agreed to in writing, software
+012 * distributed under the License is 
distributed on an "AS IS" BASIS,
+013 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+014 * See the License for the specific 
language governing permissions and
+015 * limitations under the License.
+016 */
+017package org.apache.hadoop.hbase.quotas;
+018
+019import java.io.IOException;
+020import java.util.ArrayList;
+021import java.util.Arrays;
+022import java.util.Collection;
+023import java.util.Collections;
+024import java.util.HashSet;
+025import java.util.List;
+026import java.util.Map;
+027import java.util.Map.Entry;
+028import java.util.Objects;
+029import java.util.Set;
+030import java.util.concurrent.TimeUnit;
+031import java.util.function.Predicate;
+032import java.util.stream.Collectors;
+033
+034import 
org.apache.commons.lang.builder.HashCodeBuilder;
+035import org.apache.commons.logging.Log;
+036import 
org.apache.commons.logging.LogFactory;
+037import 
org.apache.hadoop.conf.Configuration;
+038import org.apache.hadoop.fs.FileStatus;
+039import org.apache.hadoop.fs.FileSystem;
+040import org.apache.hadoop.fs.Path;
+041import 
org.apache.hadoop.hbase.HRegionInfo;
+042import 
org.apache.hadoop.hbase.ScheduledChore;
+043import 
org.apache.hadoop.hbase.Stoppable;
+044import 
org.apache.hadoop.hbase.TableName;
+045import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+046import 
org.apache.hadoop.hbase.client.Admin;
+047import 
org.apache.hadoop.hbase.client.Connection;
+048import 
org.apache.hadoop.hbase.client.Table;
+049import 
org.apache.hadoop.hbase.master.HMaster;
+050import 
org.apache.hadoop.hbase.master.MetricsMaster;
+051import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+052import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+053import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles;
+054import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile;
+055import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+056import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+057import 
org.apache.hadoop.hbase.util.FSUtils;
+058import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
+059import 
org.apache.hadoop.util.StringUtils;
+060
+061import 
com.google.common.collect.HashMultimap;
+062import 
com.google.common.collect.Multimap;
+063
+064/**
+065 * A Master-invoked {@code Chore} that 
computes the size of each snapshot which was created from
+066 * a table which has a space quota.
+067 */
+068@InterfaceAudience.Private
+069public class SnapshotQuotaObserverChore 
extends ScheduledChore {
+070  private static final Log LOG = 
LogFactory.getLog(SnapshotQuotaObserverChore.class);
+071  static final String 
SNAPSHOT_QUOTA_CHORE_PERIOD_KEY =
+072  
"hbase.master.quotas.snapshot.chore.period";
+073  static final int 
SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis
+074
+075  static final String 
SNAPSHOT_QUOTA_CHORE_DELAY_KEY =
+076  
"hbase.master.quotas.snapshot.chore.delay";
+077  static final long 
SNAPSHOT_QUOTA_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute in millis
+078
+079  static final String 
SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY =
+080  
"hbase.master.quotas.snapshot.chore.timeunit";
+081  static final String 
SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name();
+082
+083  private final Connection conn;
+084  private final Configuration conf;
+085  private final MetricsMaster metrics;
+086  private final FileSystem fs;
+087
+088  public 
SnapshotQuotaObserverChore(HMaster 

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 4b85756..4262d4d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -48,2512 +48,2526 @@
 040import 
java.util.concurrent.atomic.AtomicBoolean;
 041import 
java.util.concurrent.locks.ReentrantLock;
 042import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-043
-044import org.apache.commons.logging.Log;
-045import 
org.apache.commons.logging.LogFactory;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import org.apache.hadoop.hbase.Cell;
-050import 
org.apache.hadoop.hbase.CellComparator;
-051import 
org.apache.hadoop.hbase.CellUtil;
-052import 
org.apache.hadoop.hbase.CompoundConfiguration;
-053import 
org.apache.hadoop.hbase.HColumnDescriptor;
-054import 
org.apache.hadoop.hbase.HConstants;
-055import 
org.apache.hadoop.hbase.HRegionInfo;
-056import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-057import 
org.apache.hadoop.hbase.TableName;
-058import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-059import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import 
org.apache.hadoop.hbase.client.Scan;
-061import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-062import 
org.apache.hadoop.hbase.io.compress.Compression;
-063import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-065import 
org.apache.hadoop.hbase.io.hfile.HFile;
-066import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-067import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-068import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-071import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-072import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-073import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-074import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-075import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-076import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-077import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-078import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-079import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-080import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-081import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-082import 
org.apache.hadoop.hbase.security.User;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-085import 
org.apache.hadoop.hbase.util.Bytes;
-086import 
org.apache.hadoop.hbase.util.ChecksumType;
-087import 
org.apache.hadoop.hbase.util.ClassSize;
-088import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-089import 
org.apache.hadoop.hbase.util.Pair;
-090import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-091import 
org.apache.hadoop.util.StringUtils;
-092import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-093
-094import 
com.google.common.annotations.VisibleForTesting;
-095import 
com.google.common.base.Preconditions;
-096import 
com.google.common.collect.ImmutableCollection;
-097import 
com.google.common.collect.ImmutableList;
-098import com.google.common.collect.Lists;
-099import com.google.common.collect.Sets;
-100
-101/**
-102 * A Store holds a column family in a 
Region.  Its a memstore and a set of zero
-103 * or more StoreFiles, which stretch 
backwards over time.
-104 *
-105 * pThere's no reason to consider 
append-logging at this level; all logging
-106 * and locking is handled at the HRegion 
level.  Store just provides
-107 * services to manage sets of StoreFiles. 
 One of the most important of those
-108 * services is compaction services where 
files are aggregated once they pass
-109 * a configurable threshold.
-110 *
-111 * pLocking and transactions are 
handled at a higher level.  This API should
-112 * not be called directly but by an 
HRegion manager.
-113 */
-114@InterfaceAudience.Private
-115public class HStore implements Store {
-116  public static final String 
MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class";
-117  public static final String 

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
new file mode 100644
index 000..b7ca7d9
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html
@@ -0,0 +1,615 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one or more
+003 * contributor license agreements.  See 
the NOTICE file distributed with
+004 * this work for additional information 
regarding copyright ownership.
+005 * The ASF licenses this file to you 
under the Apache License, Version 2.0
+006 * (the "License"); you may not use this 
file except in compliance with
+007 * the License.  You may obtain a copy of 
the License at
+008 *
+009 * 
http://www.apache.org/licenses/LICENSE-2.0
+010 *
+011 * Unless required by applicable law or 
agreed to in writing, software
+012 * distributed under the License is 
distributed on an "AS IS" BASIS,
+013 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+014 * See the License for the specific 
language governing permissions and
+015 * limitations under the License.
+016 */
+017package org.apache.hadoop.hbase.quotas;
+018
+019import java.io.IOException;
+020import java.util.ArrayList;
+021import java.util.Arrays;
+022import java.util.Collection;
+023import java.util.Collections;
+024import java.util.HashSet;
+025import java.util.List;
+026import java.util.Map;
+027import java.util.Map.Entry;
+028import java.util.Objects;
+029import java.util.Set;
+030import java.util.concurrent.TimeUnit;
+031import java.util.function.Predicate;
+032import java.util.stream.Collectors;
+033
+034import 
org.apache.commons.lang.builder.HashCodeBuilder;
+035import org.apache.commons.logging.Log;
+036import 
org.apache.commons.logging.LogFactory;
+037import 
org.apache.hadoop.conf.Configuration;
+038import org.apache.hadoop.fs.FileStatus;
+039import org.apache.hadoop.fs.FileSystem;
+040import org.apache.hadoop.fs.Path;
+041import 
org.apache.hadoop.hbase.HRegionInfo;
+042import 
org.apache.hadoop.hbase.ScheduledChore;
+043import 
org.apache.hadoop.hbase.Stoppable;
+044import 
org.apache.hadoop.hbase.TableName;
+045import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+046import 
org.apache.hadoop.hbase.client.Admin;
+047import 
org.apache.hadoop.hbase.client.Connection;
+048import 
org.apache.hadoop.hbase.client.Table;
+049import 
org.apache.hadoop.hbase.master.HMaster;
+050import 
org.apache.hadoop.hbase.master.MetricsMaster;
+051import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+052import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+053import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles;
+054import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile;
+055import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+056import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+057import 
org.apache.hadoop.hbase.util.FSUtils;
+058import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
+059import 
org.apache.hadoop.util.StringUtils;
+060
+061import 
com.google.common.collect.HashMultimap;
+062import 
com.google.common.collect.Multimap;
+063
+064/**
+065 * A Master-invoked {@code Chore} that 
computes the size of each snapshot which was created from
+066 * a table which has a space quota.
+067 */
+068@InterfaceAudience.Private
+069public class SnapshotQuotaObserverChore 
extends ScheduledChore {
+070  private static final Log LOG = 
LogFactory.getLog(SnapshotQuotaObserverChore.class);
+071  static final String 
SNAPSHOT_QUOTA_CHORE_PERIOD_KEY =
+072  
"hbase.master.quotas.snapshot.chore.period";
+073  static final int 
SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis
+074
+075  static final String 
SNAPSHOT_QUOTA_CHORE_DELAY_KEY =
+076  
"hbase.master.quotas.snapshot.chore.delay";
+077  static final long 
SNAPSHOT_QUOTA_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute in millis
+078
+079  static final String 
SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY =
+080  
"hbase.master.quotas.snapshot.chore.timeunit";
+081  static final String 
SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name();
+082
+083  private final Connection conn;
+084  private final Configuration conf;
+085  private final MetricsMaster metrics;
+086  private final FileSystem fs;
+087
+088  public 
SnapshotQuotaObserverChore(HMaster master, MetricsMaster metrics) {
+089this(
+090

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-archetypes/hbase-archetype-builder/source-repository.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/source-repository.html 
b/hbase-archetypes/hbase-archetype-builder/source-repository.html
index 960538f..12b23a9 100644
--- a/hbase-archetypes/hbase-archetype-builder/source-repository.html
+++ b/hbase-archetypes/hbase-archetype-builder/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-archetypes/hbase-archetype-builder/team-list.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/team-list.html 
b/hbase-archetypes/hbase-archetype-builder/team-list.html
index 22a1c57..78090ed 100644
--- a/hbase-archetypes/hbase-archetype-builder/team-list.html
+++ b/hbase-archetypes/hbase-archetype-builder/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-archetypes/hbase-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-client-project/checkstyle.html 
b/hbase-archetypes/hbase-client-project/checkstyle.html
index 24c9cd4..705070d 100644
--- a/hbase-archetypes/hbase-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-archetypes/hbase-client-project/dependencies.html
--
diff --git a/hbase-archetypes/hbase-client-project/dependencies.html 
b/hbase-archetypes/hbase-client-project/dependencies.html
index 1ab6024..b2083cb 100644
--- a/hbase-archetypes/hbase-client-project/dependencies.html
+++ b/hbase-archetypes/hbase-client-project/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype
@@ -3585,7 +3585,7 @@ built on Jackson JSON processor
 -
 
 hbase-server-3.0.0-SNAPSHOT-tests.jar
-7.91 MB
+7.96 MB
 -
 -
 -
@@ -4071,7 +4071,7 @@ built on Jackson JSON processor
 Sealed
 
 150
-71.09 MB
+71.14 MB
 35,968
 31,657
 1,529
@@ -4089,7 +4089,7 @@ built on Jackson JSON processor
 compile: 1
 
 test: 70
-test: 38.62 MB
+test: 38.67 MB
 test: 13,539
 test: 11,825
 test: 536

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-archetypes/hbase-client-project/dependency-convergence.html
--
diff --git a/hbase-archetypes/hbase-client-project/dependency-convergence.html 
b/hbase-archetypes/hbase-client-project/dependency-convergence.html
index c965e63..27a6313 100644
--- a/hbase-archetypes/hbase-client-project/dependency-convergence.html
+++ b/hbase-archetypes/hbase-client-project/dependency-convergence.html

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
index bc4042f..20bb545 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
@@ -62,286 +62,309 @@
 054public class 
ReplicationSourceShipperThread extends Thread {
 055  private static final Log LOG = 
LogFactory.getLog(ReplicationSourceShipperThread.class);
 056
-057  protected final Configuration conf;
-058  protected final String walGroupId;
-059  protected final 
PriorityBlockingQueuePath queue;
-060  protected final 
ReplicationSourceInterface source;
-061
-062  // Last position in the log that we 
sent to ZooKeeper
-063  protected long lastLoggedPosition = 
-1;
-064  // Path of the current log
-065  protected volatile Path currentPath;
-066  // Indicates whether this particular 
worker is running
-067  private boolean workerRunning = true;
-068  protected 
ReplicationSourceWALReaderThread entryReader;
-069
-070  // How long should we sleep for each 
retry
-071  protected final long sleepForRetries;
-072  // Maximum number of retries before 
taking bold actions
-073  protected final int 
maxRetriesMultiplier;
-074
-075  // Use guava cache to set ttl for each 
key
-076  private final LoadingCacheString, 
Boolean canSkipWaitingSet = CacheBuilder.newBuilder()
-077  .expireAfterAccess(1, 
TimeUnit.DAYS).build(
-078  new CacheLoaderString, 
Boolean() {
-079@Override
-080public Boolean load(String key) 
throws Exception {
-081  return false;
-082}
-083  }
-084  );
-085
-086  public 
ReplicationSourceShipperThread(Configuration conf, String walGroupId,
-087  PriorityBlockingQueuePath 
queue, ReplicationSourceInterface source) {
-088this.conf = conf;
-089this.walGroupId = walGroupId;
-090this.queue = queue;
-091this.source = source;
-092this.sleepForRetries =
-093
this.conf.getLong("replication.source.sleepforretries", 1000);// 1 second
-094this.maxRetriesMultiplier =
-095
this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes 
@ 1 sec per
-096  }
-097
-098  @Override
-099  public void run() {
-100// Loop until we close down
-101while (isActive()) {
-102  int sleepMultiplier = 1;
-103  // Sleep until replication is 
enabled again
-104  if (!source.isPeerEnabled()) {
-105if (sleepForRetries("Replication 
is disabled", sleepMultiplier)) {
-106  sleepMultiplier++;
-107}
-108continue;
-109  }
-110
-111  while (entryReader == null) {
-112if (sleepForRetries("Replication 
WAL entry reader thread not initialized",
-113  sleepMultiplier)) {
+057  // Hold the state of a replication 
worker thread
+058  public enum WorkerState {
+059RUNNING,
+060STOPPED,
+061FINISHED,  // The worker is done 
processing a recovered queue
+062  }
+063
+064  protected final Configuration conf;
+065  protected final String walGroupId;
+066  protected final 
PriorityBlockingQueuePath queue;
+067  protected final 
ReplicationSourceInterface source;
+068
+069  // Last position in the log that we 
sent to ZooKeeper
+070  protected long lastLoggedPosition = 
-1;
+071  // Path of the current log
+072  protected volatile Path currentPath;
+073  // Current state of the worker thread
+074  private WorkerState state;
+075  protected 
ReplicationSourceWALReaderThread entryReader;
+076
+077  // How long should we sleep for each 
retry
+078  protected final long sleepForRetries;
+079  // Maximum number of retries before 
taking bold actions
+080  protected final int 
maxRetriesMultiplier;
+081
+082  // Use guava cache to set ttl for each 
key
+083  private final LoadingCacheString, 
Boolean canSkipWaitingSet = CacheBuilder.newBuilder()
+084  .expireAfterAccess(1, 
TimeUnit.DAYS).build(
+085  new CacheLoaderString, 
Boolean() {
+086@Override
+087public Boolean load(String key) 
throws Exception {
+088  return false;
+089}
+090  }
+091  );
+092
+093  public 
ReplicationSourceShipperThread(Configuration conf, String walGroupId,
+094  PriorityBlockingQueuePath 
queue, ReplicationSourceInterface source) {
+095this.conf = conf;
+096this.walGroupId = walGroupId;
+097this.queue = queue;
+098this.source = source;
+099this.sleepForRetries =
+100
this.conf.getLong("replication.source.sleepforretries", 1000);// 1 second
+101

hbase git commit: HBASE-18199 Race in NettyRpcConnection may cause call stuck in BufferCallBeforeInitHandler forever

2017-06-10 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6e3da5a39 -> 6860ddca9


HBASE-18199 Race in NettyRpcConnection may cause call stuck in 
BufferCallBeforeInitHandler forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6860ddca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6860ddca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6860ddca

Branch: refs/heads/branch-1
Commit: 6860ddca9f1237129f50d6920d20b832f3ded50d
Parents: 6e3da5a
Author: zhangduo 
Authored: Sat Jun 10 18:49:34 2017 +0800
Committer: zhangduo 
Committed: Sat Jun 10 19:07:23 2017 +0800

--
 .../hadoop/hbase/ipc/NettyRpcConnection.java| 59 ++--
 1 file changed, 42 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6860ddca/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 9a90b09..1b31182 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -70,8 +70,8 @@ class NettyRpcConnection extends RpcConnection {
 
   private static final Log LOG = LogFactory.getLog(NettyRpcConnection.class);
 
-  private static final ScheduledExecutorService RELOGIN_EXECUTOR = Executors
-  
.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
+  private static final ScheduledExecutorService RELOGIN_EXECUTOR =
+  
Executors.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
 
   private final NettyRpcClient rpcClient;
 
@@ -88,8 +88,8 @@ class NettyRpcConnection extends RpcConnection {
 rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, 
rpcClient.compressor);
 this.rpcClient = rpcClient;
 byte[] connectionHeaderPreamble = getConnectionHeaderPreamble();
-this.connectionHeaderPreamble = 
Unpooled.directBuffer(connectionHeaderPreamble.length)
-.writeBytes(connectionHeaderPreamble);
+this.connectionHeaderPreamble =
+
Unpooled.directBuffer(connectionHeaderPreamble.length).writeBytes(connectionHeaderPreamble);
 ConnectionHeader header = getConnectionHeader();
 this.connectionHeaderWithLength = Unpooled.directBuffer(4 + 
header.getSerializedSize());
 this.connectionHeaderWithLength.writeInt(header.getSerializedSize());
@@ -246,9 +246,23 @@ class NettyRpcConnection extends RpcConnection {
 }).channel();
   }
 
+  private void write(Channel ch, final Call call) {
+ch.writeAndFlush(call).addListener(new ChannelFutureListener() {
+
+  @Override
+  public void operationComplete(ChannelFuture future) throws Exception {
+// Fail the call if we failed to write it out. This usually because 
the channel is
+// closed. This is needed because we may shutdown the channel inside 
event loop and
+// there may still be some pending calls in the event loop queue after 
us.
+if (!future.isSuccess()) {
+  call.setException(toIOE(future.cause()));
+}
+  }
+});
+  }
+
   @Override
-  public synchronized void sendRequest(final Call call, HBaseRpcController hrc)
-  throws IOException {
+  public synchronized void sendRequest(final Call call, HBaseRpcController 
hrc) throws IOException {
 if (reloginInProgress) {
   throw new IOException("Can not send request because relogin is in 
progress.");
 }
@@ -274,18 +288,29 @@ class NettyRpcConnection extends RpcConnection {
 connect();
   }
   scheduleTimeoutTask(call);
-  channel.writeAndFlush(call).addListener(new ChannelFutureListener() {
-
-@Override
-public void operationComplete(ChannelFuture future) throws 
Exception {
-  // Fail the call if we failed to write it out. This usually 
because the channel is
-  // closed. This is needed because we may shutdown the channel 
inside event loop and
-  // there may still be some pending calls in the event loop queue 
after us.
-  if (!future.isSuccess()) {
-call.setException(toIOE(future.cause()));
+  final Channel ch = channel;
+  // We must move the whole writeAndFlush call inside event loop 
otherwise there will be a
+  // race condition.
+  // In netty's DefaultChannelPipeline, it will find the first 
outbound handler in the
+  // current thread and then schedule a task to event loop which will 

hbase git commit: HBASE-18199 Race in NettyRpcConnection may cause call stuck in BufferCallBeforeInitHandler forever

2017-06-10 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1aedc07b5 -> eca1ec335


HBASE-18199 Race in NettyRpcConnection may cause call stuck in 
BufferCallBeforeInitHandler forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eca1ec33
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eca1ec33
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eca1ec33

Branch: refs/heads/branch-2
Commit: eca1ec335667429896c40106e584128bdc5f90b8
Parents: 1aedc07
Author: zhangduo 
Authored: Sat Jun 10 19:11:46 2017 +0800
Committer: zhangduo 
Committed: Sat Jun 10 19:12:29 2017 +0800

--
 .../hadoop/hbase/ipc/NettyRpcConnection.java| 63 ++--
 1 file changed, 44 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eca1ec33/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 47d7234..204b812 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -71,8 +71,8 @@ class NettyRpcConnection extends RpcConnection {
 
   private static final Log LOG = LogFactory.getLog(NettyRpcConnection.class);
 
-  private static final ScheduledExecutorService RELOGIN_EXECUTOR = Executors
-  
.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
+  private static final ScheduledExecutorService RELOGIN_EXECUTOR =
+  
Executors.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
 
   private final NettyRpcClient rpcClient;
 
@@ -89,8 +89,8 @@ class NettyRpcConnection extends RpcConnection {
 rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, 
rpcClient.compressor);
 this.rpcClient = rpcClient;
 byte[] connectionHeaderPreamble = getConnectionHeaderPreamble();
-this.connectionHeaderPreamble = 
Unpooled.directBuffer(connectionHeaderPreamble.length)
-.writeBytes(connectionHeaderPreamble);
+this.connectionHeaderPreamble =
+
Unpooled.directBuffer(connectionHeaderPreamble.length).writeBytes(connectionHeaderPreamble);
 ConnectionHeader header = getConnectionHeader();
 this.connectionHeaderWithLength = Unpooled.directBuffer(4 + 
header.getSerializedSize());
 this.connectionHeaderWithLength.writeInt(header.getSerializedSize());
@@ -215,8 +215,8 @@ class NettyRpcConnection extends RpcConnection {
 
 // add ReadTimeoutHandler to deal with server doesn't response 
connection header
 // because of the different configuration in client side and 
server side
-p.addFirst(new ReadTimeoutHandler(
-RpcClient.DEFAULT_SOCKET_TIMEOUT_READ, TimeUnit.MILLISECONDS));
+p.addFirst(
+  new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ, 
TimeUnit.MILLISECONDS));
 p.addLast(chHandler);
 connectionHeaderPromise.addListener(new FutureListener() {
   @Override
@@ -281,9 +281,23 @@ class NettyRpcConnection extends RpcConnection {
 }).channel();
   }
 
+  private void write(Channel ch, final Call call) {
+ch.writeAndFlush(call).addListener(new ChannelFutureListener() {
+
+  @Override
+  public void operationComplete(ChannelFuture future) throws Exception {
+// Fail the call if we failed to write it out. This usually because 
the channel is
+// closed. This is needed because we may shutdown the channel inside 
event loop and
+// there may still be some pending calls in the event loop queue after 
us.
+if (!future.isSuccess()) {
+  call.setException(toIOE(future.cause()));
+}
+  }
+});
+  }
+
   @Override
-  public synchronized void sendRequest(final Call call, HBaseRpcController hrc)
-  throws IOException {
+  public synchronized void sendRequest(final Call call, HBaseRpcController 
hrc) throws IOException {
 if (reloginInProgress) {
   throw new IOException("Can not send request because relogin is in 
progress.");
 }
@@ -309,18 +323,29 @@ class NettyRpcConnection extends RpcConnection {
 connect();
   }
   scheduleTimeoutTask(call);
-  channel.writeAndFlush(call).addListener(new ChannelFutureListener() {
-
-@Override
-public void operationComplete(ChannelFuture future) throws 
Exception {
-  // Fail the call if we failed to write it out. This usually 
because the channel 

hbase git commit: HBASE-18199 Race in NettyRpcConnection may cause call stuck in BufferCallBeforeInitHandler forever

2017-06-10 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master eb2dc5d2a -> ea64dbef7


HBASE-18199 Race in NettyRpcConnection may cause call stuck in 
BufferCallBeforeInitHandler forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea64dbef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea64dbef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea64dbef

Branch: refs/heads/master
Commit: ea64dbef7f5239ab2162d0bd3dccded60e20ecda
Parents: eb2dc5d
Author: zhangduo 
Authored: Sat Jun 10 19:11:46 2017 +0800
Committer: zhangduo 
Committed: Sat Jun 10 19:12:06 2017 +0800

--
 .../hadoop/hbase/ipc/NettyRpcConnection.java| 63 ++--
 1 file changed, 44 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea64dbef/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 47d7234..204b812 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -71,8 +71,8 @@ class NettyRpcConnection extends RpcConnection {
 
   private static final Log LOG = LogFactory.getLog(NettyRpcConnection.class);
 
-  private static final ScheduledExecutorService RELOGIN_EXECUTOR = Executors
-  
.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
+  private static final ScheduledExecutorService RELOGIN_EXECUTOR =
+  
Executors.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin"));
 
   private final NettyRpcClient rpcClient;
 
@@ -89,8 +89,8 @@ class NettyRpcConnection extends RpcConnection {
 rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, 
rpcClient.compressor);
 this.rpcClient = rpcClient;
 byte[] connectionHeaderPreamble = getConnectionHeaderPreamble();
-this.connectionHeaderPreamble = 
Unpooled.directBuffer(connectionHeaderPreamble.length)
-.writeBytes(connectionHeaderPreamble);
+this.connectionHeaderPreamble =
+
Unpooled.directBuffer(connectionHeaderPreamble.length).writeBytes(connectionHeaderPreamble);
 ConnectionHeader header = getConnectionHeader();
 this.connectionHeaderWithLength = Unpooled.directBuffer(4 + 
header.getSerializedSize());
 this.connectionHeaderWithLength.writeInt(header.getSerializedSize());
@@ -215,8 +215,8 @@ class NettyRpcConnection extends RpcConnection {
 
 // add ReadTimeoutHandler to deal with server doesn't response 
connection header
 // because of the different configuration in client side and 
server side
-p.addFirst(new ReadTimeoutHandler(
-RpcClient.DEFAULT_SOCKET_TIMEOUT_READ, TimeUnit.MILLISECONDS));
+p.addFirst(
+  new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ, 
TimeUnit.MILLISECONDS));
 p.addLast(chHandler);
 connectionHeaderPromise.addListener(new FutureListener() {
   @Override
@@ -281,9 +281,23 @@ class NettyRpcConnection extends RpcConnection {
 }).channel();
   }
 
+  private void write(Channel ch, final Call call) {
+ch.writeAndFlush(call).addListener(new ChannelFutureListener() {
+
+  @Override
+  public void operationComplete(ChannelFuture future) throws Exception {
+// Fail the call if we failed to write it out. This usually because 
the channel is
+// closed. This is needed because we may shutdown the channel inside 
event loop and
+// there may still be some pending calls in the event loop queue after 
us.
+if (!future.isSuccess()) {
+  call.setException(toIOE(future.cause()));
+}
+  }
+});
+  }
+
   @Override
-  public synchronized void sendRequest(final Call call, HBaseRpcController hrc)
-  throws IOException {
+  public synchronized void sendRequest(final Call call, HBaseRpcController 
hrc) throws IOException {
 if (reloginInProgress) {
   throw new IOException("Can not send request because relogin is in 
progress.");
 }
@@ -309,18 +323,29 @@ class NettyRpcConnection extends RpcConnection {
 connect();
   }
   scheduleTimeoutTask(call);
-  channel.writeAndFlush(call).addListener(new ChannelFutureListener() {
-
-@Override
-public void operationComplete(ChannelFuture future) throws 
Exception {
-  // Fail the call if we failed to write it out. This usually 
because the channel is