svn commit: r46472 - /dev/phoenix/KEYS

2021-03-04 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Fri Mar  5 03:42:54 2021
New Revision: 46472

Log:
Added Chinmay Kulkarni's new KEYS and removed old ones

Modified:
dev/phoenix/KEYS

Modified: dev/phoenix/KEYS
==
--- dev/phoenix/KEYS (original)
+++ dev/phoenix/KEYS Fri Mar  5 03:42:54 2021
@@ -838,140 +838,6 @@ gYChkTZ6ZRrCY+hOM/QszHHwdJutL5gJN8Wqzy8X
 96YDHgCmW2GHse+OcuDV1rSt47aeou5P0rY/3I0cMHitzA==
 =znSV
 -END PGP PUBLIC KEY BLOCK-
-pub   rsa4096 2019-10-17 [SC]
-  E072ED9263AFA2F6A6F08F6D7C5FC713DE4C59D7
-uid   [ultimate] Chinmay Kulkarni (CODE SIGNING KEY) 

-sig 37C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-uid   [ultimate] Chinmay Kulkarni (CODE SIGNING KEY) 

-sig 37C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-sub   rsa4096 2019-10-17 [E]
-sig  7C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-
--BEGIN PGP PUBLIC KEY BLOCK-
-
-mQINBF2oD4kBEAD32gCyHWmJsdJmjbhMQJydEJLUjDqEj7/94jsU57ZLscyg9uz5
-stuyLyR914qa8MYmvc4RjkKEsTNppmI6znMpgeGpa6iuPpZil0sentZfZeifyxDd
-JkLKKib9jhdfD5zXsSTJe9n2zfYExjcyRWTeF2tUrBonAO+ex2hIpkF1ZljXDGHb
-M5lliTmhFBGxM8PSwtcPGV826ff+cLOv70lZTcd+2TIKuQ/QDY72+NohQE8k3YDk
-w9imQIKQ4bGqy3jzeCi2oDOzXD2wAdha5zkYbZMFgAmwYONHR1j0oFdjAm0Rn8lj
-1K6WyTTUiJ0pPdApMrgxRlR6VlxXV2YL7H4Gyk03FnQEbyyp+PxDnxlkw1cJOelX
-9xYI9piVH1kV/H5eL21XzIQSChEPorsFX/oXvPe6Anfl0nXeUT7Zfo0HrGGG83yR
-w5bBSameG9rM9WBxoRVT8DUeOMrfj+cIxUqBBSaE16ooXHIGoHijcfAoOd7dKvD2
-gsUSwlmJGkohjTnwIDQpsFR9s8reQ60nohVFER4U59DPraoC/34N7Sv+YVOqKKyT
-XWJR6kRMcZvPqpBkhWaxMXmlvnsoTSY3v7IDAD9sNnJQ9i7yCrwEJBoNXPNG8EAf
-NT4+jyc31rX7tRUNvtR63s7yJ6b7BVeHeFAfAwxkzpyti3Zl1RgPOs/tbQARAQAB
-tEBDaGlubWF5IEt1bGthcm5pIChDT0RFIFNJR05JTkcgS0VZKSA8Y2hpbm1heXNr
-dWxrYXJuaUBnbWFpbC5jb20+iQJOBBMBCAA4FiEE4HLtkmOvovam8I9tfF/HE95M
-WdcFAl2oD4kCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQfF/HE95MWdfA
-xg/5AdWN+IzCXHTRztJbqhyE/y7ehhnyBVkbc8TvwOyCV/LwAt+gTqZ8VXU03JUm
-0b68tj+Cin5XXlw5OZhceeOgeUI/dmBaHJlhjpYNnCPQprLAA3jjVEGfHNZXNuk0
-xOD3mBvvgHT6sa/n8GBKa7Q5nFkYDr37TYm2dkEArwkSfa7TE6m2B6ZE2tVNW5uD
-xQl5EMVsnpKZptKD4U30DyrR4b48mymIyHJ8fS5S9g+TiJwFIxdJCa9rnlPZH6+I
-4Nt4GFQS5HL48WQpkB7IXRQm1QPO1bH0QfMaiVKHUxW8wAjhZbjJgfIJUfeFx8bI
-y019qGQY5tfTzPcI5c7nkz4oAmvHOad7RDiXa+KdSN2bL5xuRZQ4zsl4aZIOHaSh
-Y5WA4dAeLQTylxfboRhgc64LHPpkJH4U/8kqYmdrmPxox7MJ2Wwvs8bOqLwpVUod
-VZNkbEGuMBnwYJkQqYffZ9TBl9krULR8VsEnrfVjPsRCBjiZZP09BYgNLypnPvUQ
-53wenmOxFXt4wCuCaPgcDMO90Jq0hUTibcs/fcIfQWt3o8E5vN/6VwbSOrb7w35M
-lwcLX0wlYSibZpO4nsGeB8lFRQtaS1n1xFd2lin7mMOX+eSXsV3eb7HWDm+ccizH
-zpYm9avbq0ekxolZKxvH+ibIoxjKn7OTkD2ubfd45OvZabm0QUNoaW5tYXkgS3Vs
-a2FybmkgKENPREUgU0lHTklORyBLRVkpIDxjaGlubWF5c2t1bGthcm5pQGFwYWNo
-ZS5vcmc+iQJOBBMBCAA4FiEE4HLtkmOvovam8I9tfF/HE95MWdcFAl2oFn8CGwMF
-CwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQfF/HE95MWdcbixAAqtf0mCf7b3WY
-w1tHZ6HfbGjtCae4QW875wwOxtbG+Mpbvprp/D5cxHcG2t46M63tPhvzwXSy2etl
-RfiSq3Nl7Af5/i+dzCIEsvtyX0lSTdonBpqoSXrVN9Br4cJmlFlGwNHrP/2f+xhI
-J+7CYUWvNFMo/Y8UxKWvhlhhGHGPBylXllsKGPnjJRrDy8zsx/pnd/LKLGvP/aoZ
-JEyB95qadz601Wbfo9hzvEEGAJdKbS2ORXLAmguYPDtSxwYUQVdNdhyuA0nV1l+J
-KupEAgqtbL3spri1V4mL9S+6P5EXCU1jF7JMJaTBz1tvbkA7Qp95M+y9SHas0N7+
-11BqSQ/X4biV+lueCMmxnjUuoQxCsPbvxFrANPFA4q2Swq7jZGCf92OWjDzuwMzy
-9eocMC03ZdtrkM2mXmB1QXlqgQTjMzAiX3g+l05GutKtnuYAd5ecNcYrF4eye2jX
-vrKpu1xFlkTVhhZtO0NQPWxQOGxtPs8jZ/pGJl1Okz9NOdBfn1qmUjFY7O0LiA28
-M8tbvwo4w2+tVYQztbzqOoyhhvbHTT+Aje6vjDXlQwKi34XRdfAOfFyRUriKj+Ru
-FkqYGRAnrgnObLNavjT5KhAjh9Gbut6bciO1c5zrLfw5q6TnGPs3S/2MZNwhcktu
-ctCf4kooVuhZbamN/UaajgSTeVR9MBm5Ag0EXagPiQEQAL5d5ytZnVoaWxPpaH8c
-NUkL9BSF/swGJdRyDrscGdleipT83AjOH7YrDKphG8MeABUTqv+0S+wVu3kOodhW
-Se+4wU4s4chIR8qCxF7nBpWHecLmdr7dMCX2opaBA7ILndzw1ZAiU1TJeu3N5oNw
-L0oXbiPW1ZZZ+h6lb5QCXksWYYsf4PTIU6AP2z3/DjCqiKrgLx1pBmjYGTFU8mP3
-AswkLZi0J5kffREhznK/KjDI++UZ1tNCK8mzDC0Vy4Y7sGDBoB832vWyn+FYmUMV
-V9avRLkL5WoOLBCXks+WLYuL5hKlNjNEDr2eN9OD65Zf7O3IjEraVA7viCSOcEfC
-3lFxsQz3nEJG/GM8PQfgqWnfLF5kbKrK5ao8d29oCUmSJISAVfz7pZZjdAWKahwV
-oZ/1MdDbiduVzht1Kf89zjO9n722FcQpZkV3ltFdL1/MVOA0ZqPfND7S3OYmQH+S
-AR9CoVkdmar70hHx2W9YxdIxy53O6WPastctfs2sm1zF18zrIYxEEygE5Y2gQN7g
-g/j/CNvANKNr9A28IbLkkbQCfUdWhUYv/TSW5+T+eoAkhv/QkKP46SM1rZq8xwxG
-c52F3SNC5mcgNtoouTa0REnaGVKg+9Y42Dcdcyv6PgDVL3eqk8N5bsHGWr5uNw8e
-EE3kfEKK0CtHvhnl91f0HSj/ABEBAAGJAjYEGAEIACAWIQTgcu2SY6+i9qbwj218
-X8cT3kxZ1wUCXagPiQIbDAAKCRB8X8cT3kxZ160PEACiFossyD6NHHOZDUxDUpjH
-ZHu87fJhSnazu8O6K94HTUPl9/h+WKs9irs7aT9sO+dAKqiOJMHKwD5cnI68ThGx
-ffS/6HDx2S74gW23Vaqq0sb+lNgA/JDXE4rcP2M8r2XmlvBSH1USF2SQVTbPtcUm
-sLp2y6GjvP43BwqrtgljURhuAkWxgPmMhce/T2259aLq8DZHisMuMzLYFP7p/gSB
-lHaX+35feefma/Yd1o6t0F6VpQBX1DmQyNbltMI4D6tnDnre8Z1ly9Biwma3tttI
-D99tT7lkI/V/3WLx8A+3ryvlSVlMNeqUwS9v7X5Lu3Ucq0jl87xnRuizC+4YiHrx
-ZEdQKmKsRkK0llmoDkO8FESaHNGg01tHQyBpROrrneRU9JSyH+qzDCm3LP4bSDoV
-3zd/VPPphffuY2VAkXdKfzkL8/w5eUGigeSJ7XYraNwM+82cWgbHlwlNnqU/XqeP
-sB8rYnbSBonVO9ODUPFKGaW2RtXwmYhRGUyr8fNoEVKIVdcl5R5b0t0cQpIIjXMC
-HV5R2k6xpcZ0I/J20KU1bs/+67itptqSP0KGzcKP50SQjwp59u98vVzA6pchbp5w

svn commit: r46471 - /release/phoenix/KEYS

2021-03-04 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Fri Mar  5 03:37:00 2021
New Revision: 46471

Log:
Added Chinmay Kulkarni's new KEYS and removed old ones

Modified:
release/phoenix/KEYS

Modified: release/phoenix/KEYS
==
--- release/phoenix/KEYS (original)
+++ release/phoenix/KEYS Fri Mar  5 03:37:00 2021
@@ -838,81 +838,6 @@ gYChkTZ6ZRrCY+hOM/QszHHwdJutL5gJN8Wqzy8X
 96YDHgCmW2GHse+OcuDV1rSt47aeou5P0rY/3I0cMHitzA==
 =znSV
 -END PGP PUBLIC KEY BLOCK-
-pub   rsa4096 2019-10-17 [SC]
-  E072ED9263AFA2F6A6F08F6D7C5FC713DE4C59D7
-uid   [ultimate] Chinmay Kulkarni (CODE SIGNING KEY) 

-sig 37C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-uid   [ultimate] Chinmay Kulkarni (CODE SIGNING KEY) 

-sig 37C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-sub   rsa4096 2019-10-17 [E]
-sig  7C5FC713DE4C59D7 2019-10-17  Chinmay Kulkarni (CODE SIGNING KEY) 

-
--BEGIN PGP PUBLIC KEY BLOCK-
-
-mQINBF2oD4kBEAD32gCyHWmJsdJmjbhMQJydEJLUjDqEj7/94jsU57ZLscyg9uz5
-stuyLyR914qa8MYmvc4RjkKEsTNppmI6znMpgeGpa6iuPpZil0sentZfZeifyxDd
-JkLKKib9jhdfD5zXsSTJe9n2zfYExjcyRWTeF2tUrBonAO+ex2hIpkF1ZljXDGHb
-M5lliTmhFBGxM8PSwtcPGV826ff+cLOv70lZTcd+2TIKuQ/QDY72+NohQE8k3YDk
-w9imQIKQ4bGqy3jzeCi2oDOzXD2wAdha5zkYbZMFgAmwYONHR1j0oFdjAm0Rn8lj
-1K6WyTTUiJ0pPdApMrgxRlR6VlxXV2YL7H4Gyk03FnQEbyyp+PxDnxlkw1cJOelX
-9xYI9piVH1kV/H5eL21XzIQSChEPorsFX/oXvPe6Anfl0nXeUT7Zfo0HrGGG83yR
-w5bBSameG9rM9WBxoRVT8DUeOMrfj+cIxUqBBSaE16ooXHIGoHijcfAoOd7dKvD2
-gsUSwlmJGkohjTnwIDQpsFR9s8reQ60nohVFER4U59DPraoC/34N7Sv+YVOqKKyT
-XWJR6kRMcZvPqpBkhWaxMXmlvnsoTSY3v7IDAD9sNnJQ9i7yCrwEJBoNXPNG8EAf
-NT4+jyc31rX7tRUNvtR63s7yJ6b7BVeHeFAfAwxkzpyti3Zl1RgPOs/tbQARAQAB
-tEBDaGlubWF5IEt1bGthcm5pIChDT0RFIFNJR05JTkcgS0VZKSA8Y2hpbm1heXNr
-dWxrYXJuaUBnbWFpbC5jb20+iQJOBBMBCAA4FiEE4HLtkmOvovam8I9tfF/HE95M
-WdcFAl2oD4kCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQfF/HE95MWdfA
-xg/5AdWN+IzCXHTRztJbqhyE/y7ehhnyBVkbc8TvwOyCV/LwAt+gTqZ8VXU03JUm
-0b68tj+Cin5XXlw5OZhceeOgeUI/dmBaHJlhjpYNnCPQprLAA3jjVEGfHNZXNuk0
-xOD3mBvvgHT6sa/n8GBKa7Q5nFkYDr37TYm2dkEArwkSfa7TE6m2B6ZE2tVNW5uD
-xQl5EMVsnpKZptKD4U30DyrR4b48mymIyHJ8fS5S9g+TiJwFIxdJCa9rnlPZH6+I
-4Nt4GFQS5HL48WQpkB7IXRQm1QPO1bH0QfMaiVKHUxW8wAjhZbjJgfIJUfeFx8bI
-y019qGQY5tfTzPcI5c7nkz4oAmvHOad7RDiXa+KdSN2bL5xuRZQ4zsl4aZIOHaSh
-Y5WA4dAeLQTylxfboRhgc64LHPpkJH4U/8kqYmdrmPxox7MJ2Wwvs8bOqLwpVUod
-VZNkbEGuMBnwYJkQqYffZ9TBl9krULR8VsEnrfVjPsRCBjiZZP09BYgNLypnPvUQ
-53wenmOxFXt4wCuCaPgcDMO90Jq0hUTibcs/fcIfQWt3o8E5vN/6VwbSOrb7w35M
-lwcLX0wlYSibZpO4nsGeB8lFRQtaS1n1xFd2lin7mMOX+eSXsV3eb7HWDm+ccizH
-zpYm9avbq0ekxolZKxvH+ibIoxjKn7OTkD2ubfd45OvZabm0QUNoaW5tYXkgS3Vs
-a2FybmkgKENPREUgU0lHTklORyBLRVkpIDxjaGlubWF5c2t1bGthcm5pQGFwYWNo
-ZS5vcmc+iQJOBBMBCAA4FiEE4HLtkmOvovam8I9tfF/HE95MWdcFAl2oFn8CGwMF
-CwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQfF/HE95MWdcbixAAqtf0mCf7b3WY
-w1tHZ6HfbGjtCae4QW875wwOxtbG+Mpbvprp/D5cxHcG2t46M63tPhvzwXSy2etl
-RfiSq3Nl7Af5/i+dzCIEsvtyX0lSTdonBpqoSXrVN9Br4cJmlFlGwNHrP/2f+xhI
-J+7CYUWvNFMo/Y8UxKWvhlhhGHGPBylXllsKGPnjJRrDy8zsx/pnd/LKLGvP/aoZ
-JEyB95qadz601Wbfo9hzvEEGAJdKbS2ORXLAmguYPDtSxwYUQVdNdhyuA0nV1l+J
-KupEAgqtbL3spri1V4mL9S+6P5EXCU1jF7JMJaTBz1tvbkA7Qp95M+y9SHas0N7+
-11BqSQ/X4biV+lueCMmxnjUuoQxCsPbvxFrANPFA4q2Swq7jZGCf92OWjDzuwMzy
-9eocMC03ZdtrkM2mXmB1QXlqgQTjMzAiX3g+l05GutKtnuYAd5ecNcYrF4eye2jX
-vrKpu1xFlkTVhhZtO0NQPWxQOGxtPs8jZ/pGJl1Okz9NOdBfn1qmUjFY7O0LiA28
-M8tbvwo4w2+tVYQztbzqOoyhhvbHTT+Aje6vjDXlQwKi34XRdfAOfFyRUriKj+Ru
-FkqYGRAnrgnObLNavjT5KhAjh9Gbut6bciO1c5zrLfw5q6TnGPs3S/2MZNwhcktu
-ctCf4kooVuhZbamN/UaajgSTeVR9MBm5Ag0EXagPiQEQAL5d5ytZnVoaWxPpaH8c
-NUkL9BSF/swGJdRyDrscGdleipT83AjOH7YrDKphG8MeABUTqv+0S+wVu3kOodhW
-Se+4wU4s4chIR8qCxF7nBpWHecLmdr7dMCX2opaBA7ILndzw1ZAiU1TJeu3N5oNw
-L0oXbiPW1ZZZ+h6lb5QCXksWYYsf4PTIU6AP2z3/DjCqiKrgLx1pBmjYGTFU8mP3
-AswkLZi0J5kffREhznK/KjDI++UZ1tNCK8mzDC0Vy4Y7sGDBoB832vWyn+FYmUMV
-V9avRLkL5WoOLBCXks+WLYuL5hKlNjNEDr2eN9OD65Zf7O3IjEraVA7viCSOcEfC
-3lFxsQz3nEJG/GM8PQfgqWnfLF5kbKrK5ao8d29oCUmSJISAVfz7pZZjdAWKahwV
-oZ/1MdDbiduVzht1Kf89zjO9n722FcQpZkV3ltFdL1/MVOA0ZqPfND7S3OYmQH+S
-AR9CoVkdmar70hHx2W9YxdIxy53O6WPastctfs2sm1zF18zrIYxEEygE5Y2gQN7g
-g/j/CNvANKNr9A28IbLkkbQCfUdWhUYv/TSW5+T+eoAkhv/QkKP46SM1rZq8xwxG
-c52F3SNC5mcgNtoouTa0REnaGVKg+9Y42Dcdcyv6PgDVL3eqk8N5bsHGWr5uNw8e
-EE3kfEKK0CtHvhnl91f0HSj/ABEBAAGJAjYEGAEIACAWIQTgcu2SY6+i9qbwj218
-X8cT3kxZ1wUCXagPiQIbDAAKCRB8X8cT3kxZ160PEACiFossyD6NHHOZDUxDUpjH
-ZHu87fJhSnazu8O6K94HTUPl9/h+WKs9irs7aT9sO+dAKqiOJMHKwD5cnI68ThGx
-ffS/6HDx2S74gW23Vaqq0sb+lNgA/JDXE4rcP2M8r2XmlvBSH1USF2SQVTbPtcUm
-sLp2y6GjvP43BwqrtgljURhuAkWxgPmMhce/T2259aLq8DZHisMuMzLYFP7p/gSB
-lHaX+35feefma/Yd1o6t0F6VpQBX1DmQyNbltMI4D6tnDnre8Z1ly9Biwma3tttI
-D99tT7lkI/V/3WLx8A+3ryvlSVlMNeqUwS9v7X5Lu3Ucq0jl87xnRuizC+4YiHrx
-ZEdQKmKsRkK0llmoDkO8FESaHNGg01tHQyBpROrrneRU9JSyH+qzDCm3LP4bSDoV
-3zd/VPPphffuY2VAkXdKfzkL8/w5eUGigeSJ7XYraNwM+82cWgbHlwlNnqU/XqeP
-sB8rYnbSBonVO9ODUPFKGaW2RtXwmYhRGUyr8fNoEVKIVdcl5R5b0t0cQpIIjXMC
-HV5R2k6xpcZ0I/J20KU1bs

svn commit: r46298 - /dev/phoenix/phoenix-4.16.0RC3/ /release/phoenix/phoenix-4.16.0/

2021-02-23 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Wed Feb 24 01:12:03 2021
New Revision: 46298

Log:
Moving 4.16.0 RC3 to the release folder for release manager Xinyi Yan

Added:
release/phoenix/phoenix-4.16.0/
  - copied from r46297, dev/phoenix/phoenix-4.16.0RC3/
Removed:
dev/phoenix/phoenix-4.16.0RC3/



svn commit: r46297 - /release/phoenix/KEYS

2021-02-23 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Wed Feb 24 01:08:59 2021
New Revision: 46297

Log:
Add Xinyi Yan's key to release KEYS

Modified:
release/phoenix/KEYS

Modified: release/phoenix/KEYS
==
--- release/phoenix/KEYS (original)
+++ release/phoenix/KEYS Wed Feb 24 01:08:59 2021
@@ -950,3 +950,74 @@ qic2S63UJAmcu8NJPcpyLPEL9dW73l/mJzEeJ+6R
 LrpNgabmWs+9yCbisv599junedHZVYBLikoFE14TWsD6cXk//MdxhJGZVwsP
 =L91m
 -END PGP PUBLIC KEY BLOCK-
+pub   rsa4096 2021-01-19 [SC]
+  AFA8933E99C11D56A003F7A0E4882DD3AB711587
+uid   [ultimate] Xinyi Yan (CODE SIGNING KEY) 
+sig 3E4882DD3AB711587 2021-01-19  Xinyi Yan (CODE SIGNING KEY) 

+sig 3E4882DD3AB711587 2021-01-19  Xinyi Yan (CODE SIGNING KEY) 

+sub   rsa4096 2021-01-19 [E]
+sig  E4882DD3AB711587 2021-01-19  Xinyi Yan (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+
+mQINBGAHOyABEAC8nlXF7NNVp4rqfnc1qg2K8lVAX1a23nENEswOZ++OB1cME2hl
+7JVbRs7IHyxUK1oeBDLZ6f3jJRVvmHC+Kz7Aa+KMfFpYdvq2OYGBvZ+gm3N6fJ1k
+SYlhkytbOfWX5WSJcXtEZCAKPPsx1BvSVdS5sYUto4qdrW+94rqzEIbrBLvbVETj
+ls4Gtj1hVIaMy3PbCptJK4utv/oP/0jNZfYRdXaXCD45vAxVfVGjDikW3f85Oosi
+sJppey/3JxL1mfS+lga8oIHRMoiFfEgNB6hLCvQZZ9Z4zS5li8zEuEmBFNmerxs4
+fhSqYVdQqUZZpEkzzSBs1vEpOnxgnwCjaArlB8Xrcd3IO6YW20ZR00gJWmKPHYmz
+aXO3ninHqOZh4Sjuctl7EgnIpdz8hZ3fn8w++Od1OygopxKvxukFbxUicgCDvLol
++1aQGCvtqBoPHrhEmqxhg8JoHWUD13P3Z1Lz/4DKRM65iVdgcGT1QXXKJpHKVKJj
+L4kta10+92yIV127ng75530MN1CP2WbFCaq1pFSi5qgSL3wdLUjDgC7aUrihvvbo
+wR6gxCX5D7tooGZFQikdlCd2Do8SPRvsW3LgHz7DcegQWkoGNvHCdbyYjzr0fHNT
+Ln/2hnoRdctaURlfoBm2GP7/amgdvPUkcGXM/ddY8tSZtvW7LJwXRo3FFwARAQAB
+tDJYaW55aSBZYW4gKENPREUgU0lHTklORyBLRVkpIDx5YW54aW55aUBhcGFjaGUu
+b3JnPokCNwQTAQIAIQIbAwIeAQIXgAUCYAc8JwULCQgHAwUVCgkICwUWAgMBAAAK
+CRDkiC3Tq3EVh7bdD/wIhUOB9l3C5Ro2CLJQ4fztIYJs+t+UmAAGSKjlErwZJicI
+1b0WF0w6YWpLozhdzGTUtR7GwQKZYI0VOqTRC1gAhh/lFuN0JvkAUp+sapw/Ba17
+tYvIMWbaGwxGMb8+r830wsKMcK4H1YSF/9yT2WeuQIwTOZveLAh1+8NjQ99NTeOn
+DjN7ZxCXSpCHl9JRx1x/42TYxd7qbiYqeZ2eZBFx9oexJzy547F1YnxevDIHjg6P
+XTpvoXGY7KK6TReDPr/8JCg8bwyOSfJyn/u6m8hI8begUVHezCpur2iTAmB3E717
+YaVfa0XOOCDUe43SlIls0JjnFNwwb4B33Im0wi4Y1xLFZiRJakGQLb88aszmT5I9
+O5qecluyoRQzN4zbdMjlOOQDDXn5KfQd6NQaROP1w7sX0JW+W6+1HIHP4AhsQJ8i
+ew0i+7IFhHuc+FRnNPY3jWnITsNaqGFh8xcTaevPDoMEsfEzHmzi1pyqQrRoC/zw
+NWH20Db9SvqHmf4Rt0xtnIjJBzNAoVAlArhirJMIYrlSzlHSv06IV8et6qhqcHK8
+uuMzBDmB8Al70us9SlrVlspG7jmAO5mnDEuAKBA+i7JbQgDqUCmFWlAEV/rlt1RK
+0GkaqchMihLKeq27QNE1o9lEwcSf1uCfF6vPvOSfkNgTAi1zTwGXV0D4bIdb84kC
+OAQTAQIAIgUCYAc7IAIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ5Igt
+06txFYeq+g//Y8Swj+VONLrKIuo9OGmgK4lhzmCpyY/8GH8dBwMQcyF8lyr4eeS9
+4RdXusF8KQtd34SRK5jnBL+5icbSatfhQ9VlIQc+gFAJSxeXRe/h1r60ikSjLTGf
+ic6fuYjVVpTgAJ+viuyNfMdsJ9Srl9yMJwXD0+8JVsl4ACE8WsXQ4xYMhfLha0Y4
+nGDs2HV+vTtTo433jmi+J4qUIZ97DKSxdFK4IDDfPRD61N/aqF0I6MT/3R/nHRAy
+UrlnSA5kDvcRhujT45GxuSaVyw/ZnukPJJGOaMZF481ehlvZu4CTTIa4VoMJNfSs
+69NZQ+C1cI+gPXoiLyGiFsYAv4yOjWnCL3xaU0iEP/wHVUyXtj7luVHW1bdZidZZ
+ROUFj6kgkyi+QL/f7LGhSF4sRz53eXlkJ/D7DLyHXhT9JOhghkBEkqyhXPfhzJ05
+1ZCA2hYqTu8h5p9rCqKGWrZAaREdxn8flfRcDwk7j3y9Pl/r0MMae3WuyYkfZcqY
+rFUipkNkDeIjAzC923McFAOi5TRsToMA8Sxzw3kcnjE5VklprUminviZM8ppxDUe
+g9hhSZVw4fkeTbu3+AzY8iC2fBdAX1mb8QTZnAnidNvzG+2FEJycTvBJmEkw+57I
+8IU2lYlBpWwIRHEX7p4iKrC0bx0yxN0dr+stnaK/CMMFxQFRAZpcepi5Ag0EYAc7
+IAEQALTL/836qNK/wTCIfWp3ccC9L298stGu5+xcxgckMhd5XuhWXr0qONrlOkhX
+F0FMvwKl5hNTOOnRX7vcl/g6DNMDWRqGccA7ZGRch/uQ9jm5fX5zMNHmrxEC1H4I
+NkMGFoE63C0DMa7An37VAatm4n9SoAaukol7cwjUqH7HIHZ6EkZyJcbW909QIoxN
+iuU9Qvh4wNR44O50ZFruCsUJ6Xe2wSYEjW/oJw3Wm4jrYu84KO3Iv1Yxrxvk7tdB
+pUabQTf2IpdMtP+U6eCvCgPnc183m/BFpbvBoTNUFV/1ksYAC0V0nXlfa8HISuey
+a03clDnaZWnaqexRgbbtvSNEit6Bw3OTdLxH/PiFr/mv+A5FUtGt0ndxcD2nqNal
+EWL5ZJc3ffSh6Y2JgRZV+Ec6U0IRSye8eBwv+rUZdm9+b1DLYmsqCZFald1ajUy6
+9yDeLb0eZ5mB6GtWCKZ5c3T2usIEv5OmpxvTYYnIOQHO2Rook5BVnT1VDJhoMkyT
+ruRqrtFlQJ/VbPNHi2hQV7F9Ksa70YO22YYABYUnTeztSjoeQYOWCjbu+jdBwlVV
+l12BeJXUCan8LsE/eH64vuIzqpoD2yPwn8gDQ34aGoBnUmmGtgnepY1iS0QAC1P4
+PIP/b5f/ew5W6ou4OSOhczKLzsaKdOz1cxWq/rKTP735hupvABEBAAGJAh8EGAEC
+AAkFAmAHOyACGwwACgkQ5Igt06txFYfqJRAAhAkbpBLdhbU7rqOPNYyd6qRzgIvZ
+HtIIW0MZcyE0UoiKjMBhM8t/SGEZ/QaB1EsQ3XFpgP+8OEi/KXJ176QTVbfqpyWH
+A3WWY0EJvcmRtoodH8IrU9vnH6jMPvPBkxsDaEecXQNG0fZJgm2pHbzEvDEsBPcR
+JHMQKRG+khKXxU9MHkzZG6SwymL2NVqZ1Ac7ueTRYsUTsK763kX536+QeFZCfjpk
+d8VIgoKisQHFHOI73nai4mifijt6LLHGxinsxYF5/RFo56xDPgNpvRHs8qo4tVkO
+XlFdt+vFQY3pwO/P5cTrdFqPb40jBUBehLitxNZ5VzSc4P1xqJ+xZDzIrdQYi4XP
+XABq25WR25cUQpRZaoAkqmOPBEfwDicL3gwfFZsQTZZT2N2qQu68HPVo8Nl5ISu7
+2vFFMdPil6qH1LhwYirx79K/CXQf6ikT6R7Oh+L+7r0mXlRwnE5USkWzVh7qo7e4
+8yLrhb4ypf824T3KEQcrIva58mlBGBB1/tngohp+4/Tq6K22AuAqO6vcEAD0XmFi
+6eLNZ160urMD7nO5QiwfZeVJubEr2oBelqXyhX3B5eJriE/JUI4v5dfoHlgjcCmf
++swFz3qrC//fWKXcY36eVtLrQ5poBnt+c84ATJxF5G1FiH86584dxkda9ZUDtSpD
+2MDwHo+b3qEej9Y=
+=6W+C
+-END PGP PUBLIC KEY BLOCK-




[phoenix] branch 4.x updated: PHOENIX-6256: Fix MaxConcurrentConnectionsIT test flapper

2020-12-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new c0dd428  PHOENIX-6256: Fix MaxConcurrentConnectionsIT test flapper
c0dd428 is described below

commit c0dd428dfae813b26dda89e8588024ecbc6b23d7
Author: Chinmay Kulkarni 
AuthorDate: Thu Dec 10 15:53:04 2020 -0800

PHOENIX-6256: Fix MaxConcurrentConnectionsIT test flapper
---
 .../phoenix/query/MaxConcurrentConnectionsIT.java  | 29 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  2 +-
 2 files changed, 18 insertions(+), 13 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/query/MaxConcurrentConnectionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/query/MaxConcurrentConnectionsIT.java
index 611ef89..003dde7 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/query/MaxConcurrentConnectionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/query/MaxConcurrentConnectionsIT.java
@@ -18,6 +18,7 @@
 
 package org.apache.phoenix.query;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -39,8 +40,6 @@ import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTE
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS;
 import static 
org.apache.phoenix.query.QueryServices.CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS;
 import static 
org.apache.phoenix.query.QueryServices.INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS;
-import static 
org.apache.phoenix.query.QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB;
-import static 
org.apache.phoenix.query.QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB;
 import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -55,8 +54,15 @@ public class MaxConcurrentConnectionsIT extends 
BaseUniqueNamesOwnClusterIT {
 @BeforeClass
 public static void setUp() throws Exception {
 hbaseTestUtil = new HBaseTestingUtility();
-
+Configuration serverConf = hbaseTestUtil.getConfiguration();
+// Disable any task handling as that creates additional connections
+// This must be set before the mini-cluster is brought up
+serverConf.set(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
+Long.toString(Long.MAX_VALUE));
+serverConf.set(QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB,
+Long.toString(Long.MAX_VALUE));
 
hbaseTestUtil.startMiniCluster(1,1,null,null,DelayedRegionServer.class);
+
 // establish url and quorum. Need to use PhoenixDriver and not 
PhoenixTestDriver
 String zkQuorum = "localhost:" + 
hbaseTestUtil.getZkCluster().getClientPort();
 url = PhoenixRuntime.JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + 
zkQuorum +
@@ -85,17 +91,16 @@ public class MaxConcurrentConnectionsIT extends 
BaseUniqueNamesOwnClusterIT {
 //table with lots of regions
 String ddl = "create table " + tableName +  "  (i integer not null 
primary key, j integer) SALT_BUCKETS=256 ";
 
-Properties props = new Properties();
-
props.setProperty(CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS,String.valueOf(10));
-
props.setProperty(INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS,String.valueOf(10));
-
-//delay any task handeling as that causes additional connections
-
props.setProperty(TASK_HANDLING_INTERVAL_MS_ATTRIB,String.valueOf(60));
-
props.setProperty(TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB,String.valueOf(60));
+Properties clientProps = new Properties();
+clientProps.setProperty(CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS,
+String.valueOf(10));
+clientProps.setProperty(INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS,
+String.valueOf(10));
 
 String deleteStmt = "DELETE FROM " + tableName + " WHERE 20 = j";
 
-try(Connection conn = DriverManager.getConnection(connectionUrl, 
props); Statement statement = conn.createStatement()) {
+try(Connection conn = DriverManager.getConnection(connectionUrl,
+clientProps); Statement statement = conn.createStatement()) {
 statement.execute(ddl);
 }
 
@@ -103,7 +108,7 @@ public class MaxConcurrentConnectionsIT extends 
BaseUniqueNamesOwnClusterIT {
 assertEquals(0, 
GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.getMetric().getValue());
 Connection conn = null;
 try {
-conn = DriverManager.getConnection(connectionUrl, props);
+conn = Driv

[phoenix] branch master updated (99a5f4c -> 29c45c6)

2020-12-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 99a5f4c  PHOENIX-6239: NullPointerException when index table does not 
use COLUMN_ENCODED_BYTES (#994)
 add 29c45c6  PHOENIX-6086 : Avoid restoring snapshots of System tables

No new revisions were added by this update.

Summary of changes:
 .../phoenix/query/ConnectionQueryServicesImpl.java | 97 +++---
 1 file changed, 13 insertions(+), 84 deletions(-)



[phoenix] branch 4.x updated (0b69306 -> b8cb658)

2020-12-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 0b69306  PHOENIX-6239: NullPointerException when index table does not 
use COLUMN_ENCODED_BYTES (#994)
 add b8cb658  PHOENIX-6086 : Avoid restoring snapshots of System tables

No new revisions were added by this update.

Summary of changes:
 .../phoenix/query/ConnectionQueryServicesImpl.java | 97 +++---
 1 file changed, 13 insertions(+), 84 deletions(-)



[phoenix] branch 4.x updated (1c055e3 -> dbfa24e)

2020-11-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 1c055e3  PHOENIX-6155 : Use CompatRegionCoprocessorEnvironment 
(ADDENDUM)
 add dbfa24e  PHOENIX-5895 Leverage WALCellFilter in the 
SystemCatalogWALEntryFilter to replicate system catalog table

No new revisions were added by this update.

Summary of changes:
 .../replication/SystemCatalogWALEntryFilterIT.java | 17 +--
 .../replication/SystemCatalogWALEntryFilter.java   | 54 +++---
 2 files changed, 39 insertions(+), 32 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6155 : Use CompatRegionCoprocessorEnvironment (ADDENDUM)

2020-11-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 1c055e3  PHOENIX-6155 : Use CompatRegionCoprocessorEnvironment 
(ADDENDUM)
1c055e3 is described below

commit 1c055e3050c0e64ce009eb3127b62b08fd75ee3a
Author: Viraj Jasani 
AuthorDate: Mon Nov 23 18:43:04 2020 +0530

PHOENIX-6155 : Use CompatRegionCoprocessorEnvironment (ADDENDUM)

Signed-off-by: Chinmay Kulkarni 
---
 .../java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
index ae19bc6..fb2e9e9 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatRegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos;
 import org.apache.phoenix.protobuf.ProtobufUtil;
@@ -70,7 +71,7 @@ public class TaskMetaDataEndpointTest {
 MockitoAnnotations.initMocks(this);
 configuration = new Configuration();
 RegionCoprocessorEnvironment environment =
-new RegionCoprocessorEnvironment() {
+new CompatRegionCoprocessorEnvironment() {
 
 @Override
 public int getVersion() {



[phoenix] branch 4.x updated: PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct upserts into SYSTEM.TASK from the client

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new ed7f1a6  PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct 
upserts into SYSTEM.TASK from the client
ed7f1a6 is described below

commit ed7f1a6b69d20c87dfbbee97ec8612ccfb866d1b
Author: Viraj Jasani 
AuthorDate: Wed Nov 11 17:05:04 2020 +0530

PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct upserts into 
SYSTEM.TASK from the client

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   |  70 +-
 .../end2end/BackwardCompatibilityTestUtil.java |   3 +
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  17 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java |  10 +-
 .../phoenix/end2end/index/IndexMetadataIT.java |  18 +-
 .../gold_files/gold_query_index_rebuild_async.txt  |  23 +
 .../it/resources/sql_files/index_rebuild_async.sql |  27 +
 .../sql_files/query_index_rebuild_async.sql|  20 +
 .../coprocessor/BaseMetaDataEndpointObserver.java  |   6 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  22 +-
 .../coprocessor/MetaDataEndpointObserver.java  |   3 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +-
 .../PhoenixMetaDataCoprocessorHost.java|  14 +
 .../phoenix/coprocessor/TaskMetaDataEndpoint.java  | 127 
 .../phoenix/coprocessor/TaskRegionObserver.java|  34 +-
 .../coprocessor/generated/MetaDataProtos.java  |  62 +-
 .../coprocessor/generated/TaskMetaDataProtos.java  | 784 +
 .../coprocessor/tasks/IndexRebuildTask.java|  18 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |   7 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  20 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  35 +-
 .../phoenix/schema/task/SystemTaskParams.java  | 188 +
 .../java/org/apache/phoenix/schema/task/Task.java  | 152 +++-
 .../phoenix/util/TaskMetaDataServiceCallBack.java  |  67 ++
 .../coprocessor/TaskMetaDataEndpointTest.java  | 186 +
 phoenix-protocol/src/main/MetaDataService.proto|   1 +
 .../src/main/TaskMetaDataService.proto |  34 +
 28 files changed, 1876 insertions(+), 77 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index 0061b83..79f7302 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -21,10 +21,12 @@ import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.ADD_DATA;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.ADD_DELETE;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.CREATE_ADD;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.CREATE_DIVERGED_VIEW;
+import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.INDEX_REBUILD_ASYNC;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_ADD_DATA;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_ADD_DELETE;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_CREATE_ADD;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_CREATE_DIVERGED_VIEW;
+import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_INDEX_REBUILD_ASYNC;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.assertExpectedOutput;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.checkForPreConditions;
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.computeClientVersions;
@@ -34,6 +36,9 @@ import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.UpgradePr
 import static 
org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.UpgradeProps.SET_MAX_LOOK_BACK_AGE;
 import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -44,6 +49,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.phoenix.coprocessor.TaskMetaDataEndpoint;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import

[phoenix] branch master updated (a2618f1 -> 68ac1b4)

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from a2618f1  PHOENIX-6191: Creating a view which has its own new columns 
should also do checkAndPut checks on SYSTEM.MUTEX
 add 68ac1b4  PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct 
upserts into SYSTEM.TASK from the client

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/BackwardCompatibilityIT.java   |  71 ++-
 .../end2end/BackwardCompatibilityTestUtil.java |   3 +
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  17 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java |  10 +-
 .../phoenix/end2end/index/IndexMetadataIT.java |  18 +-
 .../gold_files/gold_query_index_rebuild_async.txt} |  10 +-
 .../sql_files/index_rebuild_async.sql} |  19 +-
 .../sql_files/query_index_rebuild_async.sql}   |   7 +-
 .../coprocessor/BaseMetaDataEndpointObserver.java  |   6 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  22 ++-
 .../coprocessor/MetaDataEndpointObserver.java  |   3 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +-
 .../PhoenixMetaDataCoprocessorHost.java|  10 +
 ...DataEndpoint.java => TaskMetaDataEndpoint.java} |  80 
 .../phoenix/coprocessor/TaskRegionObserver.java|  35 +++-
 .../coprocessor/generated/MetaDataProtos.java  |  62 ---
 ...MetaDataProtos.java => TaskMetaDataProtos.java} | 192 ++-
 .../coprocessor/tasks/IndexRebuildTask.java|  18 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |   7 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  21 ++-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  38 +++-
 .../phoenix/schema/task/SystemTaskParams.java  | 188 +++
 .../java/org/apache/phoenix/schema/task/Task.java  | 147 ++-
 .../TaskMetaDataServiceCallBack.java}  |  43 +++--
 .../coprocessor/TaskMetaDataEndpointTest.java  | 203 +
 phoenix-protocol/src/main/MetaDataService.proto|   1 +
 ...DataService.proto => TaskMetaDataService.proto} |  12 +-
 28 files changed, 984 insertions(+), 264 deletions(-)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/gold_files/gold_query_index_rebuild_async.txt} (87%)
 copy 
phoenix-core/src/it/{java/org/apache/phoenix/end2end/SplitSystemCatalogTests.java
 => resources/sql_files/index_rebuild_async.sql} (66%)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/sql_files/query_index_rebuild_async.sql} (89%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/{ChildLinkMetaDataEndpoint.java
 => TaskMetaDataEndpoint.java} (58%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/{ChildLinkMetaDataProtos.java
 => TaskMetaDataProtos.java} (72%)
 create mode 100644 
phoenix-core/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java
 copy 
phoenix-core/src/main/java/org/apache/phoenix/{query/ChildLinkMetaDataServiceCallBack.java
 => util/TaskMetaDataServiceCallBack.java} (55%)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
 copy phoenix-protocol/src/main/{ChildLinkMetaDataService.proto => 
TaskMetaDataService.proto} (80%)



[phoenix] branch master updated (a2618f1 -> 68ac1b4)

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from a2618f1  PHOENIX-6191: Creating a view which has its own new columns 
should also do checkAndPut checks on SYSTEM.MUTEX
 add 68ac1b4  PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct 
upserts into SYSTEM.TASK from the client

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/BackwardCompatibilityIT.java   |  71 ++-
 .../end2end/BackwardCompatibilityTestUtil.java |   3 +
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  17 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java |  10 +-
 .../phoenix/end2end/index/IndexMetadataIT.java |  18 +-
 .../gold_files/gold_query_index_rebuild_async.txt} |  10 +-
 .../sql_files/index_rebuild_async.sql} |  19 +-
 .../sql_files/query_index_rebuild_async.sql}   |   7 +-
 .../coprocessor/BaseMetaDataEndpointObserver.java  |   6 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  22 ++-
 .../coprocessor/MetaDataEndpointObserver.java  |   3 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +-
 .../PhoenixMetaDataCoprocessorHost.java|  10 +
 ...DataEndpoint.java => TaskMetaDataEndpoint.java} |  80 
 .../phoenix/coprocessor/TaskRegionObserver.java|  35 +++-
 .../coprocessor/generated/MetaDataProtos.java  |  62 ---
 ...MetaDataProtos.java => TaskMetaDataProtos.java} | 192 ++-
 .../coprocessor/tasks/IndexRebuildTask.java|  18 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |   7 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  21 ++-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  38 +++-
 .../phoenix/schema/task/SystemTaskParams.java  | 188 +++
 .../java/org/apache/phoenix/schema/task/Task.java  | 147 ++-
 .../TaskMetaDataServiceCallBack.java}  |  43 +++--
 .../coprocessor/TaskMetaDataEndpointTest.java  | 203 +
 phoenix-protocol/src/main/MetaDataService.proto|   1 +
 ...DataService.proto => TaskMetaDataService.proto} |  12 +-
 28 files changed, 984 insertions(+), 264 deletions(-)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/gold_files/gold_query_index_rebuild_async.txt} (87%)
 copy 
phoenix-core/src/it/{java/org/apache/phoenix/end2end/SplitSystemCatalogTests.java
 => resources/sql_files/index_rebuild_async.sql} (66%)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/sql_files/query_index_rebuild_async.sql} (89%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/{ChildLinkMetaDataEndpoint.java
 => TaskMetaDataEndpoint.java} (58%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/{ChildLinkMetaDataProtos.java
 => TaskMetaDataProtos.java} (72%)
 create mode 100644 
phoenix-core/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java
 copy 
phoenix-core/src/main/java/org/apache/phoenix/{query/ChildLinkMetaDataServiceCallBack.java
 => util/TaskMetaDataServiceCallBack.java} (55%)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
 copy phoenix-protocol/src/main/{ChildLinkMetaDataService.proto => 
TaskMetaDataService.proto} (80%)



[phoenix] branch master updated (a2618f1 -> 68ac1b4)

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from a2618f1  PHOENIX-6191: Creating a view which has its own new columns 
should also do checkAndPut checks on SYSTEM.MUTEX
 add 68ac1b4  PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct 
upserts into SYSTEM.TASK from the client

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/BackwardCompatibilityIT.java   |  71 ++-
 .../end2end/BackwardCompatibilityTestUtil.java |   3 +
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  17 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java |  10 +-
 .../phoenix/end2end/index/IndexMetadataIT.java |  18 +-
 .../gold_files/gold_query_index_rebuild_async.txt} |  10 +-
 .../sql_files/index_rebuild_async.sql} |  19 +-
 .../sql_files/query_index_rebuild_async.sql}   |   7 +-
 .../coprocessor/BaseMetaDataEndpointObserver.java  |   6 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  22 ++-
 .../coprocessor/MetaDataEndpointObserver.java  |   3 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +-
 .../PhoenixMetaDataCoprocessorHost.java|  10 +
 ...DataEndpoint.java => TaskMetaDataEndpoint.java} |  80 
 .../phoenix/coprocessor/TaskRegionObserver.java|  35 +++-
 .../coprocessor/generated/MetaDataProtos.java  |  62 ---
 ...MetaDataProtos.java => TaskMetaDataProtos.java} | 192 ++-
 .../coprocessor/tasks/IndexRebuildTask.java|  18 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |   7 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  21 ++-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  38 +++-
 .../phoenix/schema/task/SystemTaskParams.java  | 188 +++
 .../java/org/apache/phoenix/schema/task/Task.java  | 147 ++-
 .../TaskMetaDataServiceCallBack.java}  |  43 +++--
 .../coprocessor/TaskMetaDataEndpointTest.java  | 203 +
 phoenix-protocol/src/main/MetaDataService.proto|   1 +
 ...DataService.proto => TaskMetaDataService.proto} |  12 +-
 28 files changed, 984 insertions(+), 264 deletions(-)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/gold_files/gold_query_index_rebuild_async.txt} (87%)
 copy 
phoenix-core/src/it/{java/org/apache/phoenix/end2end/SplitSystemCatalogTests.java
 => resources/sql_files/index_rebuild_async.sql} (66%)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/sql_files/query_index_rebuild_async.sql} (89%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/{ChildLinkMetaDataEndpoint.java
 => TaskMetaDataEndpoint.java} (58%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/{ChildLinkMetaDataProtos.java
 => TaskMetaDataProtos.java} (72%)
 create mode 100644 
phoenix-core/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java
 copy 
phoenix-core/src/main/java/org/apache/phoenix/{query/ChildLinkMetaDataServiceCallBack.java
 => util/TaskMetaDataServiceCallBack.java} (55%)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
 copy phoenix-protocol/src/main/{ChildLinkMetaDataService.proto => 
TaskMetaDataService.proto} (80%)



[phoenix] branch master updated (a2618f1 -> 68ac1b4)

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from a2618f1  PHOENIX-6191: Creating a view which has its own new columns 
should also do checkAndPut checks on SYSTEM.MUTEX
 add 68ac1b4  PHOENIX-6155 : Provide a coprocessor endpoint to avoid direct 
upserts into SYSTEM.TASK from the client

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/BackwardCompatibilityIT.java   |  71 ++-
 .../end2end/BackwardCompatibilityTestUtil.java |   3 +
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  17 +-
 .../phoenix/end2end/SystemTablesUpgradeIT.java |  10 +-
 .../phoenix/end2end/index/IndexMetadataIT.java |  18 +-
 .../gold_files/gold_query_index_rebuild_async.txt} |  10 +-
 .../sql_files/index_rebuild_async.sql} |  19 +-
 .../sql_files/query_index_rebuild_async.sql}   |   7 +-
 .../coprocessor/BaseMetaDataEndpointObserver.java  |   6 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  22 ++-
 .../coprocessor/MetaDataEndpointObserver.java  |   3 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +-
 .../PhoenixMetaDataCoprocessorHost.java|  10 +
 ...DataEndpoint.java => TaskMetaDataEndpoint.java} |  80 
 .../phoenix/coprocessor/TaskRegionObserver.java|  35 +++-
 .../coprocessor/generated/MetaDataProtos.java  |  62 ---
 ...MetaDataProtos.java => TaskMetaDataProtos.java} | 192 ++-
 .../coprocessor/tasks/IndexRebuildTask.java|  18 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../org/apache/phoenix/protobuf/ProtobufUtil.java  |   7 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  21 ++-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  38 +++-
 .../phoenix/schema/task/SystemTaskParams.java  | 188 +++
 .../java/org/apache/phoenix/schema/task/Task.java  | 147 ++-
 .../TaskMetaDataServiceCallBack.java}  |  43 +++--
 .../coprocessor/TaskMetaDataEndpointTest.java  | 203 +
 phoenix-protocol/src/main/MetaDataService.proto|   1 +
 ...DataService.proto => TaskMetaDataService.proto} |  12 +-
 28 files changed, 984 insertions(+), 264 deletions(-)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/gold_files/gold_query_index_rebuild_async.txt} (87%)
 copy 
phoenix-core/src/it/{java/org/apache/phoenix/end2end/SplitSystemCatalogTests.java
 => resources/sql_files/index_rebuild_async.sql} (66%)
 copy 
phoenix-core/src/{main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java
 => it/resources/sql_files/query_index_rebuild_async.sql} (89%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/{ChildLinkMetaDataEndpoint.java
 => TaskMetaDataEndpoint.java} (58%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/{ChildLinkMetaDataProtos.java
 => TaskMetaDataProtos.java} (72%)
 create mode 100644 
phoenix-core/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java
 copy 
phoenix-core/src/main/java/org/apache/phoenix/{query/ChildLinkMetaDataServiceCallBack.java
 => util/TaskMetaDataServiceCallBack.java} (55%)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
 copy phoenix-protocol/src/main/{ChildLinkMetaDataService.proto => 
TaskMetaDataService.proto} (80%)



[phoenix] branch master updated (d0c3caf -> a2618f1)

2020-11-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from d0c3caf  PHOENIX-5601 Add a new coprocessor for PHOENIX_TTL - 
PhoenixTTLRegionObserver
 add a2618f1  PHOENIX-6191: Creating a view which has its own new columns 
should also do checkAndPut checks on SYSTEM.MUTEX

No new revisions were added by this update.

Summary of changes:
 .../end2end/ViewConcurrencyAndFailureIT.java   |  793 +++
 .../it/java/org/apache/phoenix/end2end/ViewIT.java | 1046 +++-
 .../org/apache/phoenix/end2end/ViewMetadataIT.java |  889 +++--
 .../org/apache/phoenix/schema/MetaDataClient.java  |   26 +-
 4 files changed, 1759 insertions(+), 995 deletions(-)
 create mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewConcurrencyAndFailureIT.java



[phoenix] branch master updated: PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() to ensure no splitting occurs when splitting is disabled

2020-11-11 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 2ad33a1  PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() 
to ensure no splitting occurs when splitting is disabled
2ad33a1 is described below

commit 2ad33a19b8d380bb1be411422de8e005047f76a1
Author: Chinmay Kulkarni 
AuthorDate: Tue Nov 10 17:57:59 2020 -0800

PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() to ensure no 
splitting occurs when splitting is disabled
---
 .../phoenix/end2end/SplitSystemCatalogIT.java  | 14 ++-
 .../end2end/SystemCatalogRollbackEnabledIT.java| 28 +-
 .../java/org/apache/phoenix/query/BaseTest.java|  3 ++-
 3 files changed, 27 insertions(+), 18 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
index 414f10c..9560a57 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -54,7 +53,8 @@ public class SplitSystemCatalogIT extends BaseTest {
doSetup(null);
 }
 
-public static synchronized void doSetup(Map props) throws 
Exception {
+public static synchronized void doSetup(Map props)
+throws Exception {
 NUM_SLAVES_BASE = 6;
 if (props == null) {
 props = Collections.emptyMap();
@@ -69,18 +69,20 @@ public class SplitSystemCatalogIT extends BaseTest {
 }
 }
 
-protected static void splitSystemCatalog() throws SQLException, Exception {
-try (Connection conn = DriverManager.getConnection(getUrl())) {
+protected static void splitSystemCatalog() throws Exception {
+try (Connection ignored = DriverManager.getConnection(getUrl())) {
 }
 String tableName = "TABLE";
 String fullTableName1 = SchemaUtil.getTableName(SCHEMA1, tableName);
 String fullTableName2 = SchemaUtil.getTableName(SCHEMA2, tableName);
 String fullTableName3 = SchemaUtil.getTableName(SCHEMA3, tableName);
 String fullTableName4 = SchemaUtil.getTableName(SCHEMA4, tableName);
-ArrayList tableList = Lists.newArrayList(fullTableName1, 
fullTableName2, fullTableName3);
+ArrayList tableList = Lists.newArrayList(fullTableName1,
+fullTableName2, fullTableName3);
 Map> tenantToTableMap = Maps.newHashMap();
 tenantToTableMap.put(null, tableList);
-tenantToTableMap.put(TENANT1, Lists.newArrayList(fullTableName2, 
fullTableName3));
+tenantToTableMap.put(TENANT1, Lists.newArrayList(fullTableName2,
+fullTableName3));
 tenantToTableMap.put(TENANT2, Lists.newArrayList(fullTableName4));
 splitSystemCatalog(tenantToTableMap);
 }
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
index 5a34a4f..fb2e379 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -51,8 +52,7 @@ import org.junit.experimental.categories.Category;
  * Tests various scenarios when
  * {@link QueryServices#ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK}
  * is set to true and SYSTEM.CATALOG should not be allowed to split.
- * Note that this config must
- * be set on both the client and server
+ * Note that this config must be set on both the client and server
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogRollbackEnabledIT extends BaseTest {
@@ -99,37 +99,43 @@ public class SystemCatalogRollbackEnabledIT extends 
BaseTest {
 return DriverManager.getConnection(getUrl(), tenantProps);
 }
 
+private void assertNumRegions(HBaseTestingUtility testUtil,
+TableName tableName, int expectedNumRegions) throws IOException {
+RegionLocator rl = 
testUtil.getConnection().getRegionLocator(tableName);
+assertEquals(expectedNumRegions, rl.getAllRegionLocations().size());
+}
 
 /**
  * Make sure that S

[phoenix] branch 4.x updated: PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() to ensure no splitting occurs when splitting is disabled

2020-11-11 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 565b0ea  PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() 
to ensure no splitting occurs when splitting is disabled
565b0ea is described below

commit 565b0eaa17de82e4bf6ea9634a5a30473e4167bf
Author: Chinmay Kulkarni 
AuthorDate: Tue Nov 10 17:57:59 2020 -0800

PHOENIX-6212: Improve SystemCatalogIT.testSystemTableSplit() to ensure no 
splitting occurs when splitting is disabled
---
 .../phoenix/end2end/SplitSystemCatalogIT.java  | 14 +
 .../end2end/SystemCatalogRollbackEnabledIT.java| 36 ++
 .../java/org/apache/phoenix/query/BaseTest.java|  3 +-
 3 files changed, 26 insertions(+), 27 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
index dce530f..b2075a7 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitSystemCatalogIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -54,7 +53,8 @@ public class SplitSystemCatalogIT extends BaseTest {
doSetup(null);
 }
 
-public static synchronized void doSetup(Map props) throws 
Exception {
+public static synchronized void doSetup(Map props)
+throws Exception {
 NUM_SLAVES_BASE = 6;
 if (props == null) {
 props = Collections.emptyMap();
@@ -69,18 +69,20 @@ public class SplitSystemCatalogIT extends BaseTest {
 }
 }
 
-protected static void splitSystemCatalog() throws SQLException, Exception {
-try (Connection conn = DriverManager.getConnection(getUrl())) {
+protected static void splitSystemCatalog() throws Exception {
+try (Connection ignored = DriverManager.getConnection(getUrl())) {
 }
 String tableName = "TABLE";
 String fullTableName1 = SchemaUtil.getTableName(SCHEMA1, tableName);
 String fullTableName2 = SchemaUtil.getTableName(SCHEMA2, tableName);
 String fullTableName3 = SchemaUtil.getTableName(SCHEMA3, tableName);
 String fullTableName4 = SchemaUtil.getTableName(SCHEMA4, tableName);
-ArrayList tableList = Lists.newArrayList(fullTableName1, 
fullTableName2, fullTableName3);
+ArrayList tableList = Lists.newArrayList(fullTableName1,
+fullTableName2, fullTableName3);
 Map> tenantToTableMap = Maps.newHashMap();
 tenantToTableMap.put(null, tableList);
-tenantToTableMap.put(TENANT1, Lists.newArrayList(fullTableName2, 
fullTableName3));
+tenantToTableMap.put(TENANT1, Lists.newArrayList(fullTableName2,
+fullTableName3));
 tenantToTableMap.put(TENANT2, Lists.newArrayList(fullTableName4));
 splitSystemCatalog(tenantToTableMap);
 }
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
index 2b69596..0241eab 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -31,7 +32,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -51,8 +51,7 @@ import org.junit.experimental.categories.Category;
  * Tests various scenarios when
  * {@link QueryServices#ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK}
  * is set to true and SYSTEM.CATALOG should not be allowed to split.
- * Note that this config must
- * be set on both the client and server
+ * Note that this config must be set on both the client and server
  */
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogRollbackEnabledIT extends BaseTest {
@@ -100,37 +99,34 @@ public class SystemCatalogRollbackEnabledIT extends 
BaseTest {
 return DriverManager.getConnection(getUrl(), tenantProps);
 }
 
+private void assertNumRegions(HBaseTestingUti

[phoenix] branch master updated (6889645 -> e49c8ff)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6889645  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add e49c8ff  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 61 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 75 insertions(+), 8 deletions(-)



[phoenix] branch master updated (6889645 -> e49c8ff)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6889645  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add e49c8ff  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 61 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 75 insertions(+), 8 deletions(-)



[phoenix] branch 4.x updated (20d2a6d -> 9f91707)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 20d2a6d  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add 9f91707  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 60 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 74 insertions(+), 8 deletions(-)



[phoenix] branch 4.x updated (20d2a6d -> 9f91707)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 20d2a6d  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add 9f91707  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 60 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 74 insertions(+), 8 deletions(-)



[phoenix] branch 4.x updated (20d2a6d -> 9f91707)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 20d2a6d  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add 9f91707  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 60 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 74 insertions(+), 8 deletions(-)



[phoenix] branch master updated (6889645 -> e49c8ff)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6889645  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add e49c8ff  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 61 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 75 insertions(+), 8 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6083 View index creation does a checkAndPut on an incorrect row key

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 9f91707  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key
9f91707 is described below

commit 9f91707f5f0fe108d316ebda8df8b38aadd63b16
Author: Richard Antal 
AuthorDate: Mon Nov 2 17:45:14 2020 +0100

PHOENIX-6083 View index creation does a checkAndPut on an incorrect row key
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 60 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 74 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 9a3f648..5856fbc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -44,6 +44,9 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -116,6 +119,63 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
 conn.close();
 }
 
+@Test
+public void testDroppingColumnWhileCreatingIndex() throws Exception {
+String schemaName = "S1";
+String tableName = generateUniqueName();
+String viewSchemaName = "S1";
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = "IND_" + generateUniqueName();
+String viewName = "VIEW_" + generateUniqueName();
+final String fullViewName = SchemaUtil.getTableName(viewSchemaName, 
viewName);
+
+createBaseTable(schemaName, tableName, false, null, null, true);
+try (Connection conn = getConnection()) {
+conn.setAutoCommit(true);
+conn.createStatement().execute("CREATE VIEW " + fullViewName + " 
AS SELECT * FROM " + fullTableName);
+conn.commit();
+final AtomicInteger exceptionCode = new AtomicInteger();
+final CountDownLatch doneSignal = new CountDownLatch(2);
+Runnable r1 = new Runnable() {
+
+@Override public void run() {
+try {
+conn.createStatement().execute("CREATE INDEX " + 
indexName + " ON " + fullViewName + " (v1)");
+} catch (SQLException e) {
+exceptionCode.set(e.getErrorCode());
+throw new RuntimeException(e);
+} finally {
+doneSignal.countDown();
+}
+}
+
+};
+Runnable r2 = new Runnable() {
+
+@Override public void run() {
+try {
+conn.createStatement().execute("ALTER TABLE " + 
fullTableName + " DROP COLUMN v1");
+} catch (SQLException e) {
+exceptionCode.set(e.getErrorCode());
+throw new RuntimeException(e);
+} finally {
+doneSignal.countDown();
+}
+}
+
+};
+Thread t1 = new Thread(r1);
+t1.start();
+Thread t2 = new Thread(r2);
+t2.start();
+
+t1.join();
+t2.join();
+doneSignal.await(60, TimeUnit.SECONDS);
+assertEquals(exceptionCode.get(), 301);
+}
+}
+
 private void createView(Connection conn, String schemaName, String 
viewName, String baseTableName) throws SQLException {
 if (isNamespaceMapped) {
 conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 27daf66..7c9fa08 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1682,12 +1682,17 @@ public class MetaDataClient {
 for (ColumnName colName : requiredCols) {
 // acquire the mutex using the global physical table name 
to
 // prevent this column from being dropped while the view 
is being created
+String 

[phoenix] branch master updated (6889645 -> e49c8ff)

2020-11-05 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6889645  PHOENIX-6086 : Take snapshot of all SYSTEM tables before 
attempting to upgrade them
 add e49c8ff  PHOENIX-6083 View index creation does a checkAndPut on an 
incorrect row key

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 61 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 22 +---
 2 files changed, 75 insertions(+), 8 deletions(-)



[phoenix] branch master updated: PHOENIX-6032: When phoenix.allow.system.catalog.rollback=true, a view still sees data from a column that was dropped

2020-11-03 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 184a054  PHOENIX-6032: When 
phoenix.allow.system.catalog.rollback=true, a view still sees data from a 
column that was dropped
184a054 is described below

commit 184a054aca70687648e62f9c62e287eae6555cfd
Author: Chinmay Kulkarni 
AuthorDate: Thu Oct 29 17:37:17 2020 -0700

PHOENIX-6032: When phoenix.allow.system.catalog.rollback=true, a view still 
sees data from a column that was dropped
---
 .../AlterParentTableWithSysCatRollbackIT.java  | 142 --
 .../apache/phoenix/end2end/SystemCatalogIT.java| 140 --
 .../end2end/SystemCatalogRollbackEnabledIT.java| 297 +
 .../java/org/apache/phoenix/util/ViewUtil.java |  20 +-
 4 files changed, 314 insertions(+), 285 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
deleted file mode 100644
index 8bf5405..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.end2end;
-
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class AlterParentTableWithSysCatRollbackIT extends BaseTest {
-
-private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort()
-+ ":/hbase";
-}
-
-@BeforeClass
-public static synchronized void doSetup() throws Exception {
-Map serverProps = Maps.newHashMapWithExpectedSize(1);
-
serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
-new ReadOnlyProps(Collections.emptyIterator()));
-}
-
-@Test
-public void testAddColumnOnParentTableView() throws Exception {
-try (Connection conn = DriverManager.getConnection(getJdbcUrl())) {
-String parentTableName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-String parentViewName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-String childViewName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-// create parent table
-String ddl = "CREATE TABLE " + parentTableName
-+ " (col1 INTEGER NOT NULL, col2 INTEGER " + "CONSTRAINT pk 
PRIMARY KEY (col1))";
-conn.createStatement().execute(ddl);
-
-// create view from table
-ddl = "CREATE VIEW " + parentViewName + " AS SELECT * FROM " + 
parentTableName;
-conn.createStatement().execute(ddl);
-try {
-ddl = "ALTER TABLE " + parentTableName + " ADD col4 INTEGER";
-conn.createStatement().execute(ddl);
-fail("ALTER TABLE should not be allowed on parent table");
-} catch (SQLException e) {
-
assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getE

[phoenix] branch 4.x updated: PHOENIX-6032: When phoenix.allow.system.catalog.rollback=true, a view still sees data from a column that was dropped

2020-11-03 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 11575c0  PHOENIX-6032: When 
phoenix.allow.system.catalog.rollback=true, a view still sees data from a 
column that was dropped
11575c0 is described below

commit 11575c0ee931ae4e012ea2388068b317127f1882
Author: Chinmay Kulkarni 
AuthorDate: Thu Oct 29 17:21:06 2020 -0700

PHOENIX-6032: When phoenix.allow.system.catalog.rollback=true, a view still 
sees data from a column that was dropped
---
 .../AlterParentTableWithSysCatRollbackIT.java  | 142 --
 .../apache/phoenix/end2end/SystemCatalogIT.java| 140 --
 .../end2end/SystemCatalogRollbackEnabledIT.java| 298 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   4 +-
 .../java/org/apache/phoenix/util/ViewUtil.java |  20 +-
 5 files changed, 316 insertions(+), 288 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
deleted file mode 100644
index 99a2124..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterParentTableWithSysCatRollbackIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.end2end;
-
-import com.google.common.collect.Maps;
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class AlterParentTableWithSysCatRollbackIT extends BaseTest {
-
-private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort()
-+ ":/hbase";
-}
-
-@BeforeClass
-public static synchronized void doSetup() throws Exception {
-Map serverProps = Maps.newHashMapWithExpectedSize(1);
-
serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
-new ReadOnlyProps(Collections.emptyMap()));
-}
-
-@Test
-public void testAddColumnOnParentTableView() throws Exception {
-try (Connection conn = DriverManager.getConnection(getJdbcUrl())) {
-String parentTableName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-String parentViewName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-String childViewName = 
SchemaUtil.getTableName(generateUniqueName(),
-generateUniqueName());
-// create parent table
-String ddl = "CREATE TABLE " + parentTableName
-+ " (col1 INTEGER NOT NULL, col2 INTEGER " + "CONSTRAINT pk 
PRIMARY KEY (col1))";
-conn.createStatement().execute(ddl);
-
-// create view from table
-ddl = "CREATE VIEW " + parentViewName + " AS SELECT * FROM " + 
parentTableName;
-conn.createStatement().execute(ddl);
-try {
-ddl = "ALTER TABLE " + parentTableName + " ADD col4 INTEGER";
-conn.createStatement().execute(ddl);
-fail("ALTER TABLE should not be allowed on parent table");
-} catch (SQLException e) {
-
assertEquals(SQLExceptionCode.CANNOT_MU

[phoenix] branch 4.x updated: PHOENIX-6129 : Optimize tableExists() call while retrieving correct MUTEX table

2020-10-26 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 1872f5f  PHOENIX-6129 : Optimize tableExists() call while retrieving 
correct MUTEX table
1872f5f is described below

commit 1872f5f2dc531d543ca6a9c111bb95966af6c6f8
Author: Viraj Jasani 
AuthorDate: Tue Oct 27 01:27:29 2020 +0530

PHOENIX-6129 : Optimize tableExists() call while retrieving correct MUTEX 
table

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/query/ConnectionQueryServicesImpl.java | 52 +-
 .../query/ConnectionQueryServicesImplTest.java | 43 ++
 2 files changed, 64 insertions(+), 31 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 8fe3eec..62825d2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -4335,19 +4335,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
  * making use of HBase's checkAndPut api.
  *
  * @return true if client won the race, false otherwise
- * @throws IOException
  * @throws SQLException
  */
 @VisibleForTesting
 public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp)
-throws IOException,
-SQLException {
+throws SQLException {
 Preconditions.checkArgument(currentServerSideTableTimestamp < 
MIN_SYSTEM_TABLE_TIMESTAMP);
-byte[] sysMutexPhysicalTableNameBytes = 
getSysMutexPhysicalTableNameBytes();
-if(sysMutexPhysicalTableNameBytes == null) {
-throw new 
UpgradeInProgressException(getVersion(currentServerSideTableTimestamp),
-getVersion(MIN_SYSTEM_TABLE_TIMESTAMP));
-}
 if (!writeMutexCell(null, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
 PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null)) {
 throw new 
UpgradeInProgressException(getVersion(currentServerSideTableTimestamp),
@@ -4360,15 +4353,13 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 public boolean writeMutexCell(String tenantId, String schemaName, String 
tableName,
 String columnName, String familyName) throws SQLException {
 try {
-byte[] rowKey =
-columnName != null
-? SchemaUtil.getColumnKey(tenantId, schemaName, 
tableName, columnName,
-familyName)
-: SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
+byte[] rowKey = columnName != null
+? SchemaUtil.getColumnKey(tenantId, schemaName, tableName,
+columnName, familyName)
+: SchemaUtil.getTableKey(tenantId, schemaName, tableName);
 // at this point the system mutex table should have been created or
 // an exception thrown
-byte[] sysMutexPhysicalTableNameBytes = 
getSysMutexPhysicalTableNameBytes();
-try (HTableInterface sysMutexTable = 
getTable(sysMutexPhysicalTableNameBytes)) {
+try (Table sysMutexTable = getSysMutexTable()) {
 byte[] family = 
PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
 byte[] qualifier = 
PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES;
 byte[] value = MUTEX_LOCKED;
@@ -4404,15 +4395,13 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 public void deleteMutexCell(String tenantId, String schemaName, String 
tableName,
 String columnName, String familyName) throws SQLException {
 try {
-byte[] rowKey =
-columnName != null
-? SchemaUtil.getColumnKey(tenantId, schemaName, 
tableName, columnName,
-familyName)
-: SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
+byte[] rowKey = columnName != null
+? SchemaUtil.getColumnKey(tenantId, schemaName, tableName,
+columnName, familyName)
+: SchemaUtil.getTableKey(tenantId, schemaName, tableName);
 // at this point the system mutex table should have been created or
 // an exception thrown
-byte[] sysMutexPhysicalTableNameBytes = 
getSysMutexPhysicalTableNameBytes();
-try (HTableInterface sysMutexTable = 
getTable(sysMutexPhysicalTableNameBytes)) {
+try (Table sysMutexTable = getSysMutexTa

[phoenix] branch master updated (601375e -> b9fb461)

2020-10-26 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 601375e  PHOENIX-6206 Update phoenix-thirdparty dependency version to 
1.0.0
 add b9fb461  PHOENIX-6129 : Optimize tableExists() call while retrieving 
correct MUTEX table (#920)

No new revisions were added by this update.

Summary of changes:
 .../phoenix/query/ConnectionQueryServicesImpl.java | 52 +-
 .../query/ConnectionQueryServicesImplTest.java | 40 +
 2 files changed, 61 insertions(+), 31 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6125 : Disable region split for SYSTEM.TASK

2020-10-19 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 4d2cf8c  PHOENIX-6125 : Disable region split for SYSTEM.TASK
4d2cf8c is described below

commit 4d2cf8c4c7e8e2bff4738843281ca1ba2bb49115
Author: Viraj Jasani 
AuthorDate: Mon Oct 19 23:07:21 2020 +0530

PHOENIX-6125 : Disable region split for SYSTEM.TASK

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 23 +++-
 ...ogUpgradeIT.java => SystemTablesUpgradeIT.java} | 42 ++
 .../InvalidRegionSplitPolicyException.java | 49 
 .../apache/phoenix/exception/SQLExceptionCode.java |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java | 65 +-
 .../org/apache/phoenix/query/QueryConstants.java   |  9 ++-
 .../phoenix/schema/SystemTaskSplitPolicy.java  | 28 ++
 .../query/ConnectionQueryServicesImplTest.java | 47 +++-
 8 files changed, 248 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index e8ef84f..d20610a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -49,6 +49,7 @@ import 
org.apache.curator.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -59,6 +60,7 @@ import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.SystemTaskSplitPolicy;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.After;
@@ -329,10 +331,27 @@ public class BackwardCompatibilityIT {
 executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
 assertExpectedOutput(QUERY_ADD_DELETE);
 }
-
+
+@Test
+public void testUpdatedSplitPolicyForSysTask() throws Exception {
+executeQueryWithClientVersion(compatibleClientVersion,
+CREATE_DIVERGED_VIEW);
+executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW);
+try (org.apache.hadoop.hbase.client.Connection conn =
+hbaseTestUtil.getConnection(); Admin admin = conn.getAdmin()) {
+HTableDescriptor tableDescriptor = admin.getTableDescriptor(
+TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME));
+assertEquals("split policy not updated with compatible client 
version: "
++ compatibleClientVersion,
+tableDescriptor.getRegionSplitPolicyClassName(),
+SystemTaskSplitPolicy.class.getName());
+}
+assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
+}
+
 private void checkForPreConditions() throws Exception {
 // For the first code cut of any major version, there wouldn't be any 
backward compatible
-// clients. Hence the test wouldn't run and just return true when the 
client  
+// clients. Hence the test wouldn't run and just return true when the 
client
 // version to be tested is same as current version
 
assumeFalse(compatibleClientVersion.contains(MetaDataProtocol.CURRENT_CLIENT_VERSION));
 // Make sure that cluster is clean before test execution with no 
system tables
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesUpgradeIT.java
similarity index 84%
rename from 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
rename to 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesUpgradeIT.java
index 3fcef36..e38c5e6 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesUpgradeIT.java
@@ -25,8 +25,11 @@ import java.sql.SQLException;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMeta

[phoenix] branch master updated (b9e0383 -> 370d8ed)

2020-10-19 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from b9e0383  PHOENIX-6193 PHOENIX-6151 slows down shading
 add 370d8ed  PHOENIX-6125 : Disable region split for SYSTEM.TASK

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 23 ++-
 ...ogUpgradeIT.java => SystemTablesUpgradeIT.java} | 42 +
 .../InvalidRegionSplitPolicyException.java | 49 +++
 .../apache/phoenix/exception/SQLExceptionCode.java |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java | 70 +-
 .../org/apache/phoenix/query/QueryConstants.java   |  5 +-
 ...SplitPolicy.java => SystemTaskSplitPolicy.java} | 13 ++--
 .../query/ConnectionQueryServicesImplTest.java | 48 ++-
 8 files changed, 230 insertions(+), 26 deletions(-)
 rename 
phoenix-core/src/it/java/org/apache/phoenix/end2end/{SystemCatalogUpgradeIT.java
 => SystemTablesUpgradeIT.java} (84%)
 create mode 100644 
phoenix-core/src/main/java/org/apache/phoenix/exception/InvalidRegionSplitPolicyException.java
 copy 
phoenix-core/src/main/java/org/apache/phoenix/schema/{SystemFunctionSplitPolicy.java
 => SystemTaskSplitPolicy.java} (80%)



[phoenix] branch 4.x updated: PHOENIX-6153 Table Map Reduce job after a Snapshot based job fails with CorruptedSnapshotException

2020-09-30 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 53e5b1a  PHOENIX-6153 Table Map Reduce job after a Snapshot based job 
fails with CorruptedSnapshotException
53e5b1a is described below

commit 53e5b1a44ee70784c4c11716e73673f28c0f72f4
Author: sakshamgangwar 
AuthorDate: Tue Sep 29 14:46:41 2020 -0700

PHOENIX-6153 Table Map Reduce job after a Snapshot based job fails with 
CorruptedSnapshotException

Signed-off-by: Chinmay Kulkarni 
---
 .../end2end/TableSnapshotReadsMapReduceIT.java | 162 ++---
 .../phoenix/mapreduce/PhoenixInputFormat.java  |   7 +-
 2 files changed, 147 insertions(+), 22 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index 8719c35..930ff0b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.sql.Array;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -40,14 +41,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.phoenix.iterate.TestingMapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.PhoenixOutputFormat;
+import org.apache.phoenix.mapreduce.PhoenixTestingInputFormat;
 import org.apache.phoenix.mapreduce.index.PhoenixIndexDBWritable;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.Assert;
 import org.junit.Before;
@@ -62,6 +71,10 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
 
   private static final Logger LOGGER = 
LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
 
+  private static final String STOCK_NAME = "STOCK_NAME";
+  private static final String RECORDING_YEAR = "RECORDING_YEAR";
+  private static final String RECORDINGS_QUARTER = "RECORDINGS_QUARTER";
+  private static final String MAX_RECORDING = "MAX_RECORDING";
   private final static String SNAPSHOT_NAME = "FOO";
   private static final String FIELD1 = "FIELD1";
   private static final String FIELD2 = "FIELD2";
@@ -69,7 +82,14 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
   private String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS %s ( " +
   " FIELD1 VARCHAR NOT NULL , FIELD2 VARCHAR , FIELD3 INTEGER CONSTRAINT 
pk PRIMARY KEY (FIELD1 ))";
   private String UPSERT = "UPSERT into %s values (?, ?, ?)";
-
+  private static final String CREATE_STOCK_TABLE =
+  "CREATE TABLE IF NOT EXISTS %s ( " + STOCK_NAME + " VARCHAR NOT NULL 
, " + RECORDING_YEAR
+  + "  INTEGER NOT  NULL,  " + RECORDINGS_QUARTER + " "
+  + " DOUBLE array[] CONSTRAINT pk PRIMARY KEY ( " + 
STOCK_NAME + ", "
+  + RECORDING_YEAR + " )) " + "SPLIT ON ('AA')";
+  private static final String CREATE_STOCK_STATS_TABLE =
+  "CREATE TABLE IF NOT EXISTS %s(" + STOCK_NAME + " VARCHAR NOT NULL , 
" + MAX_RECORDING
+  + " DOUBLE CONSTRAINT pk PRIMARY KEY (" + STOCK_NAME + " ))";
   private static List> result;
   private long timestamp;
   private String tableName;
@@ -86,10 +106,11 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
   @Before
   public void before() throws SQLException, IOException {
 // create table
-Connection conn = DriverManager.getConnection(getUrl());
-tableName = generateUniqueName();
-conn.createStatement().execute(String.format(CREATE_TABLE, tableName));
-   

[phoenix] branch master updated: PHOENIX-6153 Table Map Reduce job after a Snapshot based job fails with CorruptedSnapshotException

2020-09-30 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new bb7f000  PHOENIX-6153 Table Map Reduce job after a Snapshot based job 
fails with CorruptedSnapshotException
bb7f000 is described below

commit bb7f00095e0edec3a0678e054c9a7ab96fc0e09e
Author: sakshamgangwar 
AuthorDate: Thu Sep 24 17:03:40 2020 -0700

PHOENIX-6153 Table Map Reduce job after a Snapshot based job fails with 
CorruptedSnapshotException

Signed-off-by: Chinmay Kulkarni 
---
 .../end2end/TableSnapshotReadsMapReduceIT.java | 163 ++---
 .../phoenix/mapreduce/PhoenixInputFormat.java  |  14 +-
 2 files changed, 150 insertions(+), 27 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index 3bc31f2..2f2a188 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.sql.Array;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -41,14 +42,21 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.phoenix.iterate.TestingMapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.PhoenixOutputFormat;
+import org.apache.phoenix.mapreduce.PhoenixTestingInputFormat;
 import org.apache.phoenix.mapreduce.index.PhoenixIndexDBWritable;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
-import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.Assert;
 import org.junit.Before;
@@ -63,6 +71,10 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
 
   private static final Logger LOGGER = 
LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
 
+  private static final String STOCK_NAME = "STOCK_NAME";
+  private static final String RECORDING_YEAR = "RECORDING_YEAR";
+  private static final String RECORDINGS_QUARTER = "RECORDINGS_QUARTER";
+  private static final String MAX_RECORDING = "MAX_RECORDING";
   private final static String SNAPSHOT_NAME = "FOO";
   private static final String FIELD1 = "FIELD1";
   private static final String FIELD2 = "FIELD2";
@@ -70,7 +82,14 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
   private String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS %s ( " +
   " FIELD1 VARCHAR NOT NULL , FIELD2 VARCHAR , FIELD3 INTEGER CONSTRAINT 
pk PRIMARY KEY (FIELD1 ))";
   private String UPSERT = "UPSERT into %s values (?, ?, ?)";
-
+  private static final String CREATE_STOCK_TABLE =
+  "CREATE TABLE IF NOT EXISTS %s ( " + STOCK_NAME + " VARCHAR NOT NULL 
, " + RECORDING_YEAR
+  + "  INTEGER NOT  NULL,  " + RECORDINGS_QUARTER + " "
+  + " DOUBLE array[] CONSTRAINT pk PRIMARY KEY ( " + 
STOCK_NAME + ", "
+  + RECORDING_YEAR + " )) " + "SPLIT ON ('AA')";
+  private static final String CREATE_STOCK_STATS_TABLE =
+  "CREATE TABLE IF NOT EXISTS %s(" + STOCK_NAME + " VARCHAR NOT NULL , 
" + MAX_RECORDING
+  + " DOUBLE CONSTRAINT pk PRIMARY KEY (" + STOCK_NAME + " ))";
   private static List> result;
   private long timestamp;
   private String tableName;
@@ -87,11 +106,11 @@ public class TableSnapshotReadsMapReduceIT extends 
BaseUniqueNamesOwnClusterIT {
   @Before
   public void before() throws SQLException, IOException {
 // create table
-Connection conn = DriverManager.getConnection(getUrl());
-tableName = generateUniqueName();
-conn.createStatement().execute(String.format(CREATE_TABLE, tableName));
-c

[phoenix] branch master updated: PHOENIX-6075: DDLs issued via a tenant-specific connection do not write SYSTEM.MUTEX cells

2020-09-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new db71d5f  PHOENIX-6075: DDLs issued via a tenant-specific connection do 
not write SYSTEM.MUTEX cells
db71d5f is described below

commit db71d5f796ccdc99e31308da9cc7a533f4518a8b
Author: Chinmay Kulkarni 
AuthorDate: Thu Sep 17 01:20:09 2020 -0700

PHOENIX-6075: DDLs issued via a tenant-specific connection do not write 
SYSTEM.MUTEX cells
---
 .../it/java/org/apache/phoenix/end2end/ViewIT.java | 1248 +++-
 .../query/DelegateConnectionQueryServices.java |4 +-
 2 files changed, 704 insertions(+), 548 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 3e6027a..1a39049 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -39,7 +39,6 @@ import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -98,8 +97,10 @@ public class ViewIT extends SplitSystemCatalogIT {
 protected String transactionProvider;
 protected boolean columnEncoded;
 
-private static final String FAILED_VIEWNAME = 
SchemaUtil.getTableName(SCHEMA2, "FAILED_VIEW");
-private static final String SLOW_VIEWNAME_PREFIX = 
SchemaUtil.getTableName(SCHEMA2, "SLOW_VIEW");
+private static final String FAILED_VIEWNAME =
+SchemaUtil.getTableName(SCHEMA2, "FAILED_VIEW");
+private static final String SLOW_VIEWNAME_PREFIX =
+SchemaUtil.getTableName(SCHEMA2, "SLOW_VIEW");
 
 private static volatile CountDownLatch latch1 = null;
 private static volatile CountDownLatch latch2 = null;
@@ -111,7 +112,7 @@ public class ViewIT extends SplitSystemCatalogIT {
 this.transactionProvider = transactionProvider;
 this.columnEncoded = columnEncoded;
 if (transactionProvider != null) {
-optionBuilder.append(" TRANSACTION_PROVIDER='" + 
transactionProvider + "'");
+optionBuilder.append(" 
TRANSACTION_PROVIDER='").append(transactionProvider).append("'");
 }
 if (!columnEncoded) {
 if (optionBuilder.length()!=0)
@@ -121,7 +122,8 @@ public class ViewIT extends SplitSystemCatalogIT {
 this.tableDDLOptions = optionBuilder.toString();
 }
 
-@Parameters(name="ViewIT_transactionProvider={0}, columnEncoded={1}") // 
name is used by failsafe as file name in reports
+// name is used by failsafe as file name in reports
+@Parameters(name="ViewIT_transactionProvider={0}, columnEncoded={1}")
 public static synchronized Collection data() {
 return TestUtil.filterTxParamData(Arrays.asList(new Object[][] { 
 { "TEPHRA", false }, { "TEPHRA", true },
@@ -132,14 +134,14 @@ public class ViewIT extends SplitSystemCatalogIT {
 @BeforeClass
 public static synchronized void doSetup() throws Exception {
 NUM_SLAVES_BASE = 6;
-Map props = Collections.emptyMap();
 boolean splitSystemCatalog = (driver == null);
 Map serverProps = Maps.newHashMapWithExpectedSize(1);
 serverProps.put(QueryServices.PHOENIX_ACLS_ENABLED, "true");
 
serverProps.put(PhoenixMetaDataCoprocessorHost.PHOENIX_META_DATA_COPROCESSOR_CONF_KEY,
 TestMetaDataRegionObserver.class.getName());
 serverProps.put("hbase.coprocessor.abortonerror", "false");
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(props.entrySet().iterator()));
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
+ReadOnlyProps.EMPTY_PROPS);
 // Split SYSTEM.CATALOG once after the mini-cluster is started
 if (splitSystemCatalog) {
 // splitSystemCatalog is incompatible with the balancer chore
@@ -215,7 +217,7 @@ public class ViewIT extends SplitSystemCatalogIT {
 if (!result) {
 throw new RuntimeException("Second task took too 
long to complete");
 }
-} catch (InterruptedException e) {
+} catch (InterruptedException ignored) {
 }
 }
 }
@@ -228,135 +230,153 @@ public class ViewIT extends SplitSystemCatalogIT {
 String fullTableName = SchemaUtil.getTableName(SCHEMA1, 
generateUniqueName());
 String fullViewName1 = Schem

[phoenix] branch 4.x updated: PHOENIX-6075: DDLs issued via a tenant-specific connection do not write SYSTEM.MUTEX cells

2020-09-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 0b9e734  PHOENIX-6075: DDLs issued via a tenant-specific connection do 
not write SYSTEM.MUTEX cells
0b9e734 is described below

commit 0b9e7349517724d81f9a4ac005054cf36e9f75c8
Author: Chinmay Kulkarni 
AuthorDate: Thu Sep 17 01:20:09 2020 -0700

PHOENIX-6075: DDLs issued via a tenant-specific connection do not write 
SYSTEM.MUTEX cells
---
 .../it/java/org/apache/phoenix/end2end/ViewIT.java | 1261 +++-
 .../query/DelegateConnectionQueryServices.java |4 +-
 2 files changed, 712 insertions(+), 553 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index e896513..d4305ea 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -39,7 +39,6 @@ import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -96,8 +95,10 @@ public class ViewIT extends SplitSystemCatalogIT {
 protected String transactionProvider;
 protected boolean columnEncoded;
 
-private static final String FAILED_VIEWNAME = 
SchemaUtil.getTableName(SCHEMA2, "FAILED_VIEW");
-private static final String SLOW_VIEWNAME_PREFIX = 
SchemaUtil.getTableName(SCHEMA2, "SLOW_VIEW");
+private static final String FAILED_VIEWNAME =
+SchemaUtil.getTableName(SCHEMA2, "FAILED_VIEW");
+private static final String SLOW_VIEWNAME_PREFIX =
+SchemaUtil.getTableName(SCHEMA2, "SLOW_VIEW");
 
 private static volatile CountDownLatch latch1 = null;
 private static volatile CountDownLatch latch2 = null;
@@ -109,7 +110,7 @@ public class ViewIT extends SplitSystemCatalogIT {
 this.transactionProvider = transactionProvider;
 this.columnEncoded = columnEncoded;
 if (transactionProvider != null) {
-optionBuilder.append(" TRANSACTION_PROVIDER='" + 
transactionProvider + "'");
+optionBuilder.append(" 
TRANSACTION_PROVIDER='").append(transactionProvider).append("'");
 }
 if (!columnEncoded) {
 if (optionBuilder.length()!=0)
@@ -119,7 +120,8 @@ public class ViewIT extends SplitSystemCatalogIT {
 this.tableDDLOptions = optionBuilder.toString();
 }
 
-@Parameters(name="ViewIT_transactionProvider={0}, columnEncoded={1}") // 
name is used by failsafe as file name in reports
+// name is used by failsafe as file name in reports
+@Parameters(name="ViewIT_transactionProvider={0}, columnEncoded={1}")
 public static synchronized Collection data() {
 return TestUtil.filterTxParamData(Arrays.asList(new Object[][] { 
 { "TEPHRA", false }, { "TEPHRA", true },
@@ -130,14 +132,14 @@ public class ViewIT extends SplitSystemCatalogIT {
 @BeforeClass
 public static synchronized void doSetup() throws Exception {
 NUM_SLAVES_BASE = 6;
-Map props = Collections.emptyMap();
 boolean splitSystemCatalog = (driver == null);
 Map serverProps = Maps.newHashMapWithExpectedSize(1);
 serverProps.put(QueryServices.PHOENIX_ACLS_ENABLED, "true");
 
serverProps.put(PhoenixMetaDataCoprocessorHost.PHOENIX_META_DATA_COPROCESSOR_CONF_KEY,
 TestMetaDataRegionObserver.class.getName());
 serverProps.put("hbase.coprocessor.abortonerror", "false");
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(props.entrySet().iterator()));
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
+ReadOnlyProps.EMPTY_PROPS);
 // Split SYSTEM.CATALOG once after the mini-cluster is started
 if (splitSystemCatalog) {
 // splitSystemCatalog is incompatible with the balancer chore
@@ -157,21 +159,24 @@ public class ViewIT extends SplitSystemCatalogIT {
 public static class TestMetaDataRegionObserver extends 
BaseMetaDataEndpointObserver {
 
 @Override
-public void 
preAlterTable(ObserverContext ctx, String 
tenantId,
-String tableName, TableName physicalTableName, TableName 
parentPhysicalTableName, PTableType type) throws IOException{
+public void 
preAlterTable(ObserverContext ctx,
+String tenantId, String tableName, TableName physicalTableName,
+TableName parentPhysicalTableName, PTableType type) throws 
IOExcep

[phoenix] branch 4.x updated: PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster connected to by a 4.15+ client

2020-09-16 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 71f8af0  PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh 
cluster connected to by a 4.15+ client
71f8af0 is described below

commit 71f8af07d50c49d0f681c4f2353d94cbd57166ea
Author: Chinmay Kulkarni 
AuthorDate: Tue Sep 15 20:15:10 2020 -0700

PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster 
connected to by a 4.15+ client
---
 ...ava => SystemTablesCreationOnConnectionIT.java} | 515 -
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  |   1 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  67 ++-
 .../query/ConnectionlessQueryServicesImpl.java |   2 +-
 .../org/apache/phoenix/query/QueryConstants.java   | 392 ++--
 .../query/ConnectionQueryServicesImplTest.java |  98 +++-
 6 files changed, 694 insertions(+), 381 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
similarity index 55%
rename from 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
rename to 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
index d42ea28..f70e005 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
@@ -18,6 +18,9 @@
 package org.apache.phoenix.end2end;
 
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -29,6 +32,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -39,10 +43,12 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
@@ -63,13 +69,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(NeedsOwnMiniClusterTest.class)
-public class SystemCatalogCreationOnConnectionIT {
+public class SystemTablesCreationOnConnectionIT {
 private HBaseTestingUtility testUtil = null;
 private Set hbaseTables;
 private static boolean setOldTimestampToInduceUpgrade = false;
 private static int countUpgradeAttempts;
-// This flag is used to figure out if the SYSCAT schema was actually 
upgraded or not, based on the timestamp of SYSCAT
-// (different from an upgrade attempt)
+// This flag is used to figure out if the SYSCAT schema was actually 
upgraded or not, based on
+// the timestamp of SYSCAT (different from an upgrade attempt)
 private static int actualSysCatUpgrades;
 private static final String PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG = 
"SYSTEM:CATALOG";
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
@@ -79,8 +85,9 @@ public class SystemCatalogCreationOnConnectionIT {
 + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))";
 private static final String SELECT_STMT = "SELECT * FROM %s";
 private static final String DELETE_STMT = "DELETE FROM %s";
-private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON 
%s (K1) INCLUDE (K2)";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMYIDX ON 
%s (K1) INCLUDE (K2)";
 private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 
'B')";
+private static final String QUERY_SYSTEM_CATALOG = "SELECT * FROM 
SYSTEM.CATALOG LIMIT 1";
 
 private static final Set PHOENIX_SYSTEM_TAB

[phoenix] branch master updated: PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster connected to by a 4.15+ client

2020-09-16 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 740ce13  PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh 
cluster connected to by a 4.15+ client
740ce13 is described below

commit 740ce13210cf6d876abfde52a58e10fa407b5018
Author: Chinmay Kulkarni 
AuthorDate: Mon Sep 14 21:43:33 2020 -0700

PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster 
connected to by a 4.15+ client
---
 ...ava => SystemTablesCreationOnConnectionIT.java} | 515 -
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  |   1 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  76 ++-
 .../query/ConnectionlessQueryServicesImpl.java |   2 +-
 .../org/apache/phoenix/query/QueryConstants.java   | 382 +--
 .../query/ConnectionQueryServicesImplTest.java | 101 +++-
 6 files changed, 698 insertions(+), 379 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
similarity index 55%
rename from 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
rename to 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
index de047a3..627d7b2 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
@@ -18,6 +18,9 @@
 package org.apache.phoenix.end2end;
 
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -29,6 +32,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -39,10 +43,12 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
@@ -63,13 +69,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(NeedsOwnMiniClusterTest.class)
-public class SystemCatalogCreationOnConnectionIT {
+public class SystemTablesCreationOnConnectionIT {
 private HBaseTestingUtility testUtil = null;
 private Set hbaseTables;
 private static boolean setOldTimestampToInduceUpgrade = false;
 private static int countUpgradeAttempts;
-// This flag is used to figure out if the SYSCAT schema was actually 
upgraded or not, based on the timestamp of SYSCAT
-// (different from an upgrade attempt)
+// This flag is used to figure out if the SYSCAT schema was actually 
upgraded or not, based on
+// the timestamp of SYSCAT (different from an upgrade attempt)
 private static int actualSysCatUpgrades;
 private static final String PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG = 
"SYSTEM:CATALOG";
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
@@ -79,8 +85,9 @@ public class SystemCatalogCreationOnConnectionIT {
 + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))";
 private static final String SELECT_STMT = "SELECT * FROM %s";
 private static final String DELETE_STMT = "DELETE FROM %s";
-private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON 
%s (K1) INCLUDE (K2)";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMYIDX ON 
%s (K1) INCLUDE (K2)";
 private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 
'B')";
+private static final String QUERY_SYSTEM_CATALOG = "SELECT * FROM 
SYSTEM.CATALOG LIMIT 1";
 
 private static final Set PHOENIX_SYSTEM_TAB

[phoenix] branch 4.x updated: PHOENIX-6069: We should check that the parent table key is in the region in the MetaDataEndpointImpl.dropTable code

2020-09-10 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new e3c7b4b  PHOENIX-6069: We should check that the parent table key is in 
the region in the MetaDataEndpointImpl.dropTable code
e3c7b4b is described below

commit e3c7b4bdce2524eb4fd1e7eb0ccd3454fcca81ce
Author: Chinmay Kulkarni 
AuthorDate: Tue Aug 11 11:40:42 2020 -0700

PHOENIX-6069: We should check that the parent table key is in the region in 
the MetaDataEndpointImpl.dropTable code
---
 .../main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 00ce50f..236b3f5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2228,7 +2228,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 // Only lock parent table for indexes
 if (parentTableName != null && pTableType == PTableType.INDEX) {
 parentLockKey = SchemaUtil.getTableKey(tenantIdBytes, 
schemaName, parentTableName);
-result = checkTableKeyInRegion(lockKey, region);
+result = checkTableKeyInRegion(parentLockKey, region);
 if (result != null) {
 done.run(MetaDataMutationResult.toProto(result));
 return;



[phoenix] branch master updated: PHOENIX-6069: We should check that the parent table key is in the region in the MetaDataEndpointImpl.dropTable code

2020-09-10 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 747004e  PHOENIX-6069: We should check that the parent table key is in 
the region in the MetaDataEndpointImpl.dropTable code
747004e is described below

commit 747004e70ac8f06d171a443e31657338616864bc
Author: Chinmay Kulkarni 
AuthorDate: Tue Aug 11 11:40:42 2020 -0700

PHOENIX-6069: We should check that the parent table key is in the region in 
the MetaDataEndpointImpl.dropTable code
---
 .../main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index b0901f1..54883f8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2248,7 +2248,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 // Only lock parent table for indexes
 if (parentTableName != null && pTableType == PTableType.INDEX) {
 parentLockKey = SchemaUtil.getTableKey(tenantIdBytes, 
schemaName, parentTableName);
-result = checkTableKeyInRegion(lockKey, region);
+result = checkTableKeyInRegion(parentLockKey, region);
 if (result != null) {
 done.run(MetaDataMutationResult.toProto(result));
 return;



[phoenix] branch master updated: PHOENIX-5958: Diverged view created via an older client still sees dropped column data

2020-08-03 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new ffeddf5  PHOENIX-5958: Diverged view created via an older client still 
sees dropped column data
ffeddf5 is described below

commit ffeddf55328ac69dac6ee565abec34e1f7002d13
Author: Chinmay Kulkarni 
AuthorDate: Wed Jul 29 23:48:47 2020 -0700

PHOENIX-5958: Diverged view created via an older client still sees dropped 
column data
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 120 
 .../gold_query_create_diverged_view.txt}   |  16 +-
 .../{query.sql => create_diverged_view.sql}|  10 +-
 .../{query_more.sql => query_add_data.sql} |   0
 .../sql_files/{query.sql => query_create_add.sql}  |   0
 .../{query.sql => query_create_diverged_view.sql}  |  10 +-
 .../phoenix/coprocessor/AddColumnMutator.java  |   1 +
 .../apache/phoenix/coprocessor/ColumnMutator.java  |   4 +-
 .../phoenix/coprocessor/DropColumnMutator.java |  19 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   2 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |   9 +-
 .../java/org/apache/phoenix/schema/PTableImpl.java |   4 +-
 .../java/org/apache/phoenix/util/ViewUtil.java | 207 -
 13 files changed, 249 insertions(+), 153 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index 8dae498..c0332f8 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -21,6 +21,7 @@ import static 
org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeFalse;
 
@@ -84,20 +85,24 @@ import com.google.common.collect.Lists;
 public class BackwardCompatibilityIT {
 
 private static final String SQL_DIR = "sql_files/";
-private static final String RESULT_DIR = "gold_files/";
+private static final String RESULTS_AND_GOLD_FILES_DIR = "gold_files/";
 private static final String COMPATIBLE_CLIENTS_JSON =
 "compatible_client_versions.json";
 private static final String BASH = "/bin/bash";
 private static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
+private static final String QUERY_PREFIX = "query_";
 private static final String RESULT_PREFIX = "result_";
+private static final String GOLD_PREFIX = "gold_";
 private static final String SQL_EXTENSION = ".sql";
 private static final String TEXT_EXTENSION = ".txt";
 private static final String CREATE_ADD = "create_add";
+private static final String CREATE_DIVERGED_VIEW = "create_diverged_view";
 private static final String ADD_DATA = "add_data";
 private static final String ADD_DELETE = "add_delete";
-private static final String QUERY = "query";
-private static final String QUERY_MORE = "query_more";
-private static final String QUERY_ADD_DELETE = "query_add_delete";
+private static final String QUERY_CREATE_ADD = QUERY_PREFIX + CREATE_ADD;
+private static final String QUERY_ADD_DATA = QUERY_PREFIX + ADD_DATA;
+private static final String QUERY_ADD_DELETE = QUERY_PREFIX + ADD_DELETE;
+private static final String QUERY_CREATE_DIVERGED_VIEW = QUERY_PREFIX + 
CREATE_DIVERGED_VIEW;
 private static final String MVN_HOME = "maven.home";
 private static final String JAVA_TMP_DIR = "java.io.tmpdir";
 
@@ -176,15 +181,58 @@ public class BackwardCompatibilityIT {
 public void testUpsertWithOldClient() throws Exception {
 // Insert data with old client and read with new client
 executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-executeQueriesWithCurrentVersion(QUERY);
-assertExpectedOutput(CREATE_ADD, QUERY);
+executeQueriesWithCurrentVersion(QUERY_CREATE_ADD);
+assertExpectedOutput(QUERY_CREATE_ADD);
+}
+
+@Test
+public void testCreateDivergedViewWithOldClientReadFromNewClient() throws 
Exception {
+// Create a base table, view and make it diverge from an old client
+executeQueryWithClientVersion(compatibleClientVersion, 
CREATE_DIVERGED_VIEW);
+executeQueriesWithCurrentVersion(QUERY_CREATE_

[phoenix] branch 4.x updated: PHOENIX-5958: Diverged view created via an older client still sees dropped column data

2020-08-03 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 1d84495  PHOENIX-5958: Diverged view created via an older client still 
sees dropped column data
1d84495 is described below

commit 1d844950bb4ec8221873ecd2b094c20f427cd984
Author: Chinmay Kulkarni 
AuthorDate: Wed Jul 29 23:48:47 2020 -0700

PHOENIX-5958: Diverged view created via an older client still sees dropped 
column data
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 120 
 .../gold_query_create_diverged_view.txt}   |  16 +-
 .../{query.sql => create_diverged_view.sql}|  10 +-
 .../{query_more.sql => query_add_data.sql} |   0
 .../sql_files/{query.sql => query_create_add.sql}  |   0
 .../{query.sql => query_create_diverged_view.sql}  |  10 +-
 .../phoenix/coprocessor/AddColumnMutator.java  |   3 +-
 .../apache/phoenix/coprocessor/ColumnMutator.java  |   3 +-
 .../phoenix/coprocessor/DropColumnMutator.java |  20 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   4 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |   8 +-
 .../java/org/apache/phoenix/schema/PTableImpl.java |   4 +-
 .../java/org/apache/phoenix/util/ViewUtil.java | 213 -
 13 files changed, 251 insertions(+), 160 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index fd1adc9..fa614ce 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -21,6 +21,7 @@ import static 
org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeFalse;
 
@@ -83,20 +84,24 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 public class BackwardCompatibilityIT {
 
 private static final String SQL_DIR = "sql_files/";
-private static final String RESULT_DIR = "gold_files/";
+private static final String RESULTS_AND_GOLD_FILES_DIR = "gold_files/";
 private static final String COMPATIBLE_CLIENTS_JSON = 
 "compatible_client_versions.json";
 private static final String BASH = "/bin/bash";
 private static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
+private static final String QUERY_PREFIX = "query_";
 private static final String RESULT_PREFIX = "result_";
+private static final String GOLD_PREFIX = "gold_";
 private static final String SQL_EXTENSION = ".sql";
 private static final String TEXT_EXTENSION = ".txt";
 private static final String CREATE_ADD = "create_add";
+private static final String CREATE_DIVERGED_VIEW = "create_diverged_view";
 private static final String ADD_DATA = "add_data";
 private static final String ADD_DELETE = "add_delete";
-private static final String QUERY = "query";
-private static final String QUERY_MORE = "query_more";
-private static final String QUERY_ADD_DELETE = "query_add_delete";
+private static final String QUERY_CREATE_ADD = QUERY_PREFIX + CREATE_ADD;
+private static final String QUERY_ADD_DATA = QUERY_PREFIX + ADD_DATA;
+private static final String QUERY_ADD_DELETE = QUERY_PREFIX + ADD_DELETE;
+private static final String QUERY_CREATE_DIVERGED_VIEW = QUERY_PREFIX + 
CREATE_DIVERGED_VIEW;
 private static final String MVN_HOME = "maven.home";
 private static final String JAVA_TMP_DIR = "java.io.tmpdir";
 
@@ -172,15 +177,58 @@ public class BackwardCompatibilityIT {
 public void testUpsertWithOldClient() throws Exception {
 // Insert data with old client and read with new client
 executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-executeQueriesWithCurrentVersion(QUERY);
-assertExpectedOutput(CREATE_ADD, QUERY);
+executeQueriesWithCurrentVersion(QUERY_CREATE_ADD);
+assertExpectedOutput(QUERY_CREATE_ADD);
+}
+
+@Test
+public void testCreateDivergedViewWithOldClientReadFromNewClient() throws 
Exception {
+// Create a base table, view and make it diverge from an old client
+executeQueryWithClientVersion(compatibleClientVersion, 
CREATE_DIVERGED_VIEW);
+executeQueriesWithCurrentVersion(QUERY_

[phoenix] branch master updated: PHOENIX-6026: Fix BackwardCompatibilityIT so it can run locally

2020-07-20 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 218a71c  PHOENIX-6026: Fix BackwardCompatibilityIT so it can run 
locally
218a71c is described below

commit 218a71c07259a6ac4660335bf3636325e1ee138d
Author: Chinmay Kulkarni 
AuthorDate: Fri Jul 17 20:20:27 2020 -0700

PHOENIX-6026: Fix BackwardCompatibilityIT so it can run locally
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 216 +++--
 .../it/{ => resources}/scripts/execute_query.sh|   0
 2 files changed, 118 insertions(+), 98 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index cf28559..fde2f69 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -21,15 +21,18 @@ import static 
org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeFalse;
 
 import java.io.BufferedReader;
 import java.io.BufferedWriter;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.FileWriter;
+import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.net.URL;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -80,10 +83,12 @@ import com.google.common.collect.Lists;
 @Category(NeedsOwnMiniClusterTest.class)
 public class BackwardCompatibilityIT {
 
-private static final String SQL_DIR = "src/it/resources/sql_files/";
-private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String SQL_DIR = "sql_files/";
+private static final String RESULT_DIR = "gold_files/";
 private static final String COMPATIBLE_CLIENTS_JSON =
-"src/it/resources/compatible_client_versions.json";
+"compatible_client_versions.json";
+private static final String BASH = "/bin/bash";
+private static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
 private static final String RESULT_PREFIX = "result_";
 private static final String SQL_EXTENSION = ".sql";
 private static final String TEXT_EXTENSION = ".txt";
@@ -93,6 +98,8 @@ public class BackwardCompatibilityIT {
 private static final String QUERY = "query";
 private static final String QUERY_MORE = "query_more";
 private static final String QUERY_ADD_DELETE = "query_add_delete";
+private static final String MVN_HOME = "maven.home";
+private static final String JAVA_TMP_DIR = "java.io.tmpdir";
 
 private final String compatibleClientVersion;
 private static Configuration conf;
@@ -119,6 +126,7 @@ public class BackwardCompatibilityIT {
 zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
 url = PhoenixRuntime.JDBC_PROTOCOL + 
PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
+checkForPreConditions();
 }
 
 @After
@@ -141,10 +149,14 @@ public class BackwardCompatibilityIT {
 List clientVersions = Lists.newArrayList();
 ObjectMapper mapper = new ObjectMapper();
 mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
-JsonNode jsonNode = mapper.readTree(new 
FileReader(COMPATIBLE_CLIENTS_JSON));
-JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
-for (final JsonNode clientVersion : HBaseProfile) {
-clientVersions.add(clientVersion.textValue() + "-HBase-" + 
hbaseProfile);
+try (InputStream inputStream = BackwardCompatibilityIT.class
+
.getClassLoader().getResourceAsStream(COMPATIBLE_CLIENTS_JSON)) {
+assertNotNull(inputStream);
+JsonNode jsonNode = mapper.readTree(inputStream);
+JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
+for (final JsonNode clientVersion : HBaseProfile) {
+clientVersions.add(clientVersion.textValue() + "-HBase-" + 
hbaseProfile);
+}
 }
 return clientVersions;
 }
@@ -155,15 +167,14 @@ public class BackwardCompatibilityIT {
  * 2. Old Client creates tables and inserts data 

[phoenix] branch 4.x updated: PHOENIX-6026: Fix BackwardCompatibilityIT so it can run locally

2020-07-20 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new f935d5c  PHOENIX-6026: Fix BackwardCompatibilityIT so it can run 
locally
f935d5c is described below

commit f935d5c24cd20ff12a82cee585c4e1865ddea904
Author: Chinmay Kulkarni 
AuthorDate: Fri Jul 17 20:20:27 2020 -0700

PHOENIX-6026: Fix BackwardCompatibilityIT so it can run locally
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 216 +++--
 .../it/{ => resources}/scripts/execute_query.sh|   0
 2 files changed, 118 insertions(+), 98 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index 6dad745..fd1adc9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -21,15 +21,18 @@ import static 
org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeFalse;
 
 import java.io.BufferedReader;
 import java.io.BufferedWriter;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.FileWriter;
+import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.net.URL;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -79,10 +82,12 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 @Category(NeedsOwnMiniClusterTest.class)
 public class BackwardCompatibilityIT {
 
-private static final String SQL_DIR = "src/it/resources/sql_files/";
-private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String SQL_DIR = "sql_files/";
+private static final String RESULT_DIR = "gold_files/";
 private static final String COMPATIBLE_CLIENTS_JSON = 
-"src/it/resources/compatible_client_versions.json";
+"compatible_client_versions.json";
+private static final String BASH = "/bin/bash";
+private static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
 private static final String RESULT_PREFIX = "result_";
 private static final String SQL_EXTENSION = ".sql";
 private static final String TEXT_EXTENSION = ".txt";
@@ -92,6 +97,8 @@ public class BackwardCompatibilityIT {
 private static final String QUERY = "query";
 private static final String QUERY_MORE = "query_more";
 private static final String QUERY_ADD_DELETE = "query_add_delete";
+private static final String MVN_HOME = "maven.home";
+private static final String JAVA_TMP_DIR = "java.io.tmpdir";
 
 private final String compatibleClientVersion;
 private static Configuration conf;
@@ -118,6 +125,7 @@ public class BackwardCompatibilityIT {
 zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
 url = PhoenixRuntime.JDBC_PROTOCOL + 
PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
+checkForPreConditions();
 }
 
 @After
@@ -140,10 +148,14 @@ public class BackwardCompatibilityIT {
List<String> clientVersions = Lists.newArrayList();
 ObjectMapper mapper = new ObjectMapper();
 mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
-JsonNode jsonNode = mapper.readTree(new 
FileReader(COMPATIBLE_CLIENTS_JSON));
-JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
-for (final JsonNode clientVersion : HBaseProfile) {
-clientVersions.add(clientVersion.textValue() + "-HBase-" + 
hbaseProfile);
+try (InputStream inputStream = BackwardCompatibilityIT.class
+
.getClassLoader().getResourceAsStream(COMPATIBLE_CLIENTS_JSON)) {
+assertNotNull(inputStream);
+JsonNode jsonNode = mapper.readTree(inputStream);
+JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
+for (final JsonNode clientVersion : HBaseProfile) {
+clientVersions.add(clientVersion.textValue() + "-HBase-" + 
hbaseProfile);
+}
 }
 return clientVersions;
 }
@@ -154,15 +166,14 @@ public class BackwardCompatibilityIT {
  * 2. Old Client creates tables and inserts data 

[phoenix] branch master updated: PHOENIX-6017: Hadoop QA Precommit build keeps failing with release audit warning for phoenix-server/dependency-reduced-pom.xml

2020-07-17 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new a793686  PHOENIX-6017: Hadoop QA Precommit build keeps failing with 
release audit warning for phoenix-server/dependency-reduced-pom.xml
a793686 is described below

commit a79368664f04f460fdc1bbf41b223c984b5c257d
Author: Chinmay Kulkarni 
AuthorDate: Thu Jul 16 13:06:26 2020 -0700

PHOENIX-6017: Hadoop QA Precommit build keeps failing with release audit 
warning for phoenix-server/dependency-reduced-pom.xml
---
 phoenix-server/pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 873380f..1ce0bea 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -83,6 +83,7 @@
   shade
 
 
+
${basedir}/target/pom.xml
 
true
 false
 



[phoenix] branch 4.x updated: PHOENIX-6017: Hadoop QA Precommit build keeps failing with release audit warning for phoenix-server/dependency-reduced-pom.xml

2020-07-17 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new ceb1b96  PHOENIX-6017: Hadoop QA Precommit build keeps failing with 
release audit warning for phoenix-server/dependency-reduced-pom.xml
ceb1b96 is described below

commit ceb1b96e29c83b463a3be3eb1e2357e8e3e4f28f
Author: Chinmay Kulkarni 
AuthorDate: Thu Jul 16 13:06:26 2020 -0700

PHOENIX-6017: Hadoop QA Precommit build keeps failing with release audit 
warning for phoenix-server/dependency-reduced-pom.xml
---
 phoenix-server/pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 663ca6f..3cce3b5 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -74,6 +74,7 @@
   shade
 
 
+
${basedir}/target/pom.xml
 
true
 false
 



[phoenix] branch master updated: PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and wallclockTimeMs in OverallQueryMetrics

2020-07-16 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 07f1fea  PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and 
wallclockTimeMs in OverallQueryMetrics
07f1fea is described below

commit 07f1fea40804a9c65e04e336afbeecab534372e0
Author: Chinmay Kulkarni 
AuthorDate: Tue Jun 30 22:26:48 2020 -0700

PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and 
wallclockTimeMs in OverallQueryMetrics
---
 .../phoenix/monitoring/MetricsStopWatch.java   |   4 +
 .../phoenix/monitoring/OverAllQueryMetrics.java|  21 ++-
 .../monitoring/OverAllQueryMetricsTest.java| 179 +
 3 files changed, 202 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index a852ca9..5b74896 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -49,6 +49,10 @@ final class MetricsStopWatch {
 }
 }
 }
+
+boolean isRunning() {
+return isMetricsEnabled && stopwatch.isRunning();
+}
 
 long getElapsedTimeInMs() {
 if (isMetricsEnabled) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 6202eee..ae3edab 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -27,6 +27,7 @@ import static 
org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.phoenix.log.LogLevel;
 
 /**
@@ -82,8 +83,11 @@ public class OverAllQueryMetrics {
 }
 
 public void endQuery() {
+boolean wasRunning = queryWatch.isRunning();
 queryWatch.stop();
-wallClockTimeMS.change(queryWatch.getElapsedTimeInMs());
+if (wasRunning) {
+wallClockTimeMS.change(queryWatch.getElapsedTimeInMs());
+}
 }
 
 public void startResultSetWatch() {
@@ -91,8 +95,21 @@ public class OverAllQueryMetrics {
 }
 
 public void stopResultSetWatch() {
+boolean wasRunning = resultSetWatch.isRunning();
 resultSetWatch.stop();
-resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs());
+if (wasRunning) {
+resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs());
+}
+}
+
+@VisibleForTesting
+long getWallClockTimeMs() {
+return wallClockTimeMS.getValue();
+}
+
+@VisibleForTesting
+long getResultSetTimeMs() {
+return resultSetTimeMS.getValue();
 }
 
 public Map publish() {
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
new file mode 100644
index 000..f97731d
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import org.apache.phoenix.log.LogLevel;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static 
org.apache.phoenix.monitoring.MetricType.CACHE_REFRESH_SPLITS_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.NO_OP_METRIC;
+import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_FA

[phoenix] branch 4.x updated: PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and wallclockTimeMs in OverallQueryMetrics

2020-07-16 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 0806875  PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and 
wallclockTimeMs in OverallQueryMetrics
0806875 is described below

commit 0806875646a6a419d9442090b289958f3b9a4217
Author: Chinmay Kulkarni 
AuthorDate: Tue Jun 30 22:26:48 2020 -0700

PHOENIX-5981: Wrong multiple counting of resultSetTimeMs and 
wallclockTimeMs in OverallQueryMetrics
---
 .../phoenix/monitoring/MetricsStopWatch.java   |   4 +
 .../phoenix/monitoring/OverAllQueryMetrics.java|  21 ++-
 .../monitoring/OverAllQueryMetricsTest.java| 179 +
 3 files changed, 202 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index a852ca9..5b74896 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -49,6 +49,10 @@ final class MetricsStopWatch {
 }
 }
 }
+
+boolean isRunning() {
+return isMetricsEnabled && stopwatch.isRunning();
+}
 
 long getElapsedTimeInMs() {
 if (isMetricsEnabled) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 6202eee..ae3edab 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -27,6 +27,7 @@ import static 
org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.phoenix.log.LogLevel;
 
 /**
@@ -82,8 +83,11 @@ public class OverAllQueryMetrics {
 }
 
 public void endQuery() {
+boolean wasRunning = queryWatch.isRunning();
 queryWatch.stop();
-wallClockTimeMS.change(queryWatch.getElapsedTimeInMs());
+if (wasRunning) {
+wallClockTimeMS.change(queryWatch.getElapsedTimeInMs());
+}
 }
 
 public void startResultSetWatch() {
@@ -91,8 +95,21 @@ public class OverAllQueryMetrics {
 }
 
 public void stopResultSetWatch() {
+boolean wasRunning = resultSetWatch.isRunning();
 resultSetWatch.stop();
-resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs());
+if (wasRunning) {
+resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs());
+}
+}
+
+@VisibleForTesting
+long getWallClockTimeMs() {
+return wallClockTimeMS.getValue();
+}
+
+@VisibleForTesting
+long getResultSetTimeMs() {
+return resultSetTimeMS.getValue();
 }
 
 public Map publish() {
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
new file mode 100644
index 000..f97731d
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import org.apache.phoenix.log.LogLevel;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static 
org.apache.phoenix.monitoring.MetricType.CACHE_REFRESH_SPLITS_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.NO_OP_METRIC;
+import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_FA

[phoenix] branch master updated: PHOENIX-5984: Query timeout counter is not updated in all timeouts cases

2020-07-15 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 2fe1dd0  PHOENIX-5984: Query timeout counter is not updated in all 
timeouts cases
2fe1dd0 is described below

commit 2fe1dd0f4ea962554443d11d65aa44d9361d888b
Author: Chinmay Kulkarni 
AuthorDate: Wed Jul 1 19:57:17 2020 -0700

PHOENIX-5984: Query timeout counter is not updated in all timeouts cases
---
 .../monitoring/GlobalPhoenixMetricsTestSink.java   |  2 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 88 +-
 .../phoenix/iterate/BaseResultIterators.java   | 15 +++-
 3 files changed, 82 insertions(+), 23 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
index 8234c1c..85bbae7 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
@@ -28,7 +28,7 @@ public class GlobalPhoenixMetricsTestSink implements 
MetricsSink {
 // PhoenixMetricsIT tests verifies these metrics from this sink in a 
separate thread
 // GlobalPhoenixMetricsTestSink is invoked based on time defined in 
hadoop-metrics2.properties
 // This lock is to prevent concurrent access to metrics Iterable for these 
threads
-static Object lock = new Object();
+static final Object lock = new Object();
 static Iterable metrics;
 
 @Override
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 53fcf92..5d027e1 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -9,6 +9,7 @@
  */
 package org.apache.phoenix.monitoring;
 
+import static 
org.apache.phoenix.exception.SQLExceptionCode.OPERATION_TIMED_OUT;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -35,6 +36,7 @@ import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SPOOL_FIL
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_END_TO_END_TIME;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_EXECUTION_TIME;
 import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
@@ -70,6 +72,8 @@ import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.log.LogLevel;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.hamcrest.CoreMatchers;
 import org.junit.Test;
@@ -92,6 +96,23 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 
 private static final Logger LOGGER = 
LoggerFactory.getLogger(PhoenixMetricsIT.class);
 
+private static class MyClock extends EnvironmentEdge {
+private long time;
+private final long delay;
+
+public MyClock (long time, long delay) {
+this.time = time;
+this.delay = delay;
+}
+
+@Override
+public long currentTime() {
+long currentTime = this.time;
+this.time += this.delay;
+return currentTime;
+}
+}
+
 @Test
 public void testResetGlobalPhoenixMetrics() throws Exception {
 resetGlobalMetrics();
@@ -244,28 +265,32 @@ public class PhoenixMetricsIT extends 
BasePhoenixMetricsIT {
 }
 }
 }
-assertTrue("Metric expected but not present in Hadoop Metrics Sink 
(GlobalPhoenixMetricsTestSink)",
-expectedMetrics.size() == 0);
+assertEquals("Metric expected but not present in Hadoop Metrics Sink "
++ "(GlobalPhoenixMetricsTestSink)", 0, 
expectedMetrics.size());
 return true;
 }
 
-private static void createTableAndInsertValues(String tableName, boolean 
rese

[phoenix] branch 4.x updated: PHOENIX-5984: Query timeout counter is not updated in all timeouts cases

2020-07-15 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new db9dcca  PHOENIX-5984: Query timeout counter is not updated in all 
timeouts cases
db9dcca is described below

commit db9dccae63f6008fdb3518de70b4dc00464d20e8
Author: Chinmay Kulkarni 
AuthorDate: Wed Jul 1 19:57:17 2020 -0700

PHOENIX-5984: Query timeout counter is not updated in all timeouts cases
---
 .../monitoring/GlobalPhoenixMetricsTestSink.java   |  9 +--
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 88 +-
 .../phoenix/iterate/BaseResultIterators.java   | 15 +++-
 3 files changed, 82 insertions(+), 30 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
index 10ff2e1..813df6b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
@@ -22,13 +22,6 @@ import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
-import org.apache.phoenix.util.PhoenixRuntime;
-
-import java.util.Map;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
 
 public class GlobalPhoenixMetricsTestSink implements MetricsSink {
 
@@ -36,7 +29,7 @@ public class GlobalPhoenixMetricsTestSink implements 
MetricsSink {
 // PhoenixMetricsIT tests verifies these metrics from this sink in a 
separate thread
 // GlobalPhoenixMetricsTestSink is invoked based on time defined in 
hadoop-metrics2.properties
 // This lock is to prevent concurrent access to metrics Iterable for these 
threads
-static Object lock = new Object();
+static final Object lock = new Object();
 static Iterable metrics;
 
 @Override
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 946a929..e8d9e40 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -9,6 +9,7 @@
  */
 package org.apache.phoenix.monitoring;
 
+import static 
org.apache.phoenix.exception.SQLExceptionCode.OPERATION_TIMED_OUT;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -35,6 +36,7 @@ import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SPOOL_FIL
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_END_TO_END_TIME;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_EXECUTION_TIME;
 import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
@@ -71,6 +73,8 @@ import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.log.LogLevel;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.hamcrest.CoreMatchers;
 import org.junit.Test;
@@ -93,6 +97,23 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 
 private static final Logger LOGGER = 
LoggerFactory.getLogger(PhoenixMetricsIT.class);
 
+private static class MyClock extends EnvironmentEdge {
+private long time;
+private final long delay;
+
+public MyClock (long time, long delay) {
+this.time = time;
+this.delay = delay;
+}
+
+@Override
+public long currentTime() {
+long currentTime = this.time;
+this.time += this.delay;
+return currentTime;
+}
+}
+
 @Test
 public void testResetGlobalPhoenixMetrics() throws Exception {
 resetGlobalMetrics();
@@ -249,28 +270,32 @@ public class PhoenixMetricsIT extends 
BasePhoenixMetricsIT

[phoenix] branch master updated: PHOENIX-5892 Add code coverage steps in build documentation

2020-05-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 49599fd  PHOENIX-5892 Add code coverage steps in build documentation
49599fd is described below

commit 49599fd89b6cfa9cab9d7d3f6dcf4bdc5428915a
Author: Sandeep Guggilam 
AuthorDate: Wed May 13 09:39:15 2020 -0700

PHOENIX-5892 Add code coverage steps in build documentation

Signed-off-by: Chinmay Kulkarni 
---
 BUILDING.md | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/BUILDING.md b/BUILDING.md
index 08f67af..dc75d21 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -83,7 +83,11 @@ All Unit Tests and Integration tests
 `$ mvn clean verify`
 
 The verify maven target will also run dependency:analyze-only, which checks if 
the dependencies
- used in the code and declared in the maven projects match.
+ used in the code and declared in the maven projects match. The code coverage 
report would be
+generated at /target/site/jacoco/index.html
+
+To skip code coverage analysis
+`$ mvn verify -Dskip.code-coverage`
 
 Findbugs
 



[phoenix] branch 4.x updated: PHOENIX-5892 Add code coverage steps in build documentation

2020-05-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new f21901e  PHOENIX-5892 Add code coverage steps in build documentation
f21901e is described below

commit f21901eaf43ca90189dcc3a5bebcee3250a17f43
Author: Sandeep Guggilam 
AuthorDate: Wed May 13 09:52:39 2020 -0700

PHOENIX-5892 Add code coverage steps in build documentation

Signed-off-by: Chinmay Kulkarni 
---
 BUILDING.md | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/BUILDING.md b/BUILDING.md
index 74ea502..8aa8df7 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -73,9 +73,19 @@ Use the m2e eclipse plugin and do Import->Maven Project and 
just pick the root '
 Running the tests
 -
 
-All tests  
+All Unit tests  
 `$ mvn clean test`
 
+All Unit Tests and Integration tests
+`$ mvn clean verify`
+
+The verify maven target will also run dependency:analyze-only, which checks if 
the dependencies 
+used in the code and declared in the maven projects match. The code coverage 
report would be
+generated at /target/site/jacoco/index.html
+
+To skip code coverage analysis
+`$ mvn verify -Dskip.code-coverage`
+
 Findbugs
 
 



[phoenix] branch master updated: PHOENIX-5891: Ensure that code coverage does not drop with subsequent commits

2020-05-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c5be827  PHOENIX-5891: Ensure that code coverage does not drop with 
subsequent commits
c5be827 is described below

commit c5be82757930b4c256c95947b11487b7a686212b
Author: Chinmay Kulkarni 
AuthorDate: Wed May 13 23:48:01 2020 -0700

PHOENIX-5891: Ensure that code coverage does not drop with subsequent 
commits
---
 phoenix-pherf/pom.xml |  3 +++
 pom.xml   | 30 +-
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index a156318..3ac6898 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -41,6 +41,9 @@
 1.8.0
 1.0.1
 2.2.11
+
+
0.650
+
0.500
   
 
   
diff --git a/pom.xml b/pom.xml
index fc8bb9d..0392dfc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -144,7 +144,9 @@
 
 UTF-8
 UTF-8
-
+
+
0.700
+
0.600
   
 
   
@@ -1542,6 +1544,32 @@
   report
 
   
+  
+check
+verify
+
+check
+
+
+  
+
+  BUNDLE
+  
+
+  INSTRUCTION
+  COVEREDRATIO
+  
${jacoco.instruction.coverage.percentage}
+
+
+  BRANCH
+  COVEREDRATIO
+  
${jacoco.branch.coverage.percentage}
+
+  
+
+  
+
+  
 
   
 



[phoenix] branch 4.x updated: PHOENIX-5891: Ensure that code coverage does not drop with subsequent commits

2020-05-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new b0b7308  PHOENIX-5891: Ensure that code coverage does not drop with 
subsequent commits
b0b7308 is described below

commit b0b730892e73593efa38a3b51b19f48b4981207e
Author: Chinmay Kulkarni 
AuthorDate: Wed May 13 23:33:10 2020 -0700

PHOENIX-5891: Ensure that code coverage does not drop with subsequent 
commits
---
 phoenix-pherf/pom.xml |  3 +++
 pom.xml   | 29 +
 2 files changed, 32 insertions(+)

diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 6474110..2afb780 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -31,6 +31,9 @@

${project.basedir}/..
org.apache.phoenix.shaded
+   
+   
0.650
+   
0.500

 

diff --git a/pom.xml b/pom.xml
index 078731f..c19e963 100644
--- a/pom.xml
+++ b/pom.xml
@@ -149,6 +149,9 @@
 
 UTF-8
 UTF-8
+
+
0.700
+
0.600
2.12.0
 
   
@@ -1246,6 +1249,32 @@
   report
 
   
+  
+check
+verify
+
+  check
+
+
+  
+
+  BUNDLE
+  
+
+  INSTRUCTION
+  COVEREDRATIO
+  
${jacoco.instruction.coverage.percentage}
+
+
+  BRANCH
+  COVEREDRATIO
+  
${jacoco.branch.coverage.percentage}
+
+  
+
+  
+
+  
 
   
 



[phoenix] branch 4.x updated: (Addendum) PHOENIX-5842 Check for absence of system property to deactivate code coverage

2020-05-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 478b43c  (Addendum) PHOENIX-5842 Check for absence of system property 
to deactivate code coverage
478b43c is described below

commit 478b43ca24688265ddb333969265ed19ae889dc9
Author: Sandeep Guggilam 
AuthorDate: Tue May 12 17:08:42 2020 -0700

(Addendum) PHOENIX-5842 Check for absence of system property to deactivate 
code coverage

Signed-off-by: Chinmay Kulkarni 
---
 pom.xml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pom.xml b/pom.xml
index fbb295a..078731f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1220,8 +1220,7 @@
   codecoverage
   
 
-code.coverage
-!deactivate
+!skip.code-coverage
 
   
   



[phoenix] branch master updated: (Addendum) PHOENIX-5842 Check for absence of system property to deactivate code coverage

2020-05-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 753895f  (Addendum) PHOENIX-5842 Check for absence of system property 
to deactivate code coverage
753895f is described below

commit 753895fda79da19cd35e098e63abcab7310ca204
Author: Sandeep Guggilam 
AuthorDate: Tue May 12 16:54:08 2020 -0700

(Addendum) PHOENIX-5842 Check for absence of system property to deactivate 
code coverage

Signed-off-by: Chinmay Kulkarni 
---
 pom.xml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pom.xml b/pom.xml
index 46083e9..fc8bb9d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1516,8 +1516,7 @@
   codecoverage
   
 
-code.coverage
-!deactivate
+!skip.code-coverage
 
   
   



[phoenix] branch 4.x updated: PHOENIX-5842 Code Coverage tool for Phoenix

2020-05-11 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 182b2d3  PHOENIX-5842 Code Coverage tool for Phoenix
182b2d3 is described below

commit 182b2d3445fa0667189bbf7bef9c0cf74fa341e4
Author: Sandeep Guggilam 
AuthorDate: Tue May 5 20:26:35 2020 -0700

PHOENIX-5842 Code Coverage tool for Phoenix

Signed-off-by: Chinmay Kulkarni 
---
 dev/test-patch.sh |  2 ++
 pom.xml   | 51 +--
 2 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/dev/test-patch.sh b/dev/test-patch.sh
index 090bf69..e36aa91 100755
--- a/dev/test-patch.sh
+++ b/dev/test-patch.sh
@@ -1108,6 +1108,8 @@ checkLineLengths
 if [[ $JENKINS == "true" ]] ; then
   runTests
   (( RESULT = RESULT + $? ))
+JIRA_COMMENT_FOOTER="Code Coverage results: 
$BUILD_URL/artifact/phoenix-core/target/site/jacoco/index.html
+$JIRA_COMMENT_FOOTER"
 JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
 $JIRA_COMMENT_FOOTER"
 fi
diff --git a/pom.xml b/pom.xml
index 9d8e0d2..fbb295a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -136,12 +136,15 @@
 2.9
 
1.9.1
 3.0.0-M3
+0.7.9
 
 
 8
 7
 false
 false
+
+
 
 
 UTF-8
@@ -268,7 +271,7 @@
 true
 
--Xmx2000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/@{jacocoArgLine} -Xmx2000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDum [...]
 
org.apache.phoenix.end2end.ParallelStatsEnabledTest
   
   
@@ -290,7 +293,7 @@
 at 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:2835)
 at 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:490)
 -->

--Xmx3000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/@{jacocoArgLine} -Xmx3000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDum [...]
 
org.apache.phoenix.end2end.ParallelStatsDisabledTest
   
   
@@ -302,7 +305,7 @@
   HBaseManagedTimeTests
   
 true
--enableassertions -Xmx2000m -XX:MaxPermSize=128m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/
+@{jacocoArgLine} -enableassertions -Xmx2000m 
-XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/
 
org.apache.phoenix.end2end.HBaseManagedTimeTest
   
   
@@ -314,7 +317,7 @@
   NeedTheirOwnClusterTests
   
  false
- -enableassertions -Xmx2000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.pa

[phoenix] branch master updated: PHOENIX-5842 Code Coverage tool for Phoenix

2020-05-11 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 8fd1178  PHOENIX-5842 Code Coverage tool for Phoenix
8fd1178 is described below

commit 8fd11783cd132817604f9e84a4e4b8d716042e0e
Author: Sandeep Guggilam 
AuthorDate: Thu May 7 15:04:32 2020 -0700

PHOENIX-5842 Code Coverage tool for Phoenix

Signed-off-by: Chinmay Kulkarni 
---
 dev/test-patch.sh |  2 ++
 pom.xml   | 51 +--
 2 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/dev/test-patch.sh b/dev/test-patch.sh
index 69e6d10..cf5d8b4 100755
--- a/dev/test-patch.sh
+++ b/dev/test-patch.sh
@@ -1103,6 +1103,8 @@ checkLineLengths
 if [[ $JENKINS == "true" ]] ; then
   runTests
   (( RESULT = RESULT + $? ))
+JIRA_COMMENT_FOOTER="Code Coverage results: 
$BUILD_URL/artifact/phoenix-core/target/site/jacoco/index.html
+$JIRA_COMMENT_FOOTER"
 JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
 $JIRA_COMMENT_FOOTER"
 fi
diff --git a/pom.xml b/pom.xml
index a1efddd..46083e9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -131,12 +131,15 @@
 
1.9.1
 3.0.0-M3
 
${antlr.version}
+0.7.9
 
 
 8
 7
 false
 false
+
+
 
 
 UTF-8
@@ -272,7 +275,7 @@
 true
 
--Xmx2000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ -Dorg.apache.hadoo 
[...]
+@{jacocoArgLine} -Xmx2000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ - [...]
 
org.apache.phoenix.end2end.ParallelStatsEnabledTest
   
   
@@ -294,7 +297,7 @@
 at 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:2835)
 at 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:490)
 -->

--Xmx3000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ -Dorg.apache.hadoo 
[...]
+@{jacocoArgLine} -Xmx3000m -XX:MaxPermSize=256m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+DisableExplicitGC 
-XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled 
-XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ - [...]
 
org.apache.phoenix.end2end.ParallelStatsDisabledTest
   
   
@@ -306,7 +309,7 @@
   HBaseManagedTimeTests
   
 true
--enableassertions -Xmx2000m -XX:MaxPermSize=128m 
-Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ 
-Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.
+@{jacocoArgLine} -enableassertions -Xmx2000m 
-XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom 
"-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ 
-Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.
 
org.apache.phoenix.end2end.HBaseManagedTimeTest
   
   
@@ -318,7 +321,7 @@
   NeedTheirOwnClust

[phoenix] branch 4.x updated: PHOENIX-5864: RuleGeneratorTest unit test seem to be failing

2020-04-25 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new de2da84  PHOENIX-5864: RuleGeneratorTest unit test seem to be failing
de2da84 is described below

commit de2da84381083e1cb5588f1e4be52d1e7e3aec5b
Author: Chinmay Kulkarni 
AuthorDate: Fri Apr 24 15:46:30 2020 -0700

PHOENIX-5864: RuleGeneratorTest unit test seem to be failing
---
 phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java | 6 +++---
 .../src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java   | 1 -
 .../org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java  | 1 -
 .../{timeout_test_scenario.xml => scenario_with_query_timeouts.xml} | 0
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index be9b27a..c1a7b66 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -71,7 +71,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
@@ -93,7 +93,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
@@ -115,7 +115,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
index 6d4e785..c439d38 100644
--- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
+++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -46,7 +46,6 @@ import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.joda.time.format.DateTimeFormat;
 import org.joda.time.format.DateTimeFormatter;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class RuleGeneratorTest {
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
index d9c7ca3..592aceb 100644
--- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
+++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
@@ -28,7 +28,6 @@ import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.util.DefaultEnvironmentEdge;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.mockito.Mockito;
diff --git 
a/phoenix-pherf/src/test/resources/scenario/timeout_test_scenario.xml 
b/phoenix-pherf/src/test/resources/scenario/scenario_with_query_timeouts.xml
similarity index 100%
rename from phoenix-pherf/src/test/resources/scenario/timeout_test_scenario.xml
rename to 
phoenix-pherf/src/test/resources/scenario/scenario_with_query_timeouts.xml



[phoenix] branch master updated: PHOENIX-5864: RuleGeneratorTest unit test seem to be failing

2020-04-25 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b29d201  PHOENIX-5864: RuleGeneratorTest unit test seem to be failing
b29d201 is described below

commit b29d201581bbfff3765c2e6bd17f514a4c46ec20
Author: Chinmay Kulkarni 
AuthorDate: Fri Apr 24 15:46:30 2020 -0700

PHOENIX-5864: RuleGeneratorTest unit test seem to be failing
---
 phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java | 6 +++---
 .../src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java   | 1 -
 .../org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java  | 1 -
 .../{timeout_test_scenario.xml => scenario_with_query_timeouts.xml} | 0
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index be9b27a..c1a7b66 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -71,7 +71,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
@@ -93,7 +93,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
@@ -115,7 +115,7 @@ public class PherfMainIT extends ResultBaseTestIT {
 String[] args = {"-q", "-l",
 "-drop", "all",
 "-schemaFile", ".*timeout_test_schema.sql",
-"-scenarioFile", ".*timeout_test_scenario.xml" };
+"-scenarioFile", ".*scenario_with_query_timeouts.xml" };
 Pherf p = new Pherf(args);
 p.run();
 
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
index 6d4e785..c439d38 100644
--- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
+++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -46,7 +46,6 @@ import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.joda.time.format.DateTimeFormat;
 import org.joda.time.format.DateTimeFormatter;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class RuleGeneratorTest {
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
index d9c7ca3..592aceb 100644
--- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
+++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java
@@ -28,7 +28,6 @@ import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.util.DefaultEnvironmentEdge;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.mockito.Mockito;
diff --git 
a/phoenix-pherf/src/test/resources/scenario/timeout_test_scenario.xml 
b/phoenix-pherf/src/test/resources/scenario/scenario_with_query_timeouts.xml
similarity index 100%
rename from phoenix-pherf/src/test/resources/scenario/timeout_test_scenario.xml
rename to 
phoenix-pherf/src/test/resources/scenario/scenario_with_query_timeouts.xml



svn commit: r1876770 - in /phoenix/site: publish/language/datatypes.html publish/language/functions.html publish/language/index.html publish/pherf.html source/src/site/markdown/pherf.md

2020-04-20 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Tue Apr 21 00:51:28 2020
New Revision: 1876770

URL: http://svn.apache.org/viewvc?rev=1876770=rev
Log:
(Christine Feng) PHOENIX-5818: Add documentation for query timeoutDuration 
attribute in Pherf scenarios

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/pherf.html
phoenix/site/source/src/site/markdown/pherf.md

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1876770=1876769=1876770=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Tue Apr 21 00:51:28 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1876770=1876769=1876770=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Tue Apr 21 00:51:28 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1876770=1876769=1876770=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Tue Apr 21 00:51:28 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/pherf.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/pherf.html?rev=1876770=1876769=1876770=diff
==
--- phoenix/site/publish/pherf.html (original)
+++ phoenix/site/publish/pherf.html Tue Apr 21 00:51:28 2020
@@ -1,7 +1,7 @@
 
 
 
 
@@ -267,27 +267,45 @@
  
  
  Defining Scenario 
- Scenario can have multiple querySets. Consider following example, 
concurrency of 1-4 means that each query will be executed starting with 
concurrency level of 1 and reach up to maximum concurrency of 4. Per thread, 
query would be executed to a minimum of 10 times or 10 seconds (whichever comes 
first). QuerySet by defult is executed serially but you can change 
executionType to PARALLEL so queries are executed concurrently. Scenarios are 
defined in XMLs stored in the resource directory. 
+ Scenario can have multiple querySets. Consider the following example: 
concurrency of 1-4 means that each query will be executed starting with a 
concurrency level of 1 and reach up to a maximum concurrency of 4. Per thread, 
a query would be executed a minimum of 10 times or 10 seconds (whichever comes 
first). A QuerySet by default is executed serially, but you can change 
executionType to PARALLEL so queries are executed concurrently. Each Query may 
have an optional timeoutDuration field that defines the amount of time (in 
milliseconds) before execution for that Query is cancelled. Scenarios are 
defined in XMLs stored in the resource directory. 
   
   scenarios
 !--Minimum of executionDurationInMs or numberOfExecutions. Whichever 
is reached first --
 querySet concurrency=1-4 
executionType=PARALLEL executionDurationInMs=1 
numberOfExecutions=10
 query id=q1 verifyRowCount=false 
statement=select count(*) from PHERF.TEST_TABLE/
-query id=q2 tenantId=1234567890 
ddl=create view if not exists 
+query id=q2 tenantId=1234567890 
timeoutDuration=1 ddl=create view if not exists 
 myview(mypk varchar not null primary key, mycol varchar) 
statement=upsert select .../
 /querySet
 querySet concurrency=3 executionType=SERIAL 
executionDurationInMs=2 numberOfExecutions=100
 query id=q3 verifyRowCount=false 
statement=select count(*) from PHERF.TEST_TABLE/
 query id=q4 statement=select count(*) from 
PHERF.TEST_TABLE WHERE TENANT_ID='00D0062'/
 /querySet
-/scenario
+/scenarios
 
  
   
  
  
  Results 
- Results are written real time in results directory. Open the result 
that is saved in .jpg format for real time visualization. 
+ Results are written in real time to the results directory. Open the result 
that is saved in .jpg format for real time visualization. Results are written 
using DataModelResult objects, which are modified over the course of each Pherf 
run. 
+  
+  XML results 
+  Pherf XML results have a similar format to the corresponding scenario.xml 
file used for the Pherf run, but also include additional information, such as 
the execution time of queries, whether queries timed out, and result row 
count. 
+   
+queryResults expectedAggregateRowCount=10 
id=q1 statement=SELECT COUNT(*) FROM 
PHERF.USER_DEFINED_TEST timeoutDuration=0

[phoenix] branch master updated: PHOENIX-4521: Allow Pherf scenario to define per query max allowed query execution duration after which thread is interrupted

2020-04-20 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new dccc260  PHOENIX-4521: Allow Pherf scenario to define per query max 
allowed query execution duration after which thread is interrupted
dccc260 is described below

commit dccc260413591a7ab3133f8040b8547b8e993750
Author: Christine Feng 
AuthorDate: Mon Mar 16 19:32:27 2020 -0700

PHOENIX-4521: Allow Pherf scenario to define per query max allowed query 
execution duration after which thread is interrupted

Signed-off-by: Chinmay Kulkarni 
---
 phoenix-pherf/pom.xml  |   5 +
 .../java/org/apache/phoenix/pherf/PherfMainIT.java |  88 +
 .../apache/phoenix/pherf/configuration/Query.java  |  11 ++
 .../apache/phoenix/pherf/result/QueryResult.java   |   2 +
 .../org/apache/phoenix/pherf/result/RunTime.java   |  34 +++--
 .../apache/phoenix/pherf/result/ThreadTime.java|   5 +-
 .../apache/phoenix/pherf/result/file/Header.java   |   4 +-
 .../pherf/workload/MultiThreadedRunner.java|  85 +
 .../pherf/workload/MultithreadedDiffer.java|   4 +-
 .../java/org/apache/phoenix/pherf/ResultTest.java  |  10 +-
 .../pherf/workload/MultiThreadedRunnerTest.java| 121 ++
 .../resources/datamodel/timeout_test_schema.sql|  22 
 .../resources/scenario/timeout_test_scenario.xml   | 138 +
 13 files changed, 483 insertions(+), 46 deletions(-)

diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 9b5914e..a156318 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -178,6 +178,11 @@
   hamcrest-core
   test
 
+
+  org.mockito
+  mockito-core
+  test
+
 
 
   javax.activation
diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 3ee9327..be9b27a 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -18,14 +18,35 @@
 
 package org.apache.phoenix.pherf;
 
+import org.apache.commons.lang3.StringUtils;
+import org.apache.phoenix.pherf.result.Result;
+import org.apache.phoenix.pherf.result.ResultValue;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
 import java.util.concurrent.Future;
 
+import static org.junit.Assert.assertEquals;
+
 public class PherfMainIT extends ResultBaseTestIT {
 
+public HashMap mapResults(Result r) throws IOException {
+HashMap map = new HashMap<>();
+List resultValues = r.getResultValues();
+String[] headerValues = 
r.getHeader().split(PherfConstants.RESULT_FILE_DELIMETER);
+for (int i = 0; i < headerValues.length; i++) {
+map.put(StringUtils.strip(headerValues[i],"[] "),
+StringUtils.strip(resultValues.get(i).toString(), "[] "));
+}
+return map;
+}
+
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
@@ -43,4 +64,71 @@ public class PherfMainIT extends ResultBaseTestIT {
 future.get();
 }
 }
+
+@Test
+public void testQueryTimeout() throws Exception {
+// Timeout of 0 ms means every query execution should time out
+String[] args = {"-q", "-l",
+"-drop", "all",
+"-schemaFile", ".*timeout_test_schema.sql",
+"-scenarioFile", ".*timeout_test_scenario.xml" };
+Pherf p = new Pherf(args);
+p.run();
+
+CSVFileResultHandler rh = new CSVFileResultHandler();
+rh.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE);
+rh.setResultFileName("COMBINED");
+List resultList = rh.read();
+for (Result r : resultList) {
+HashMap resultsMap = mapResults(r);
+if (resultsMap.get("QUERY_ID").equals("q1")) {
+assertEquals(resultsMap.get("TIMED_OUT"), "true");
+}
+}
+}
+
+@Test
+public void testLargeQueryTimeout() throws Exception {
+// Timeout of max_long ms means every query execution should finish 
without timing out
+String[] args = {"-q", "-l",
+"-drop", "all",
+"-schemaFile", &q

[phoenix] branch 4.x updated: PHOENIX-4521: Allow Pherf scenario to define per query max allowed query execution duration after which thread is interrupted

2020-04-20 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 8ee78e2  PHOENIX-4521: Allow Pherf scenario to define per query max 
allowed query execution duration after which thread is interrupted
8ee78e2 is described below

commit 8ee78e2be4858f3ed230dff2328269ebe67e1f03
Author: Christine Feng 
AuthorDate: Mon Mar 16 19:32:27 2020 -0700

PHOENIX-4521: Allow Pherf scenario to define per query max allowed query 
execution duration after which thread is interrupted

Signed-off-by: Chinmay Kulkarni 
---
 phoenix-pherf/pom.xml  |   5 +
 .../java/org/apache/phoenix/pherf/PherfMainIT.java |  88 +
 .../apache/phoenix/pherf/configuration/Query.java  |  11 ++
 .../apache/phoenix/pherf/result/QueryResult.java   |   2 +
 .../org/apache/phoenix/pherf/result/RunTime.java   |  34 +++--
 .../apache/phoenix/pherf/result/ThreadTime.java|   5 +-
 .../apache/phoenix/pherf/result/file/Header.java   |   4 +-
 .../pherf/workload/MultiThreadedRunner.java|  85 +
 .../pherf/workload/MultithreadedDiffer.java|   4 +-
 .../java/org/apache/phoenix/pherf/ResultTest.java  |  10 +-
 .../pherf/workload/MultiThreadedRunnerTest.java| 121 ++
 .../resources/datamodel/timeout_test_schema.sql|  22 
 .../resources/scenario/timeout_test_scenario.xml   | 138 +
 13 files changed, 483 insertions(+), 46 deletions(-)

diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 8296fe1ab..6474110 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -109,6 +109,11 @@
hadoop-minicluster
test

+   
+   org.mockito
+   mockito-all
+   test
+   

 

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 3ee9327..be9b27a 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -18,14 +18,35 @@
 
 package org.apache.phoenix.pherf;
 
+import org.apache.commons.lang3.StringUtils;
+import org.apache.phoenix.pherf.result.Result;
+import org.apache.phoenix.pherf.result.ResultValue;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
 import java.util.concurrent.Future;
 
+import static org.junit.Assert.assertEquals;
+
 public class PherfMainIT extends ResultBaseTestIT {
 
+public HashMap mapResults(Result r) throws IOException {
+HashMap map = new HashMap<>();
+List resultValues = r.getResultValues();
+String[] headerValues = 
r.getHeader().split(PherfConstants.RESULT_FILE_DELIMETER);
+for (int i = 0; i < headerValues.length; i++) {
+map.put(StringUtils.strip(headerValues[i],"[] "),
+StringUtils.strip(resultValues.get(i).toString(), "[] "));
+}
+return map;
+}
+
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
@@ -43,4 +64,71 @@ public class PherfMainIT extends ResultBaseTestIT {
 future.get();
 }
 }
+
+@Test
+public void testQueryTimeout() throws Exception {
+// Timeout of 0 ms means every query execution should time out
+String[] args = {"-q", "-l",
+"-drop", "all",
+"-schemaFile", ".*timeout_test_schema.sql",
+"-scenarioFile", ".*timeout_test_scenario.xml" };
+Pherf p = new Pherf(args);
+p.run();
+
+CSVFileResultHandler rh = new CSVFileResultHandler();
+rh.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE);
+rh.setResultFileName("COMBINED");
+List resultList = rh.read();
+for (Result r : resultList) {
+HashMap resultsMap = mapResults(r);
+if (resultsMap.get("QUERY_ID").equals("q1")) {
+assertEquals(resultsMap.get("TIMED_OUT"), "true");
+}
+}
+}
+
+@Test
+public void testLargeQueryTimeout() throws Exception {
+// Timeout of max_long ms means every query execution should finish 
without timing out
+String[] args = {"-q", "-l",
+  

[phoenix] branch master updated: PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths due to MutatingParallelIteratorFactory iterator not being closed

2020-03-27 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new affa9e8  PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths 
due to MutatingParallelIteratorFactory iterator not being closed
affa9e8 is described below

commit affa9e889efcc2ad7dac009a0d294b09447d281e
Author: Chinmay Kulkarni 
AuthorDate: Fri Mar 27 17:59:39 2020 -0700

PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths due to 
MutatingParallelIteratorFactory iterator not being closed
---
 .../org/apache/phoenix/end2end/UpsertSelectIT.java | 2119 +++-
 .../compile/MutatingParallelIteratorFactory.java   |  120 +-
 .../org/apache/phoenix/compile/UpsertCompiler.java |   32 +-
 .../phoenix/iterate/BaseResultIterators.java   |1 -
 4 files changed, 1279 insertions(+), 993 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 41e2d3c..6f0f877 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.util.TestUtil.A_VALUE;
@@ -42,6 +43,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Timestamp;
 import java.util.Properties;
 
@@ -49,6 +51,7 @@ import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.GlobalMetric;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.types.PInteger;
@@ -57,6 +60,9 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -70,7 +76,22 @@ public class UpsertSelectIT extends ParallelStatsDisabledIT {
 this.allowServerSideMutations = allowServerSideMutations;
 }
 
-@Parameters(name="UpsertSelecttIT_allowServerSideMutations={0}") // name 
is used by failsafe as file name in reports
+@Before
+public void setup() {
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+m.reset();
+}
+}
+
+@After
+public void assertNoConnLeak() {
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+assertEquals(0, 
GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue());
+}
+
+// name is used by failsafe as file name in reports
+@Parameters(name="UpsertSelecttIT_allowServerSideMutations={0}")
 public static synchronized Object[] data() {
 return new Object[] {"true", "false"};
 }
@@ -81,17 +102,17 @@ public class UpsertSelectIT extends 
ParallelStatsDisabledIT {
 }
 
 @Test
-public void testUpsertSelecWithIndex() throws Exception {
+public void testUpsertSelectWithIndex() throws Exception {
 testUpsertSelect(true, false);
 }
 
 @Test
-public void testUpsertSelecWithIndexWithSalt() throws Exception {
+public void testUpsertSelectWithIndexWithSalt() throws Exception {
 testUpsertSelect(true, true);
 }
 
 @Test
-public void testUpsertSelecWithNoIndexWithSalt() throws Exception {
+public void testUpsertSelectWithNoIndexWithSalt() throws Exception {
 testUpsertSelect(false, true);
 }
 
@@ -101,145 +122,163 @@ public class UpsertSelectIT extends 
ParallelStatsDisabledIT {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS,
 allowServerSideMutations);
-String aTable = initATableValues(tenantId, saltTable ? null : splits, 
null, null, getUrl(), saltTable ? "salt_buckets = 2" : null);
+String aTable = initATableValues(tenantId, saltTable ? null : splits, 
null,
+nul

[phoenix] branch 4.x updated: PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths due to MutatingParallelIteratorFactory iterator not being closed

2020-03-27 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 84f24cd  PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths 
due to MutatingParallelIteratorFactory iterator not being closed
84f24cd is described below

commit 84f24cd29c889654fa5cf6349af4583c178ba7b5
Author: Chinmay Kulkarni 
AuthorDate: Fri Mar 27 17:59:39 2020 -0700

PHOENIX-5802: Connection leaks in UPSERT SELECT/DELETE paths due to 
MutatingParallelIteratorFactory iterator not being closed
---
 .../org/apache/phoenix/end2end/UpsertSelectIT.java | 2119 +++-
 .../compile/MutatingParallelIteratorFactory.java   |  120 +-
 .../org/apache/phoenix/compile/UpsertCompiler.java |   32 +-
 .../phoenix/iterate/BaseResultIterators.java   |1 -
 4 files changed, 1279 insertions(+), 993 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 41e2d3c..6f0f877 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.util.TestUtil.A_VALUE;
@@ -42,6 +43,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Timestamp;
 import java.util.Properties;
 
@@ -49,6 +51,7 @@ import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.GlobalMetric;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.types.PInteger;
@@ -57,6 +60,9 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -70,7 +76,22 @@ public class UpsertSelectIT extends ParallelStatsDisabledIT {
 this.allowServerSideMutations = allowServerSideMutations;
 }
 
-@Parameters(name="UpsertSelecttIT_allowServerSideMutations={0}") // name 
is used by failsafe as file name in reports
+@Before
+public void setup() {
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+m.reset();
+}
+}
+
+@After
+public void assertNoConnLeak() {
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+assertEquals(0, 
GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue());
+}
+
+// name is used by failsafe as file name in reports
+@Parameters(name="UpsertSelecttIT_allowServerSideMutations={0}")
 public static synchronized Object[] data() {
 return new Object[] {"true", "false"};
 }
@@ -81,17 +102,17 @@ public class UpsertSelectIT extends 
ParallelStatsDisabledIT {
 }
 
 @Test
-public void testUpsertSelecWithIndex() throws Exception {
+public void testUpsertSelectWithIndex() throws Exception {
 testUpsertSelect(true, false);
 }
 
 @Test
-public void testUpsertSelecWithIndexWithSalt() throws Exception {
+public void testUpsertSelectWithIndexWithSalt() throws Exception {
 testUpsertSelect(true, true);
 }
 
 @Test
-public void testUpsertSelecWithNoIndexWithSalt() throws Exception {
+public void testUpsertSelectWithNoIndexWithSalt() throws Exception {
 testUpsertSelect(false, true);
 }
 
@@ -101,145 +122,163 @@ public class UpsertSelectIT extends 
ParallelStatsDisabledIT {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS,
 allowServerSideMutations);
-String aTable = initATableValues(tenantId, saltTable ? null : splits, 
null, null, getUrl(), saltTable ? "salt_buckets = 2" : null);
+String aTable = initATableValues(tenantId, saltTable ? null : splits, 
null,
+null, getUrl(), saltTab

[phoenix] branch master updated: PHOENIX-5801: Connection leak when creating a view with a where condition

2020-03-27 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 8247029  PHOENIX-5801: Connection leak when creating a view with a 
where condition
8247029 is described below

commit 8247029ea8e415d82610c55b9ab3515c3f176ac2
Author: Chinmay Kulkarni 
AuthorDate: Thu Mar 26 21:36:21 2020 -0700

PHOENIX-5801: Connection leak when creating a view with a where condition
---
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 15 +
 .../phoenix/coprocessor/WhereConstantParser.java   | 75 +++---
 2 files changed, 54 insertions(+), 36 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 48a02e6..ac21865 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -644,6 +644,21 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT 
{
 }
 
 @Test
+public void createViewWithWhereConditionNoConnLeak() throws SQLException {
+resetGlobalMetrics();
+String tableName = generateUniqueName();
+String viewName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.createStatement().execute("CREATE TABLE " + tableName +
+" (K INTEGER PRIMARY KEY, V VARCHAR(10))");
+conn.createStatement().execute("CREATE VIEW " + viewName +
+" AS SELECT * FROM " + tableName + " WHERE K = 1");
+}
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+assertEquals(0, 
GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue());
+}
+
+@Test
 public void testClosingConnectionClearsMetrics() throws Exception {
 Connection conn = null;
 try {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
index c12ad18..ae6b865 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
@@ -50,7 +50,6 @@ import com.google.common.collect.Lists;
 public class WhereConstantParser {
 
 public static PTable addViewInfoToPColumnsIfNeeded(PTable view) throws 
SQLException {
-   boolean[] viewColumnConstantsMatched = new 
boolean[view.getColumns().size()];
 byte[][] viewColumnConstantsToBe = new 
byte[view.getColumns().size()][];
 if (view.getViewStatement() == null) {
return view;
@@ -58,44 +57,48 @@ public class WhereConstantParser {
 SelectStatement select = new 
SQLParser(view.getViewStatement()).parseQuery();
 ParseNode whereNode = select.getWhere();
 ColumnResolver resolver = FromCompiler.getResolver(new TableRef(view));
-StatementContext context = new StatementContext(new 
PhoenixStatement(getConnectionlessConnection()), resolver);
-Expression expression = null;
-try {
-   expression = WhereCompiler.compile(context, whereNode);
-}
-catch (ColumnNotFoundException e) {
-   // if we could not find a column used in the view statement 
(which means its was dropped)
-   // this view is not valid any more
-   return null;
-}
-CreateTableCompiler.ViewWhereExpressionVisitor visitor =
-new CreateTableCompiler.ViewWhereExpressionVisitor(view, 
viewColumnConstantsToBe);
-expression.accept(visitor);
-
-BitSet isViewColumnReferencedToBe = new 
BitSet(view.getColumns().size());
-// Used to track column references in a view
-ExpressionCompiler expressionCompiler = new 
CreateTableCompiler.ColumnTrackingExpressionCompiler(context, 
isViewColumnReferencedToBe);
-whereNode.accept(expressionCompiler);
-
-List result = Lists.newArrayList();
-for (PColumn column : PTableImpl.getColumnsToClone(view)) {
-   boolean isViewReferenced = 
isViewColumnReferencedToBe.get(column.getPosition());
-   if ( (visitor.isUpdatable() || 
view.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(view)).equals(column))
 
-   && 
viewColumnConstantsToBe[column.getPosition()] != null) {
-   result.add(new PColumnImpl(column, 
viewColumnConstantsToBe[column.getPosition()], isViewReferenced));
-   
viewColumnConstantsMatched[column.g

[phoenix] branch 4.x updated: PHOENIX-5801: Connection leak when creating a view with a where condition

2020-03-27 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new a149c41  PHOENIX-5801: Connection leak when creating a view with a 
where condition
a149c41 is described below

commit a149c4178cd094ed19f9f7ed1e3b828eff2c89c3
Author: Chinmay Kulkarni 
AuthorDate: Thu Mar 26 21:36:21 2020 -0700

PHOENIX-5801: Connection leak when creating a view with a where condition
---
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 15 +
 .../phoenix/coprocessor/WhereConstantParser.java   | 75 +++---
 2 files changed, 54 insertions(+), 36 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index a49e3e2..6485421 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -649,6 +649,21 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT 
{
 }
 
 @Test
+public void createViewWithWhereConditionNoConnLeak() throws SQLException {
+resetGlobalMetrics();
+String tableName = generateUniqueName();
+String viewName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.createStatement().execute("CREATE TABLE " + tableName +
+" (K INTEGER PRIMARY KEY, V VARCHAR(10))");
+conn.createStatement().execute("CREATE VIEW " + viewName +
+" AS SELECT * FROM " + tableName + " WHERE K = 1");
+}
+assertTrue(PhoenixRuntime.areGlobalClientMetricsBeingCollected());
+assertEquals(0, 
GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue());
+}
+
+@Test
 public void testClosingConnectionClearsMetrics() throws Exception {
 Connection conn = null;
 try {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
index c12ad18..ae6b865 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/WhereConstantParser.java
@@ -50,7 +50,6 @@ import com.google.common.collect.Lists;
 public class WhereConstantParser {
 
 public static PTable addViewInfoToPColumnsIfNeeded(PTable view) throws 
SQLException {
-   boolean[] viewColumnConstantsMatched = new 
boolean[view.getColumns().size()];
 byte[][] viewColumnConstantsToBe = new 
byte[view.getColumns().size()][];
 if (view.getViewStatement() == null) {
return view;
@@ -58,44 +57,48 @@ public class WhereConstantParser {
 SelectStatement select = new 
SQLParser(view.getViewStatement()).parseQuery();
 ParseNode whereNode = select.getWhere();
 ColumnResolver resolver = FromCompiler.getResolver(new TableRef(view));
-StatementContext context = new StatementContext(new 
PhoenixStatement(getConnectionlessConnection()), resolver);
-Expression expression = null;
-try {
-   expression = WhereCompiler.compile(context, whereNode);
-}
-catch (ColumnNotFoundException e) {
-   // if we could not find a column used in the view statement 
(which means its was dropped)
-   // this view is not valid any more
-   return null;
-}
-CreateTableCompiler.ViewWhereExpressionVisitor visitor =
-new CreateTableCompiler.ViewWhereExpressionVisitor(view, 
viewColumnConstantsToBe);
-expression.accept(visitor);
-
-BitSet isViewColumnReferencedToBe = new 
BitSet(view.getColumns().size());
-// Used to track column references in a view
-ExpressionCompiler expressionCompiler = new 
CreateTableCompiler.ColumnTrackingExpressionCompiler(context, 
isViewColumnReferencedToBe);
-whereNode.accept(expressionCompiler);
-
-List result = Lists.newArrayList();
-for (PColumn column : PTableImpl.getColumnsToClone(view)) {
-   boolean isViewReferenced = 
isViewColumnReferencedToBe.get(column.getPosition());
-   if ( (visitor.isUpdatable() || 
view.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(view)).equals(column))
 
-   && 
viewColumnConstantsToBe[column.getPosition()] != null) {
-   result.add(new PColumnImpl(column, 
viewColumnConstantsToBe[column.getPosition()], isViewReferenced));
-   
viewColumnConstantsMatched[column.g

[phoenix] branch 4.x updated: PHOENIX-5776 Phoenix pherf unit tests failing

2020-03-26 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 73a86be  PHOENIX-5776 Phoenix pherf unit tests failing
73a86be is described below

commit 73a86be9b588353457cd2c9de41239211330e7a7
Author: Sandeep Guggilam 
AuthorDate: Thu Mar 12 21:35:21 2020 -0700

PHOENIX-5776 Phoenix pherf unit tests failing

Signed-off-by: Chinmay Kulkarni 
---
 .../apache/phoenix/pherf/util/ResourceList.java| 29 +-
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index df5dbf7..64ee6ee 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -18,11 +18,6 @@
 
 package org.apache.phoenix.pherf.util;
 
-import org.apache.commons.lang3.StringUtils;
-import org.apache.phoenix.pherf.exception.PherfException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -30,18 +25,32 @@ import java.net.URI;
 import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipException;
 import java.util.zip.ZipFile;
 
+import org.apache.commons.lang3.StringUtils;
+import org.apache.phoenix.pherf.exception.PherfException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
 /**
  * list resources available from the classpath @ *
  */
 public class ResourceList {
 private static final Logger LOGGER = 
LoggerFactory.getLogger(ResourceList.class);
 private final String rootResourceDir;
+// Lists the directories to ignore meant for testing something else
+// when getting the resources from classpath
+private List dirsToIgnore = Lists.newArrayList("sql_files");
 
 public ResourceList(String rootResourceDir) {
 this.rootResourceDir = rootResourceDir;
@@ -165,6 +174,7 @@ public class ResourceList {
 final ArrayList retval = new ArrayList();
 final File[] fileList = directory.listFiles();
 for (final File file : fileList) {
+if (isIgnoredDir(file.getAbsolutePath())) continue;
 if (file.isDirectory()) {
 retval.addAll(getResourcesFromDirectory(file, pattern));
 } else {
@@ -178,4 +188,11 @@ public class ResourceList {
 }
 return retval;
 }
+
+private boolean isIgnoredDir(String path) {
+for (String dir : dirsToIgnore) {
+if (path.contains(dir)) return true;
+}
+return false;
+}
 }
\ No newline at end of file



[phoenix] branch master updated: PHOENIX-5776 Phoenix pherf unit tests failing

2020-03-26 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new d04ba4e  PHOENIX-5776 Phoenix pherf unit tests failing
d04ba4e is described below

commit d04ba4eb945ba502652888220097ebca44f4a4dd
Author: Sandeep Guggilam 
AuthorDate: Thu Mar 12 21:35:21 2020 -0700

PHOENIX-5776 Phoenix pherf unit tests failing

Signed-off-by: Chinmay Kulkarni 
---
 .../apache/phoenix/pherf/util/ResourceList.java| 29 +-
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index cec12d1..ef1aa0e 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -18,11 +18,6 @@
 
 package org.apache.phoenix.pherf.util;
 
-import org.apache.commons.lang3.StringUtils;
-import org.apache.phoenix.pherf.exception.PherfException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -31,18 +26,32 @@ import java.net.URL;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipException;
 import java.util.zip.ZipFile;
 
+import org.apache.commons.lang3.StringUtils;
+import org.apache.phoenix.pherf.exception.PherfException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
 /**
  * list resources available from the classpath @ *
  */
 public class ResourceList {
 private static final Logger LOGGER = 
LoggerFactory.getLogger(ResourceList.class);
 private final String rootResourceDir;
+// Lists the directories to ignore meant for testing something else
+// when getting the resources from classpath
+private List dirsToIgnore = Lists.newArrayList("sql_files");
 
 public ResourceList(String rootResourceDir) {
 this.rootResourceDir = rootResourceDir;
@@ -166,6 +175,7 @@ public class ResourceList {
 final ArrayList retval = new ArrayList();
 final File[] fileList = directory.listFiles();
 for (final File file : fileList) {
+if (isIgnoredDir(file.getAbsolutePath())) continue;
 if (file.isDirectory()) {
 retval.addAll(getResourcesFromDirectory(file, pattern));
 } else {
@@ -179,4 +189,11 @@ public class ResourceList {
 }
 return retval;
 }
+
+private boolean isIgnoredDir(String path) {
+for (String dir : dirsToIgnore) {
+if (path.contains(dir)) return true;
+}
+return false;
+}
 }
\ No newline at end of file



[phoenix] branch master updated: PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp

2020-03-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3c0002c  PHOENIX-5718 GetTable builds a table excluding the given 
clientTimeStamp
3c0002c is described below

commit 3c0002c51d9ac047816f3ad0cfbb5188c2fafa9a
Author: Sandeep Guggilam 
AuthorDate: Mon Mar 23 15:47:03 2020 -0700

PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/MetaDataEndpointImplIT.java| 32 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 22 +++
 2 files changed, 42 insertions(+), 12 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
index 6724da9..eeac2e9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -15,11 +15,13 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TableViewFinderResult;
 import org.apache.phoenix.util.ViewUtil;
 import org.junit.Test;
@@ -205,6 +207,34 @@ public class MetaDataEndpointImplIT extends 
ParallelStatsDisabledIT {
 assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, 
childView.toUpperCase()), "A", "B", "D");
 
 }
+
+@Test
+public void testUpdateCacheWithAlteringColumns() throws Exception {
+String tableName = generateUniqueName();
+try (PhoenixConnection conn = 
DriverManager.getConnection(getUrl()).unwrap(
+PhoenixConnection.class)) {
+String ddlFormat =
+"CREATE TABLE IF NOT EXISTS " + tableName + "  (" + " PK2 
INTEGER NOT NULL, "
++ "V1 INTEGER, V2 INTEGER "
++ " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+conn.createStatement().execute(ddlFormat);
+conn.createStatement().execute("ALTER TABLE " + tableName + " 
ADD V3 integer");
+PTable table = PhoenixRuntime.getTable(conn, 
tableName.toUpperCase());
+assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+
+// Set the SCN to the timestamp when V3 column is added
+Properties props = 
PropertiesUtil.deepCopy(conn.getClientInfo());
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(table.getTimeStamp()));
+
+try (PhoenixConnection metaConnection = new 
PhoenixConnection(conn, 
+conn.getQueryServices(), props)) {
+// Force update the cache and check if V3 is present in 
the returned table result
+table = PhoenixRuntime.getTableNoCache(metaConnection, 
tableName.toUpperCase());
+assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+}  
+}  
+}
+
 
 @Test
 public void testDroppingAColumn() throws Exception {
@@ -370,4 +400,4 @@ public class MetaDataEndpointImplIT extends 
ParallelStatsDisabledIT {
 assertEquals(expected, actual);
 }
 
-}
\ No newline at end of file
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 860e2d3..f371f98 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.coprocessor;
 
 import static org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow;
-import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG;
 import static 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MutationCode.UNABLE_TO_CREATE_CHILD_LINK;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_

[phoenix] branch 4.x updated: PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp

2020-03-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new bbcc362  PHOENIX-5718 GetTable builds a table excluding the given 
clientTimeStamp
bbcc362 is described below

commit bbcc362ee459492bfca7bf66aa1bdde85c25464f
Author: Sandeep Guggilam 
AuthorDate: Mon Mar 23 15:47:43 2020 -0700

PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/MetaDataEndpointImplIT.java| 35 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 23 +++---
 2 files changed, 46 insertions(+), 12 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
index dca4b6b..cd6c6f3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -17,11 +17,13 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TableViewFinderResult;
 import org.apache.phoenix.util.ViewUtil;
 import org.junit.Test;
@@ -207,6 +209,37 @@ public class MetaDataEndpointImplIT extends 
ParallelStatsDisabledIT {
 assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, 
childView.toUpperCase()), "A", "B", "D");
 
 }
+
+@Test
+public void testUpdateCacheWithAlteringColumns() throws Exception {
+String tableName = generateUniqueName();
+
+try (PhoenixConnection conn = 
DriverManager.getConnection(getUrl()).unwrap(
+PhoenixConnection.class)) {
+String ddlFormat =
+"CREATE TABLE IF NOT EXISTS " + tableName + "  (" + " PK2 
INTEGER NOT NULL, "
++ "V1 INTEGER, V2 INTEGER "
++ " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+conn.createStatement().execute(ddlFormat);
+conn.createStatement().execute("ALTER TABLE " + tableName + " ADD 
V3 integer");
+PTable table = PhoenixRuntime.getTable(conn, 
tableName.toUpperCase());
+assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+
+// Set the SCN to the timestamp when V3 column is added
+Properties props = PropertiesUtil.deepCopy(conn.getClientInfo());
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(table.getTimeStamp()));
+
+try(PhoenixConnection metaConnection = new PhoenixConnection(conn, 
+conn.getQueryServices(), props)) {
+// Force update the cache and check if V3 is present in the 
returned table result
+table = PhoenixRuntime.getTableNoCache(metaConnection, 
tableName.toUpperCase());
+assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+}
+}
+
+
+}
+
 
 @Test
 public void testDroppingAColumn() throws Exception {
@@ -376,4 +409,4 @@ public class MetaDataEndpointImplIT extends 
ParallelStatsDisabledIT {
 return new HTable(utility.getConfiguration(), catalogTable);
 }
 
-}
\ No newline at end of file
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 05c0b4d..02fd037 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -48,6 +48,7 @@ import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_V

svn commit: r1875560 - in /phoenix/site: publish/language/datatypes.html publish/language/functions.html publish/language/index.html publish/release.html source/src/site/markdown/release.md

2020-03-23 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Mon Mar 23 22:07:19 2020
New Revision: 1875560

URL: http://svn.apache.org/viewvc?rev=1875560&view=rev
Log:
(Sandeep Guggilam) PHOENIX-5746: Update release documentation to include 
versions information

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/release.html
phoenix/site/source/src/site/markdown/release.md

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1875560&r1=1875559&r2=1875560&view=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Mon Mar 23 22:07:19 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1875560&r1=1875559&r2=1875560&view=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Mon Mar 23 22:07:19 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1875560&r1=1875559&r2=1875560&view=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Mon Mar 23 22:07:19 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/release.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/release.html?rev=1875560&r1=1875559&r2=1875560&view=diff
==
--- phoenix/site/publish/release.html (original)
+++ phoenix/site/publish/release.html Mon Mar 23 22:07:19 2020
@@ -1,7 +1,7 @@
 
 
 
 
@@ -240,10 +240,10 @@ mvn clean deploy gpg:sign -DperformRelea
 mvn versions:set -DnewVersion=4.16.0-HBase-1.3-SNAPSHOT 
-DgenerateBackupPoms=false
  
 
+  Create a JIRA to update PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION and 
PHOENIX_PATCH_NUMBER in MetaDataProtocol.java appropriately to next version (4, 
16, 0 respectively in this case) and compatible_client_versions.json file with 
the client versions that are compatible against the next version ( In this case 
4.14.3 and 4.15.0 would be the backward compatible clients for 4.16.0 ). This 
Jira should be committed/marked with fixVersion of the next release 
candidate. 
   Add documentation of released version to the http://phoenix.apache.org/download.html;>downloads page and https://en.wikipedia.org/wiki/Apache_Phoenix;>wiki. 
   Send out an announcement email. See example https://www.mail-archive.com/dev@phoenix.apache.org/msg54764.html;>here.
 
   Bulk close Jiras that were marked for the release fixVersion. 
-  Finally, mark the version as released in Apache Phoenix Jira by 
searching for it in https://issues.apache.org/jira/projects/PHOENIX?selectedItem=com.atlassian.jira.jira-projects-plugin:release-pagestatus=unreleased;>Phoenix
 Releases. Go to the corresponding version and click “Release”. This 
way, the released version shows up in the “Released Versions” list when 
assigning “Affects Version/s” and “Fix Version/s” fields in Jira. 
   
  Congratulations! 
 

Modified: phoenix/site/source/src/site/markdown/release.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/release.md?rev=1875560&r1=1875559&r2=1875560&view=diff
==
--- phoenix/site/source/src/site/markdown/release.md (original)
+++ phoenix/site/source/src/site/markdown/release.md Mon Mar 23 22:07:19 2020
@@ -64,8 +64,9 @@ Follow the instructions. Signed binary a
 
 mvn versions:set -DnewVersion=4.16.0-HBase-1.3-SNAPSHOT 
-DgenerateBackupPoms=false
 
-9. Add documentation of released version to the [downloads 
page](http://phoenix.apache.org/download.html) and 
[wiki](https://en.wikipedia.org/wiki/Apache_Phoenix).
-10. Send out an announcement email. See example 
[here](https://www.mail-archive.com/dev@phoenix.apache.org/msg54764.html).
-11. Bulk close Jiras that were marked for the release fixVersion.  
+9. Create a JIRA to update PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION and 
PHOENIX_PATCH_NUMBER in MetaDataProtocol.java appropriately to next version (4, 
16, 0 respectively in this case) and compatible_client_versions.json file with 
the client versions that are compatible against the next version ( In this case 
4.14.3 and 4.15.0 would be the backward compatible clients for 4.16.0 ). This 
Jira should be committed/marked with fixVersion of the next release candidate.
+10. Add documentation of released version to the [downloads 
page](http://phoeni

[phoenix] branch 4.x updated: PHOENIX-5673 : The mutation state is silently getting cleared on the execution of any DDL

2020-03-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 364b4bb  PHOENIX-5673 : The mutation state is silently getting cleared 
on the execution of any DDL
364b4bb is described below

commit 364b4bb6209c79bea1fd5d5d7059355fb17d0064
Author: Siddhi Mehta 
AuthorDate: Thu Feb 20 10:31:01 2020 -0800

PHOENIX-5673 : The mutation state is silently getting cleared on the 
execution of any DDL

Signed-off-by: Chinmay Kulkarni 
---
 .../apache/phoenix/end2end/MutationStateIT.java| 51 ++
 .../apache/phoenix/exception/SQLExceptionCode.java |  8 ++--
 .../org/apache/phoenix/execute/MutationState.java  | 10 +++--
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  | 27 +++-
 .../org/apache/phoenix/query/QueryServices.java|  5 ++-
 .../apache/phoenix/query/QueryServicesOptions.java |  6 ++-
 .../apache/phoenix/execute/MutationStateTest.java  | 35 +++
 .../apache/phoenix/index/IndexMaintainerTest.java  |  1 +
 .../org/apache/phoenix/schema/MutationTest.java|  8 ++--
 9 files changed, 135 insertions(+), 16 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 096826d..f6a9993 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -37,6 +38,8 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -47,12 +50,15 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.Repeat;
 import org.apache.phoenix.util.RunUntilFailure;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 
 @RunWith(RunUntilFailure.class)
@@ -499,4 +505,49 @@ public class MutationStateIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Rule
+public ExpectedException exceptionRule = ExpectedException.none();
+
+@Test
+public void testDDLwithPendingMutations() throws Exception {
+String tableName = generateUniqueName();
+ensureTableCreated(getUrl(), tableName, TestUtil.PTSDB_NAME, null, 
null, null);
+Properties props = new Properties();
+props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, 
Boolean.toString(true));
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+// setting auto commit to false
+conn.setAutoCommit(false);
+
+// Run upsert queries but do not commit
+PreparedStatement stmt =
+conn.prepareStatement("UPSERT INTO " + tableName
++ " (inst,host,\"DATE\") 
VALUES(?,'b',CURRENT_DATE())");
+stmt.setString(1, "a");
+stmt.execute();
+// Create a ddl statement
+String tableName2 = generateUniqueName();
+String ddl = "CREATE TABLE " + tableName2 + " (V BIGINT PRIMARY 
KEY, K BIGINT)";
+exceptionRule.expect(SQLException.class);
+exceptionRule.expectMessage(
+
SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage());
+conn.createStatement().execute(ddl);
+}
+}
+
+@Test
+public void testNoPendingMutationsOnDDL() throws Exception {
+Properties props = new Properties();
+props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, 
Boolean.toString(true));
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+String ddl =
+"create table " + tableName + " ( id1 UNSIGNED_INT not 
null primary key,"
++ "appId1 VARCHAR)";

[phoenix] branch master updated: PHOENIX-5673 : The mutation state is silently getting cleared on the execution of any DDL

2020-03-23 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 788e753  PHOENIX-5673 : The mutation state is silently getting cleared 
on the execution of any DDL
788e753 is described below

commit 788e7530b2224882df161e2353d3d8d9b7b6eec7
Author: Siddhi Mehta 
AuthorDate: Thu Feb 20 10:31:01 2020 -0800

PHOENIX-5673 : The mutation state is silently getting cleared on the 
execution of any DDL

Signed-off-by: Chinmay Kulkarni 
---
 .../apache/phoenix/end2end/MutationStateIT.java| 52 ++
 .../apache/phoenix/exception/SQLExceptionCode.java |  8 ++--
 .../org/apache/phoenix/execute/MutationState.java  | 10 +++--
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  | 27 ++-
 .../org/apache/phoenix/query/QueryServices.java|  5 ++-
 .../apache/phoenix/query/QueryServicesOptions.java |  6 ++-
 .../apache/phoenix/execute/MutationStateTest.java  | 42 +++--
 .../apache/phoenix/index/IndexMaintainerTest.java  |  1 +
 .../org/apache/phoenix/schema/MutationTest.java|  8 ++--
 9 files changed, 140 insertions(+), 19 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 4d70d0a..1c1ce1d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -37,6 +38,8 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -47,12 +50,15 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.Repeat;
 import org.apache.phoenix.util.RunUntilFailure;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 
 @RunWith(RunUntilFailure.class)
@@ -498,4 +504,50 @@ public class MutationStateIT extends 
ParallelStatsDisabledIT {
 htable.close();
 }
 }
+
+@Rule
+public ExpectedException exceptionRule = ExpectedException.none();
+
+@Test
+public void testDDLwithPendingMutations() throws Exception {
+String tableName = generateUniqueName();
+ensureTableCreated(getUrl(), tableName, TestUtil.PTSDB_NAME, null, 
null, null);
+Properties props = new Properties();
+props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, 
Boolean.toString(true));
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+// setting auto commit to false
+conn.setAutoCommit(false);
+
+// Run upsert queries but do not commit
+PreparedStatement stmt =
+conn.prepareStatement("UPSERT INTO " + tableName
++ " (inst,host,\"DATE\") 
VALUES(?,'b',CURRENT_DATE())");
+stmt.setString(1, "a");
+stmt.execute();
+// Create a ddl statement
+String tableName2 = generateUniqueName();
+String ddl = "CREATE TABLE " + tableName2 + " (V BIGINT PRIMARY 
KEY, K BIGINT)";
+exceptionRule.expect(SQLException.class);
+exceptionRule.expectMessage(
+
SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage());
+conn.createStatement().execute(ddl);
+}
+}
+
+@Test
+public void testNoPendingMutationsOnDDL() throws Exception {
+Properties props = new Properties();
+props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, 
Boolean.toString(true));
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+String ddl =
+"create table " + tableName + " ( id1 UNSIGNED_INT not 
null primary key,"
++ "appId

[phoenix] branch master updated: PHOENIX-5607 (Addendum) Client-server backward compatibility tests

2020-03-18 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c318925  PHOENIX-5607 (Addendum) Client-server backward compatibility 
tests
c318925 is described below

commit c31892589147815b89220a89253c54e3eaf70d13
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 17 13:06:10 2020 -0700

PHOENIX-5607 (Addendum) Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 34 --
 .../it/resources/compatible_client_versions.json   | 10 +++
 phoenix-core/src/it/scripts/execute_query.sh   |  6 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java  |  3 --
 4 files changed, 45 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index dfa0032..ae5dc38 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -38,6 +38,8 @@ import java.sql.ResultSetMetaData;
 import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.curator.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -62,6 +65,11 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.gson.stream.JsonReader;
+
 /**
  * This class is meant for testing all compatible client versions 
  * against the current server version. It runs SQL queries with given 
@@ -74,6 +82,8 @@ public class BackwardCompatibilityIT {
 
 private static final String SQL_DIR = "src/it/resources/sql_files/";
 private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String COMPATIBLE_CLIENTS_JSON =
+"src/it/resources/compatible_client_versions.json";
 private static final String RESULT_PREFIX = "result_";
 private static final String SQL_EXTENSION = ".sql";
 private static final String TEXT_EXTENSION = ".txt";
@@ -95,8 +105,8 @@ public class BackwardCompatibilityIT {
 }
 
 @Parameters(name = "BackwardCompatibilityIT_compatibleClientVersion={0}")
-public static synchronized Collection data() {
-return MetaDataProtocol.COMPATIBLE_CLIENT_VERSIONS;
+public static synchronized Collection data() throws Exception {
+return computeClientVersions();
 }
 
 @Before
@@ -119,6 +129,26 @@ public class BackwardCompatibilityIT {
 hbaseTestUtil.shutdownMiniCluster();
 }
 }
+
+private static List computeClientVersions() throws Exception {
+String hbaseVersion = VersionInfo.getVersion();
+Pattern p = Pattern.compile("\\d+\\.\\d+");
+Matcher m = p.matcher(hbaseVersion);
+String hbaseProfile = null;
+if (m.find()) {
+hbaseProfile = m.group();
+}
+List clientVersions = Lists.newArrayList();
+JsonParser jsonParser = new JsonParser();
+JsonReader jsonReader =
+new JsonReader(new FileReader(COMPATIBLE_CLIENTS_JSON));
+JsonObject jsonObject =
+jsonParser.parse(jsonReader).getAsJsonObject();
+for (JsonElement clientVersion : 
jsonObject.get(hbaseProfile).getAsJsonArray()) {
+clientVersions.add(clientVersion.getAsString() + "-HBase-" + 
hbaseProfile);
+}
+return clientVersions;
+}
 
 /**
  * Scenario: 
diff --git a/phoenix-core/src/it/resources/compatible_client_versions.json 
b/phoenix-core/src/it/resources/compatible_client_versions.json
new file mode 100644
index 000..6feabf5
--- /dev/null
+++ b/phoenix-core/src/it/resources/compatible_client_versions.json
@@ -0,0 +1,10 @@
+{
+"_comment": "Lists all phoenix compatible client versions against the 
current branch version for a given hbase profile \
+ If hbase profile is 

[phoenix] branch 4.x updated: PHOENIX-5607 Client-server backward compatibility tests

2020-03-17 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new b83593b  PHOENIX-5607 Client-server backward compatibility tests
b83593b is described below

commit b83593b953931e8bf7c94d8d9be26e7b3a4d1203
Author: Sandeep Guggilam 
AuthorDate: Wed Mar 11 20:32:48 2020 -0700

PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 428 +
 .../it/resources/compatible_client_versions.json   |   7 +
 .../resources/gold_files/gold_query_add_data.txt   |  43 +++
 .../resources/gold_files/gold_query_add_delete.txt |  22 ++
 .../resources/gold_files/gold_query_create_add.txt |  32 ++
 .../src/it/resources/sql_files/add_data.sql|  27 ++
 .../src/it/resources/sql_files/add_delete.sql  |  26 ++
 .../src/it/resources/sql_files/create_add.sql  |  33 ++
 phoenix-core/src/it/resources/sql_files/query.sql  |  24 ++
 .../it/resources/sql_files/query_add_delete.sql|  26 ++
 .../src/it/resources/sql_files/query_more.sql  |  25 ++
 phoenix-core/src/it/scripts/execute_query.sh   |  40 ++
 .../phoenix/coprocessor/MetaDataProtocol.java  |   5 +-
 13 files changed, 735 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
new file mode 100644
index 000..ee105e2
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -0,0 +1,428 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.gson.stream.JsonReader;
+
+/**
+ * This class is meant for testing all compatible client versions 
+ * against the current server version. It runs SQL queries with given 
+ * client versions and compares the output against gold files
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.

[phoenix] branch 4.x-HBase-1.4 updated: (Addendum) PHOENIX-5607 Client-server backward compatibility tests

2020-03-10 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new ca290b5  (Addendum) PHOENIX-5607 Client-server backward compatibility 
tests
ca290b5 is described below

commit ca290b518c87debce5cb2c14d2ba9e4a12a07bff
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 10 13:07:24 2020 -0700

(Addendum) PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 2982f25..2d543fc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -147,7 +147,7 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 
 public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION 
+ "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER;
 public static final List COMPATIBLE_CLIENT_VERSIONS = 
-Arrays.asList("4.14.3-HBase-1.3", "4.15.0-HBase-1.3");
+Arrays.asList("4.14.3-HBase-1.4", "4.15.0-HBase-1.4");
  
 
 // TODO: pare this down to minimum, as we don't need duplicates for both 
table and column errors, nor should we need



[phoenix] branch 4.x-HBase-1.5 updated: (Addendum) PHOENIX-5607 Client-server backward compatibility tests

2020-03-10 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new cad00d4  (Addendum) PHOENIX-5607 Client-server backward compatibility 
tests
cad00d4 is described below

commit cad00d4c82cbe2062366287f13db6872b472f99c
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 10 15:46:49 2020 -0700

(Addendum) PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 2982f25..148dd03 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -147,7 +147,7 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 
 public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION 
+ "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER;
 public static final List COMPATIBLE_CLIENT_VERSIONS = 
-Arrays.asList("4.14.3-HBase-1.3", "4.15.0-HBase-1.3");
+Arrays.asList("4.15.0-HBase-1.5");
  
 
 // TODO: pare this down to minimum, as we don't need duplicates for both 
table and column errors, nor should we need



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5607 Client-server backward compatibility tests

2020-03-09 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new bb6726d  PHOENIX-5607 Client-server backward compatibility tests
bb6726d is described below

commit bb6726da2eb6a057fabdd49562a888cf9010c66e
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 3 13:04:39 2020 -0800

PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 398 +
 .../resources/gold_files/gold_query_add_data.txt   |  43 +++
 .../resources/gold_files/gold_query_add_delete.txt |  22 ++
 .../resources/gold_files/gold_query_create_add.txt |  32 ++
 .../src/it/resources/sql_files/add_data.sql|  27 ++
 .../src/it/resources/sql_files/add_delete.sql  |  26 ++
 .../src/it/resources/sql_files/create_add.sql  |  33 ++
 phoenix-core/src/it/resources/sql_files/query.sql  |  24 ++
 .../it/resources/sql_files/query_add_delete.sql|  26 ++
 .../src/it/resources/sql_files/query_more.sql  |  25 ++
 phoenix-core/src/it/scripts/execute_query.sh   |  40 +++
 .../phoenix/coprocessor/MetaDataProtocol.java  |   5 +-
 12 files changed, 700 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
new file mode 100644
index 000..dfa0032
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * This class is meant for testing all compatible client versions 
+ * against the current server version. It runs SQL queries with given 
+ * client versions and compares the output against gold files
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class BackwardCompatibilityIT {
+
+private static final String SQL_DIR = "src/it/resources/sql_files/";
+private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String RESULT_PREFIX = "result_";
+private static final 

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5607 Client-server backward compatibility tests

2020-03-09 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new dcecdec  PHOENIX-5607 Client-server backward compatibility tests
dcecdec is described below

commit dcecdec7cf78c8c2872b42cbf0347a80c79f83de
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 3 13:04:39 2020 -0800

PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 398 +
 .../resources/gold_files/gold_query_add_data.txt   |  43 +++
 .../resources/gold_files/gold_query_add_delete.txt |  22 ++
 .../resources/gold_files/gold_query_create_add.txt |  32 ++
 .../src/it/resources/sql_files/add_data.sql|  27 ++
 .../src/it/resources/sql_files/add_delete.sql  |  26 ++
 .../src/it/resources/sql_files/create_add.sql  |  33 ++
 phoenix-core/src/it/resources/sql_files/query.sql  |  24 ++
 .../it/resources/sql_files/query_add_delete.sql|  26 ++
 .../src/it/resources/sql_files/query_more.sql  |  25 ++
 phoenix-core/src/it/scripts/execute_query.sh   |  40 +++
 .../phoenix/coprocessor/MetaDataProtocol.java  |   5 +-
 12 files changed, 700 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
new file mode 100644
index 0000000..dfa0032
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * This class is meant for testing all compatible client versions 
+ * against the current server version. It runs SQL queries with given 
+ * client versions and compares the output against gold files
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class BackwardCompatibilityIT {
+
+private static final String SQL_DIR = "src/it/resources/sql_files/";
+private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String RESULT_PREFIX = "result_";
+private static final 

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5607 Client-server backward compatibility tests

2020-03-09 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new bdc0609  PHOENIX-5607 Client-server backward compatibility tests
bdc0609 is described below

commit bdc0609c00797861fd2d93e981fb92d5113d010b
Author: Sandeep Guggilam 
AuthorDate: Tue Mar 3 13:04:39 2020 -0800

PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 398 +
 .../resources/gold_files/gold_query_add_data.txt   |  43 +++
 .../resources/gold_files/gold_query_add_delete.txt |  22 ++
 .../resources/gold_files/gold_query_create_add.txt |  32 ++
 .../src/it/resources/sql_files/add_data.sql|  27 ++
 .../src/it/resources/sql_files/add_delete.sql  |  26 ++
 .../src/it/resources/sql_files/create_add.sql  |  33 ++
 phoenix-core/src/it/resources/sql_files/query.sql  |  24 ++
 .../it/resources/sql_files/query_add_delete.sql|  26 ++
 .../src/it/resources/sql_files/query_more.sql  |  25 ++
 phoenix-core/src/it/scripts/execute_query.sh   |  40 +++
 .../phoenix/coprocessor/MetaDataProtocol.java  |   5 +-
 12 files changed, 700 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
new file mode 100644
index 0000000..dfa0032
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * This class is meant for testing all compatible client versions 
+ * against the current server version. It runs SQL queries with given 
+ * client versions and compares the output against gold files
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class BackwardCompatibilityIT {
+
+private static final String SQL_DIR = "src/it/resources/sql_files/";
+private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String RESULT_PREFIX = "result_";
+private static final 

[phoenix] branch master updated: PHOENIX-5607 Client-server backward compatibility tests

2020-03-09 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 562711c  PHOENIX-5607 Client-server backward compatibility tests
562711c is described below

commit 562711cf4311f98fb07da85a36eede229c9388b4
Author: Sandeep Guggilam 
AuthorDate: Thu Mar 5 21:42:03 2020 -0800

PHOENIX-5607 Client-server backward compatibility tests

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 398 +
 .../resources/gold_files/gold_query_add_data.txt   |  43 +++
 .../resources/gold_files/gold_query_add_delete.txt |  22 ++
 .../resources/gold_files/gold_query_create_add.txt |  32 ++
 .../src/it/resources/sql_files/add_data.sql|  27 ++
 .../src/it/resources/sql_files/add_delete.sql  |  26 ++
 .../src/it/resources/sql_files/create_add.sql  |  33 ++
 phoenix-core/src/it/resources/sql_files/query.sql  |  24 ++
 .../it/resources/sql_files/query_add_delete.sql|  26 ++
 .../src/it/resources/sql_files/query_more.sql  |  25 ++
 phoenix-core/src/it/scripts/execute_query.sh   |  40 +++
 .../phoenix/coprocessor/MetaDataProtocol.java  |   3 +
 12 files changed, 699 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
new file mode 100644
index 0000000..dfa0032
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * This class is meant for testing all compatible client versions 
+ * against the current server version. It runs SQL queries with given 
+ * client versions and compares the output against gold files
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class BackwardCompatibilityIT {
+
+private static final String SQL_DIR = "src/it/resources/sql_files/";
+private static final String RESULT_DIR = "src/it/resources/gold_files/";
+private static final String RESULT_PREFIX = "result_";
+private static final String SQL_EXTENSION = "

[phoenix] branch master updated: PHOENIX-5636: Improve the error message when client connects to server with higher major version

2020-03-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3013c3e  PHOENIX-5636: Improve the error message when client connects 
to server with higher major version
3013c3e is described below

commit 3013c3e954ebdbec7274cb950e3583bc08975135
Author: Christine Feng 
AuthorDate: Fri Jan 31 14:33:24 2020 -0800

PHOENIX-5636: Improve the error message when client connects to server with 
higher major version

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/query/ConnectionQueryServicesImpl.java | 57 ++
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 48 --
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 38 +++
 3 files changed, 109 insertions(+), 34 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 06f44d4..d5b0720 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1487,20 +1487,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return MetaDataUtil.decodeHasIndexWALCodec(serverVersion);
 }
 
-private static boolean isCompatible(Long serverVersion) {
-if (serverVersion == null) {
-return false;
-}
-return MetaDataUtil.areClientAndServerCompatible(serverVersion);
-}
 
 private void checkClientServerCompatibility(byte[] metaTable) throws 
SQLException,
 AccessDeniedException {
-StringBuilder buf = new StringBuilder("Newer Phoenix clients can't 
communicate with older "
-+ "Phoenix servers. The following servers require an updated "
-+ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
-+ " to be put in the classpath of HBase: ");
-boolean isIncompatible = false;
+StringBuilder errorMessage = new StringBuilder();
 int minHBaseVersion = Integer.MAX_VALUE;
 boolean isTableNamespaceMappingEnabled = false;
 long systemCatalogTimestamp = Long.MAX_VALUE;
@@ -1553,11 +1543,26 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 long serverJarVersion = versionResponse.getVersion();
 isTableNamespaceMappingEnabled |= 
MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion);
 
-if (!isCompatible(serverJarVersion)) {
-isIncompatible = true;
-HRegionLocation name = regionMap.get(result.getKey());
-buf.append(name);
-buf.append(';');
+MetaDataUtil.ClientServerCompatibility compatibility = 
MetaDataUtil.areClientAndServerCompatible(serverJarVersion);
+if (!compatibility.getIsCompatible()) {
+if (compatibility.getErrorCode() == 
SQLExceptionCode.OUTDATED_JARS.getErrorCode()) {
+HRegionLocation name = regionMap.get(result.getKey());
+errorMessage.append("Newer Phoenix clients can't 
communicate with older "
++ "Phoenix servers. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion)
++ " The following servers require an updated "
++ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
++ " to be put in the classpath of HBase: ");
+errorMessage.append(name);
+errorMessage.append(';');
+} else if (compatibility.getErrorCode() == 
SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) {
+errorMessage.append("Major version of client is less 
than that of the server. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion));
+}
 }
 hasIndexWALCodec &= hasIndexWALCodec(serverJarVersion);
 if (minHBaseVersion > 
MetaDataUtil.decodeHBaseVersion(serverJarVersion)) {
@@ -1570,6 +1575,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
  

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5636: Improve the error message when client connects to server with higher major version

2020-03-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 9c7626f  PHOENIX-5636: Improve the error message when client connects 
to server with higher major version
9c7626f is described below

commit 9c7626f36f2b2bd7bb159d9029ff6bc66e93d728
Author: Christine Feng 
AuthorDate: Fri Jan 31 14:33:24 2020 -0800

PHOENIX-5636: Improve the error message when client connects to server with 
higher major version

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/query/ConnectionQueryServicesImpl.java | 57 ++
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 49 +--
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 38 +++
 3 files changed, 110 insertions(+), 34 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 49a1405..b708b75 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1479,20 +1479,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return MetaDataUtil.decodeHasIndexWALCodec(serverVersion);
 }
 
-private static boolean isCompatible(Long serverVersion) {
-if (serverVersion == null) {
-return false;
-}
-return MetaDataUtil.areClientAndServerCompatible(serverVersion);
-}
 
 private void checkClientServerCompatibility(byte[] metaTable) throws 
SQLException,
 AccessDeniedException {
-StringBuilder buf = new StringBuilder("Newer Phoenix clients can't 
communicate with older "
-+ "Phoenix servers. The following servers require an updated "
-+ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
-+ " to be put in the classpath of HBase: ");
-boolean isIncompatible = false;
+StringBuilder errorMessage = new StringBuilder();
 int minHBaseVersion = Integer.MAX_VALUE;
 boolean isTableNamespaceMappingEnabled = false;
 long systemCatalogTimestamp = Long.MAX_VALUE;
@@ -1545,11 +1535,26 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 long serverJarVersion = versionResponse.getVersion();
 isTableNamespaceMappingEnabled |= 
MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion);
 
-if (!isCompatible(serverJarVersion)) {
-isIncompatible = true;
-HRegionLocation name = regionMap.get(result.getKey());
-buf.append(name);
-buf.append(';');
+MetaDataUtil.ClientServerCompatibility compatibility = 
MetaDataUtil.areClientAndServerCompatible(serverJarVersion);
+if (!compatibility.getIsCompatible()) {
+if (compatibility.getErrorCode() == 
SQLExceptionCode.OUTDATED_JARS.getErrorCode()) {
+HRegionLocation name = regionMap.get(result.getKey());
+errorMessage.append("Newer Phoenix clients can't 
communicate with older "
++ "Phoenix servers. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion)
++ " The following servers require an updated "
++ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
++ " to be put in the classpath of HBase: ");
+errorMessage.append(name);
+errorMessage.append(';');
+} else if (compatibility.getErrorCode() == 
SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) {
+errorMessage.append("Major version of client is less 
than that of the server. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion));
+}
 }
 hasIndexWALCodec &= hasIndexWALCodec(serverJarVersion);
 if (minHBaseVersion > 
MetaDataUtil.decodeHBaseVersion(serverJarVersion)) {
@@ -1562,6 +1567,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
  

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5636: Improve the error message when client connects to server with higher major version

2020-03-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 4522286  PHOENIX-5636: Improve the error message when client connects 
to server with higher major version
4522286 is described below

commit 45222868179cde5d2e58849eb9ac2a23a1c073e6
Author: Christine Feng 
AuthorDate: Fri Jan 31 14:33:24 2020 -0800

PHOENIX-5636: Improve the error message when client connects to server with 
higher major version

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/query/ConnectionQueryServicesImpl.java | 57 ++
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 49 +--
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 38 +++
 3 files changed, 110 insertions(+), 34 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 49a1405..b708b75 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1479,20 +1479,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return MetaDataUtil.decodeHasIndexWALCodec(serverVersion);
 }
 
-private static boolean isCompatible(Long serverVersion) {
-if (serverVersion == null) {
-return false;
-}
-return MetaDataUtil.areClientAndServerCompatible(serverVersion);
-}
 
 private void checkClientServerCompatibility(byte[] metaTable) throws 
SQLException,
 AccessDeniedException {
-StringBuilder buf = new StringBuilder("Newer Phoenix clients can't 
communicate with older "
-+ "Phoenix servers. The following servers require an updated "
-+ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
-+ " to be put in the classpath of HBase: ");
-boolean isIncompatible = false;
+StringBuilder errorMessage = new StringBuilder();
 int minHBaseVersion = Integer.MAX_VALUE;
 boolean isTableNamespaceMappingEnabled = false;
 long systemCatalogTimestamp = Long.MAX_VALUE;
@@ -1545,11 +1535,26 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 long serverJarVersion = versionResponse.getVersion();
 isTableNamespaceMappingEnabled |= 
MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion);
 
-if (!isCompatible(serverJarVersion)) {
-isIncompatible = true;
-HRegionLocation name = regionMap.get(result.getKey());
-buf.append(name);
-buf.append(';');
+MetaDataUtil.ClientServerCompatibility compatibility = 
MetaDataUtil.areClientAndServerCompatible(serverJarVersion);
+if (!compatibility.getIsCompatible()) {
+if (compatibility.getErrorCode() == 
SQLExceptionCode.OUTDATED_JARS.getErrorCode()) {
+HRegionLocation name = regionMap.get(result.getKey());
+errorMessage.append("Newer Phoenix clients can't 
communicate with older "
++ "Phoenix servers. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion)
++ " The following servers require an updated "
++ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
++ " to be put in the classpath of HBase: ");
+errorMessage.append(name);
+errorMessage.append(';');
+} else if (compatibility.getErrorCode() == 
SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) {
+errorMessage.append("Major version of client is less 
than that of the server. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion));
+}
 }
 hasIndexWALCodec &= hasIndexWALCodec(serverJarVersion);
 if (minHBaseVersion > 
MetaDataUtil.decodeHBaseVersion(serverJarVersion)) {
@@ -1562,6 +1567,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
  

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5636: Improve the error message when client connects to server with higher major version

2020-03-02 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 9b92610  PHOENIX-5636: Improve the error message when client connects 
to server with higher major version
9b92610 is described below

commit 9b92610fbd65f3847af0b5ff3f48bfd16f2dd89b
Author: Christine Feng 
AuthorDate: Fri Jan 31 14:33:24 2020 -0800

PHOENIX-5636: Improve the error message when client connects to server with 
higher major version

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/query/ConnectionQueryServicesImpl.java | 57 ++
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 49 +--
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 38 +++
 3 files changed, 110 insertions(+), 34 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 49a1405..b708b75 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1479,20 +1479,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return MetaDataUtil.decodeHasIndexWALCodec(serverVersion);
 }
 
-private static boolean isCompatible(Long serverVersion) {
-if (serverVersion == null) {
-return false;
-}
-return MetaDataUtil.areClientAndServerCompatible(serverVersion);
-}
 
 private void checkClientServerCompatibility(byte[] metaTable) throws 
SQLException,
 AccessDeniedException {
-StringBuilder buf = new StringBuilder("Newer Phoenix clients can't 
communicate with older "
-+ "Phoenix servers. The following servers require an updated "
-+ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
-+ " to be put in the classpath of HBase: ");
-boolean isIncompatible = false;
+StringBuilder errorMessage = new StringBuilder();
 int minHBaseVersion = Integer.MAX_VALUE;
 boolean isTableNamespaceMappingEnabled = false;
 long systemCatalogTimestamp = Long.MAX_VALUE;
@@ -1545,11 +1535,26 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 long serverJarVersion = versionResponse.getVersion();
 isTableNamespaceMappingEnabled |= 
MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion);
 
-if (!isCompatible(serverJarVersion)) {
-isIncompatible = true;
-HRegionLocation name = regionMap.get(result.getKey());
-buf.append(name);
-buf.append(';');
+MetaDataUtil.ClientServerCompatibility compatibility = 
MetaDataUtil.areClientAndServerCompatible(serverJarVersion);
+if (!compatibility.getIsCompatible()) {
+if (compatibility.getErrorCode() == 
SQLExceptionCode.OUTDATED_JARS.getErrorCode()) {
+HRegionLocation name = regionMap.get(result.getKey());
+errorMessage.append("Newer Phoenix clients can't 
communicate with older "
++ "Phoenix servers. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion)
++ " The following servers require an updated "
++ QueryConstants.DEFAULT_COPROCESS_JAR_NAME
++ " to be put in the classpath of HBase: ");
+errorMessage.append(name);
+errorMessage.append(';');
+} else if (compatibility.getErrorCode() == 
SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) {
+errorMessage.append("Major version of client is less 
than that of the server. Client version: "
++ MetaDataProtocol.CURRENT_CLIENT_VERSION
++ "; Server version: "
++ getServerVersion(serverJarVersion));
+}
 }
 hasIndexWALCodec &= hasIndexWALCodec(serverJarVersion);
 if (minHBaseVersion > 
MetaDataUtil.decodeHBaseVersion(serverJarVersion)) {
@@ -1562,6 +1567,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
  

svn commit: r1874594 - in /phoenix/site/publish: language/datatypes.html language/functions.html language/index.html release.html

2020-02-27 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Thu Feb 27 21:32:37 2020
New Revision: 1874594

URL: http://svn.apache.org/viewvc?rev=1874594&view=rev
Log:
Add step to mark version as released in Jira in release.html

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/release.html

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1874594&r1=1874593&r2=1874594&view=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Thu Feb 27 21:32:37 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1874594&r1=1874593&r2=1874594&view=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Thu Feb 27 21:32:37 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1874594&r1=1874593&r2=1874594&view=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Thu Feb 27 21:32:37 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/release.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/release.html?rev=1874594&r1=1874593&r2=1874594&view=diff
==
--- phoenix/site/publish/release.html (original)
+++ phoenix/site/publish/release.html Thu Feb 27 21:32:37 2020
@@ -1,7 +1,7 @@
 
 
 
 
@@ -243,6 +243,7 @@ mvn versions:set -DnewVersion=4.16.0-HBa
   Add documentation of released version to the http://phoenix.apache.org/download.html;>downloads page and https://en.wikipedia.org/wiki/Apache_Phoenix;>wiki. 
   Send out an announcement email. See example https://www.mail-archive.com/dev@phoenix.apache.org/msg54764.html;>here.
 
   Bulk close Jiras that were marked for the release fixVersion. 
+  Finally, mark the version as released in Apache Phoenix Jira by 
searching for it in https://issues.apache.org/jira/projects/PHOENIX?selectedItem=com.atlassian.jira.jira-projects-plugin:release-pagestatus=unreleased;>Phoenix
 Releases. Go to the corresponding version and click “Release”. This 
way, the released version shows up in the “Released Versions” list when 
assigning “Affects Version/s” and “Fix Version/s” fields in Jira. 
   
  Congratulations! 
 




[phoenix] branch 4.15-HBase-1.4 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.15-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.15-HBase-1.4 by this push:
 new 3e54447  PHOENIX-5633: Add table name info to scan logging
3e54447 is described below

commit 3e54447140302f3632af4efeebb94600f7460f86
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..56c27c9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1229,7 +1229,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch 4.15-HBase-1.5 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.15-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.15-HBase-1.5 by this push:
 new 3a3eee6  PHOENIX-5633: Add table name info to scan logging
3a3eee6 is described below

commit 3a3eee64a47927afecaf4b3c8f058ffa7238bc2e
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..56c27c9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1229,7 +1229,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new a6e1c71  PHOENIX-5633: Add table name info to scan logging
a6e1c71 is described below

commit a6e1c713fccaae1c0f06c3e48e7b9e9d7f10f959
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 2dcc88b..3d61236 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1238,7 +1238,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch 4.15-HBase-1.3 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.15-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.15-HBase-1.3 by this push:
 new 00594cc  PHOENIX-5633: Add table name info to scan logging
00594cc is described below

commit 00594cc900a57e8bdcae13a548d0dc1d4ccc9b15
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..56c27c9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1229,7 +1229,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch master updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f592bfd  PHOENIX-5633: Add table name info to scan logging
f592bfd is described below

commit f592bfdecbf5a50b54f71bad82f52a0504d6eaf5
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 3f10ad0..42cc6f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-"Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-"Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 12a6b3a..02e7e6c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1238,7 +1238,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new ec86e26  PHOENIX-5633: Add table name info to scan logging
ec86e26 is described below

commit ec86e26969434eb96a038824462703e6790dc377
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 2dcc88b..3d61236 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1238,7 +1238,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5633: Add table name info to scan logging

2020-02-14 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new efd443b  PHOENIX-5633: Add table name info to scan logging
efd443b is described below

commit efd443b12ededa000e73a32545266c16058f3b31
Author: Christine Feng 
AuthorDate: Thu Jan 23 12:27:36 2020 -0800

PHOENIX-5633: Add table name info to scan logging

Signed-off-by: Chinmay Kulkarni 
---
 .../src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java   | 4 ++--
 .../src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index caa60a0..89c5233 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -359,13 +359,13 @@ public abstract class BaseQueryPlan implements QueryPlan {
 
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Scan ready for iteration: " + scan, connection));
+"Scan on table " + 
context.getCurrentTable().getTable().getName() + " ready for iteration: " + 
scan, connection));
 }
 
 ResultIterator iterator =  newIterator(scanGrouper, scan, caches);
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations(
-   "Iterator ready: " + iterator, connection));
+"Iterator for table " + 
context.getCurrentTable().getTable().getName() + " ready: " + iterator, 
connection));
 }
 
 // wrap the iterator so we start/end tracing as we expect
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 2dcc88b..3d61236 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1238,7 +1238,7 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 public List getIterators() throws SQLException {
 if (LOGGER.isDebugEnabled()) {
 LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " 
+ this,
-ScanUtil.getCustomAnnotations(scan)));
+ScanUtil.getCustomAnnotations(scan)) + "on table " + 
context.getCurrentTable().getTable().getName());
 }
 boolean isReverse = ScanUtil.isReversed(scan);
 boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;



[phoenix] branch master updated: PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

2020-02-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new af0865f  PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException
af0865f is described below

commit af0865f9e6139841f794d8a8f0b44fc57e139dcb
Author: Sandeep Guggilam 
AuthorDate: Thu Feb 6 17:29:50 2020 -0800

PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/coprocessor/MetaDataProtocol.java  |  5 +--
 .../phoenix/query/ConnectionQueryServicesImpl.java | 37 +++---
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 52f0835..9fb308f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -40,11 +40,11 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
-import org.apache.phoenix.util.MetaDataUtil;
 
 /**
  *
@@ -97,7 +97,8 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 // TODO Need to account for the inevitable 4.14 release too
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_0_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = 
MIN_TABLE_TIMESTAMP + 29;
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = 
MIN_TABLE_TIMESTAMP + 31;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0;
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 20308a5..06f44d4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,12 +17,13 @@
  */
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS;
 import static 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.MAX_VERSIONS;
-import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.TTL;
 import static 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE;
-import static 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS;
+import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.TTL;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
@@ -68,8 +69,8 @@ import static 
org.apache.phoenix.util.UpgradeUtil.addParentToChildLinks;
 import static org.apache.phoenix.util.UpgradeUtil.addViewIndexToParentLinks;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.moveChildLinks;
-import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -110,7 +111,6 @@ import java.util.regex.Pattern;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -164,8 +164,8 @@ import

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

2020-02-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 8636770  PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException
8636770 is described below

commit 86367709f39169281bb63d05980caf5c4066a62f
Author: Sandeep Guggilam 
AuthorDate: Thu Feb 6 17:09:11 2020 -0800

PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/coprocessor/MetaDataProtocol.java  |  5 +--
 .../phoenix/query/ConnectionQueryServicesImpl.java | 37 +++---
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a83b04b..3f5e2fb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -40,11 +40,11 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
-import org.apache.phoenix.util.MetaDataUtil;
 
 /**
  *
@@ -94,8 +94,9 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = 
MIN_TABLE_TIMESTAMP + 28;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = 
MIN_TABLE_TIMESTAMP + 29;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = 
MIN_TABLE_TIMESTAMP + 31;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 // Version below which we should disallow usage of mutable secondary 
indexing.
 public static final int MUTABLE_SI_VERSION_THRESHOLD = 
VersionUtil.encodeVersion("0", "94", "10");
 public static final int MAX_LOCAL_SI_VERSION_DISALLOW = 
VersionUtil.encodeVersion("0", "98", "8");
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4f79097..49a1405 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,11 +17,12 @@
  */
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
-import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
 import static org.apache.hadoop.hbase.HColumnDescriptor.KEEP_DELETED_CELLS;
+import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
+import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
@@ -67,8 +68,8 @@ import static 
org.apache.phoenix.util.UpgradeUtil.addParentToChildLinks;
 import static org.apache.phoenix.util.UpgradeUtil.addViewIndexToParentLinks;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.moveChildLinks;
-import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -108,7 +109,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

2020-02-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 09f97e0  PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException
09f97e0 is described below

commit 09f97e05b68167dc2abab5d632a0d89822e98353
Author: Sandeep Guggilam 
AuthorDate: Thu Feb 6 17:09:11 2020 -0800

PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/coprocessor/MetaDataProtocol.java  |  5 +--
 .../phoenix/query/ConnectionQueryServicesImpl.java | 37 +++---
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a83b04b..3f5e2fb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -40,11 +40,11 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
-import org.apache.phoenix.util.MetaDataUtil;
 
 /**
  *
@@ -94,8 +94,9 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = 
MIN_TABLE_TIMESTAMP + 28;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = 
MIN_TABLE_TIMESTAMP + 29;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = 
MIN_TABLE_TIMESTAMP + 31;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 // Version below which we should disallow usage of mutable secondary 
indexing.
 public static final int MUTABLE_SI_VERSION_THRESHOLD = 
VersionUtil.encodeVersion("0", "94", "10");
 public static final int MAX_LOCAL_SI_VERSION_DISALLOW = 
VersionUtil.encodeVersion("0", "98", "8");
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4f79097..49a1405 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,11 +17,12 @@
  */
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
-import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
 import static org.apache.hadoop.hbase.HColumnDescriptor.KEEP_DELETED_CELLS;
+import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
+import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
@@ -67,8 +68,8 @@ import static 
org.apache.phoenix.util.UpgradeUtil.addParentToChildLinks;
 import static org.apache.phoenix.util.UpgradeUtil.addViewIndexToParentLinks;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.moveChildLinks;
-import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -108,7 +109,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

2020-02-13 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 3743c7f  PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException
3743c7f is described below

commit 3743c7f501a3a0583b26a2dd272631a5343f2602
Author: Sandeep Guggilam 
AuthorDate: Thu Feb 6 17:09:11 2020 -0800

PHOENIX-5714 Upgrade to 4.16 throwing ColumnNotFoundException

Signed-off-by: Chinmay Kulkarni 
---
 .../phoenix/coprocessor/MetaDataProtocol.java  |  5 +--
 .../phoenix/query/ConnectionQueryServicesImpl.java | 37 +++---
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a83b04b..3f5e2fb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -40,11 +40,11 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
-import org.apache.phoenix.util.MetaDataUtil;
 
 /**
  *
@@ -94,8 +94,9 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = 
MIN_TABLE_TIMESTAMP + 28;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = 
MIN_TABLE_TIMESTAMP + 29;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = 
MIN_TABLE_TIMESTAMP + 31;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 // Version below which we should disallow usage of mutable secondary 
indexing.
 public static final int MUTABLE_SI_VERSION_THRESHOLD = 
VersionUtil.encodeVersion("0", "94", "10");
 public static final int MAX_LOCAL_SI_VERSION_DISALLOW = 
VersionUtil.encodeVersion("0", "98", "8");
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4f79097..49a1405 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,11 +17,12 @@
  */
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
-import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
 import static org.apache.hadoop.hbase.HColumnDescriptor.KEEP_DELETED_CELLS;
+import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
+import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
@@ -67,8 +68,8 @@ import static 
org.apache.phoenix.util.UpgradeUtil.addParentToChildLinks;
 import static org.apache.phoenix.util.UpgradeUtil.addViewIndexToParentLinks;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.moveChildLinks;
-import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -108,7 +109,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.annotation.concurrent.GuardedBy;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration

svn commit: r1873963 - in /phoenix/site/publish/language: datatypes.html functions.html index.html

2020-02-12 Thread chinmayskulkarni
Author: chinmayskulkarni
Date: Wed Feb 12 23:22:46 2020
New Revision: 1873963

URL: http://svn.apache.org/viewvc?rev=1873963&view=rev
Log:
(Nitesh Maheshwari) PHOENIX-5695: Phoenix website build.sh should return when 
child script has errors

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1873963&r1=1873962&r2=1873963&view=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Wed Feb 12 23:22:46 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1873963&r1=1873962&r2=1873963&view=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Wed Feb 12 23:22:46 2020
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1873963&r1=1873962&r2=1873963&view=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Wed Feb 12 23:22:46 2020
@@ -1,7 +1,7 @@
 
 
 
 




[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5697 : Use try-with-resources to avoid leakage

2020-02-07 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new e2c0b33  PHOENIX-5697 : Use try-with-resources to avoid leakage
e2c0b33 is described below

commit e2c0b33ee54799e8bf28fe9c3fa8206699e8bb2e
Author: Viraj Jasani 
AuthorDate: Fri Jan 24 11:12:28 2020 -0800

PHOENIX-5697 : Use try-with-resources to avoid leakage

Signed-off-by: Chinmay Kulkarni 
---
 .../write/AbstractParallelWriterIndexCommitter.java| 13 -
 .../write/TrackingParallelWriterIndexCommitter.java| 12 
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 18 --
 3 files changed, 12 insertions(+), 31 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
index 9e94e87..2fd70de 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
@@ -143,7 +143,6 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 @SuppressWarnings("deprecation")
 @Override
 public Void call() throws Exception {
-Table table = null;
 // this may have been queued, so another task infront of 
us may have failed, so we should
 // early exit, if that's the case
 throwFailureIfDone();
@@ -176,9 +175,10 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 else {
 factory = retryingFactory;
 }
-table = factory.getTable(tableReference.get());
-throwFailureIfDone();
-table.batch(mutations, null);
+try (Table table = 
factory.getTable(tableReference.get())) {
+throwFailureIfDone();
+table.batch(mutations, null);
+}
 } catch (SingleIndexWriteFailureException e) {
 throw e;
 } catch (IOException e) {
@@ -188,11 +188,6 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 Thread.currentThread().interrupt();
 throw new 
SingleIndexWriteFailureException(tableReference.toString(), mutations, e, 
PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
 }
-finally{
-if (table != null) {
-table.close();
-}
-}
 return null;
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 76ec32a..7f85aee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -160,7 +160,6 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 @SuppressWarnings("deprecation")
 @Override
 public Boolean call() throws Exception {
-HTableInterface table = null;
 try {
 // this may have been queued, but there was an 
abort/stop so we try to early exit
 throwFailureIfDone();
@@ -193,19 +192,16 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 else {
 factory = retryingFactory;
 }
-table = factory.getTable(tableReference.get());
-throwFailureIfDone();
-table.batch(mutations);
+try (HTableInterface table = 
factory.getTable(tableReference.get())) {
+  throwFailureIfDone();
+  table.batch(mutations);
+}
 } catch (InterruptedException e) {
 // reset the interrupt status on the thread
 Thread.currentThread().interrupt();

[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5697 : Use try-with-resources to avoid leakage

2020-02-07 Thread chinmayskulkarni
This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new b6b7d5e  PHOENIX-5697 : Use try-with-resources to avoid leakage
b6b7d5e is described below

commit b6b7d5ed201d1109e468929f8f8bb8c0f79cd7bb
Author: Viraj Jasani 
AuthorDate: Fri Jan 24 11:12:28 2020 -0800

PHOENIX-5697 : Use try-with-resources to avoid leakage

Signed-off-by: Chinmay Kulkarni 
---
 .../write/AbstractParallelWriterIndexCommitter.java| 13 -
 .../write/TrackingParallelWriterIndexCommitter.java| 12 
 .../java/org/apache/phoenix/util/MetaDataUtil.java | 18 --
 3 files changed, 12 insertions(+), 31 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
index 9e94e87..2fd70de 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java
@@ -143,7 +143,6 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 @SuppressWarnings("deprecation")
 @Override
 public Void call() throws Exception {
-Table table = null;
 // this may have been queued, so another task infront of 
us may have failed, so we should
 // early exit, if that's the case
 throwFailureIfDone();
@@ -176,9 +175,10 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 else {
 factory = retryingFactory;
 }
-table = factory.getTable(tableReference.get());
-throwFailureIfDone();
-table.batch(mutations, null);
+try (Table table = 
factory.getTable(tableReference.get())) {
+throwFailureIfDone();
+table.batch(mutations, null);
+}
 } catch (SingleIndexWriteFailureException e) {
 throw e;
 } catch (IOException e) {
@@ -188,11 +188,6 @@ public abstract class AbstractParallelWriterIndexCommitter 
implements IndexCommi
 Thread.currentThread().interrupt();
 throw new 
SingleIndexWriteFailureException(tableReference.toString(), mutations, e, 
PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
 }
-finally{
-if (table != null) {
-table.close();
-}
-}
 return null;
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 072bcae..dba25e3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -159,7 +159,6 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 @SuppressWarnings("deprecation")
 @Override
 public Boolean call() throws Exception {
-HTableInterface table = null;
 try {
 // this may have been queued, but there was an 
abort/stop so we try to early exit
 throwFailureIfDone();
@@ -192,19 +191,16 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 else {
 factory = retryingFactory;
 }
-table = factory.getTable(tableReference.get());
-throwFailureIfDone();
-table.batch(mutations);
+try (HTableInterface table = 
factory.getTable(tableReference.get())) {
+  throwFailureIfDone();
+  table.batch(mutations);
+}
 } catch (InterruptedException e) {
 // reset the interrupt status on the thread
 Thread.currentThread().interrupt();

  1   2   3   4   >