http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/encode_test.go
----------------------------------------------------------------------
diff --git a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/encode_test.go b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/encode_test.go
new file mode 100644
index 0000000..3a0f728
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/encode_test.go
@@ -0,0 +1,758 @@
+package pq
+
+import (
+       "bytes"
+       "database/sql"
+       "fmt"
+       "strings"
+       "testing"
+       "time"
+
+       "github.com/lib/pq/oid"
+)
+
+func TestScanTimestamp(t *testing.T) {
+       var nt NullTime
+       tn := time.Now()
+       nt.Scan(tn)
+       if !nt.Valid {
+               t.Errorf("Expected Valid=true")
+       }
+       if nt.Time != tn {
+               t.Errorf("Time value mismatch")
+       }
+}
+
+func TestScanNilTimestamp(t *testing.T) {
+       var nt NullTime
+       nt.Scan(nil)
+       if nt.Valid {
+               t.Errorf("Expected Valid=false")
+       }
+}
+
+var timeTests = []struct {
+       str     string
+       timeval time.Time
+}{
+       {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+       {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
+       {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
+               time.FixedZone("", -7*60*60))},
+       {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+               time.FixedZone("", -7*60*60))},
+       {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+               time.FixedZone("", -(7*60*60+42*60)))},
+       {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+               time.FixedZone("", -(7*60*60+30*60+9)))},
+       {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+               time.FixedZone("", 7*60*60))},
+       {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+       {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+       {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
+               time.FixedZone("", -7*60*60))},
+       {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+       {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
+       {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+       {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
+       {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+       {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+       {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+}
+
+// Test that parsing the string results in the expected value.
+func TestParseTs(t *testing.T) {
+       for i, tt := range timeTests {
+               val, err := ParseTimestamp(nil, tt.str)
+               if err != nil {
+                       t.Errorf("%d: got error: %v", i, err)
+               } else if val.String() != tt.timeval.String() {
+                       t.Errorf("%d: expected to parse %q into %q; got %q",
+                               i, tt.str, tt.timeval, val)
+               }
+       }
+}
+
+var timeErrorTests = []string{
+       "2001",
+       "2001-2-03",
+       "2001-02-3",
+       "2001-02-03 ",
+       "2001-02-03 04",
+       "2001-02-03 04:",
+       "2001-02-03 04:05",
+       "2001-02-03 04:05:",
+       "2001-02-03 04:05:6",
+       "2001-02-03 04:05:06.123 B",
+}
+
+// Test that parsing the string results in an error.
+func TestParseTsErrors(t *testing.T) {
+       for i, tt := range timeErrorTests {
+               _, err := ParseTimestamp(nil, tt)
+               if err == nil {
+                       t.Errorf("%d: expected an error from parsing: %v", i, tt)
+               }
+       }
+}
+
+// Now test that sending the value into the database and parsing it back
+// returns the same time.Time value.
+func TestEncodeAndParseTs(t *testing.T) {
+       db, err := openTestConnConninfo("timezone='Etc/UTC'")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer db.Close()
+
+       for i, tt := range timeTests {
+               var dbstr string
+               err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr)
+               if err != nil {
+                       t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err)
+                       continue
+               }
+
+               val, err := ParseTimestamp(nil, dbstr)
+               if err != nil {
+                       t.Errorf("%d: could not parse value %q: %s", i, dbstr, err)
+                       continue
+               }
+               val = val.In(tt.timeval.Location())
+               if val.String() != tt.timeval.String() {
+                       t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val)
+               }
+       }
+}
+
+var formatTimeTests = []struct {
+       time     time.Time
+       expected string
+}{
+       {time.Time{}, "0001-01-01 00:00:00Z"},
+       {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"},
+       {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"},
+       {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"},
+       {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"},
+
+       {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"},
+       {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"},
+       {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"},
+
+       {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"},
+       {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"},
+       {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"},
+
+       {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"},
+       {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"},
+}
+
+func TestFormatTs(t *testing.T) {
+       for i, tt := range formatTimeTests {
+               val := string(formatTs(tt.time))
+               if val != tt.expected {
+                       t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected)
+               }
+       }
+}
+
+func TestFormatTsBackend(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       var str string
+       err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str)
+       if err == nil {
+               t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time")
+       }
+
+       for i, tt := range formatTimeTests {
+               for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} {
+                       err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str)
+                       if err != nil {
+                               t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err)
+                       }
+               }
+       }
+}
+
+func TestTimestampWithTimeZone(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       tx, err := db.Begin()
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer tx.Rollback()
+
+       // try several different locations, all included in Go's zoneinfo.zip
+       for _, locName := range []string{
+               "UTC",
+               "America/Chicago",
+               "America/New_York",
+               "Australia/Darwin",
+               "Australia/Perth",
+       } {
+               loc, err := time.LoadLocation(locName)
+               if err != nil {
+                       t.Logf("Could not load time zone %s - skipping", locName)
+                       continue
+               }
+
+               // Postgres timestamps have a resolution of 1 microsecond, so don't
+               // use the full range of the Nanosecond argument
+               refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc)
+
+               for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} {
+                       // Switch Postgres's timezone to test different output timestamp formats
+                       _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone))
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+
+                       var gotTime time.Time
+                       row := tx.QueryRow("select $1::timestamp with time zone", refTime)
+                       err = row.Scan(&gotTime)
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+
+                       if !refTime.Equal(gotTime) {
+                               t.Errorf("timestamps not equal: %s != %s", refTime, gotTime)
+                       }
+
+                       // check that the time zone is set correctly based on TimeZone
+                       pgLoc, err := time.LoadLocation(pgTimeZone)
+                       if err != nil {
+                               t.Logf("Could not load time zone %s - skipping", pgLoc)
+                               continue
+                       }
+                       translated := refTime.In(pgLoc)
+                       if translated.String() != gotTime.String() {
+                               t.Errorf("timestamps not equal: %s != %s", translated, gotTime)
+                       }
+               }
+       }
+}
+
+func TestTimestampWithOutTimezone(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       test := func(ts, pgts string) {
+               r, err := db.Query("SELECT $1::timestamp", pgts)
+               if err != nil {
+                       t.Fatalf("Could not run query: %v", err)
+               }
+
+               n := r.Next()
+
+               if n != true {
+                       t.Fatal("Expected at least one row")
+               }
+
+               var result time.Time
+               err = r.Scan(&result)
+               if err != nil {
+                       t.Fatalf("Did not expect error scanning row: %v", err)
+               }
+
+               expected, err := time.Parse(time.RFC3339, ts)
+               if err != nil {
+                       t.Fatalf("Could not parse test time literal: %v", err)
+               }
+
+               if !result.Equal(expected) {
+                       t.Fatalf("Expected time to match %v: got mismatch %v",
+                               expected, result)
+               }
+
+               n = r.Next()
+               if n != false {
+                       t.Fatal("Expected only one row")
+               }
+       }
+
+       test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00")
+
+       // Test higher precision time
+       test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033")
+}
+
+func TestInfinityTimestamp(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+       var err error
+       var resultT time.Time
+
+       expectedErrorStrPrefix := `sql: Scan error on column index 0: unsupported`
+       type testCases []struct {
+               Query                string
+               Param                string
+               ExpectedErrStrPrefix string
+               ExpectedVal          interface{}
+       }
+       tc := testCases{
+               {"SELECT $1::timestamp", "-infinity", expectedErrorStrPrefix, "-infinity"},
+               {"SELECT $1::timestamptz", "-infinity", expectedErrorStrPrefix, "-infinity"},
+               {"SELECT $1::timestamp", "infinity", expectedErrorStrPrefix, "infinity"},
+               {"SELECT $1::timestamptz", "infinity", expectedErrorStrPrefix, "infinity"},
+       }
+       // try to assert []byte to time.Time
+       for _, q := range tc {
+               err = db.QueryRow(q.Query, q.Param).Scan(&resultT)
+               if !strings.HasPrefix(err.Error(), q.ExpectedErrStrPrefix) {
+                       t.Errorf("Scanning -/+infinity, expected error to have prefix %q, got %q", q.ExpectedErrStrPrefix, err)
+               }
+       }
+       // yield []byte
+       for _, q := range tc {
+               var resultI interface{}
+               err = db.QueryRow(q.Query, q.Param).Scan(&resultI)
+               if err != nil {
+                       t.Errorf("Scanning -/+infinity, expected no error, got %q", err)
+               }
+               result, ok := resultI.([]byte)
+               if !ok {
+                       t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI)
+               }
+               if string(result) != q.ExpectedVal {
+                       t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result)
+               }
+       }
+
+       y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC)
+       y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
+       EnableInfinityTs(y1500, y2500)
+
+       err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT)
+       if err != nil {
+               t.Errorf("Scanning infinity, expected no error, got %q", err)
+       }
+       if !resultT.Equal(y2500) {
+               t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT)
+       }
+
+       err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT)
+       if err != nil {
+               t.Errorf("Scanning infinity, expected no error, got %q", err)
+       }
+       if !resultT.Equal(y2500) {
+               t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String())
+       }
+
+       err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT)
+       if err != nil {
+               t.Errorf("Scanning -infinity, expected no error, got %q", err)
+       }
+       if !resultT.Equal(y1500) {
+               t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
+       }
+
+       err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT)
+       if err != nil {
+               t.Errorf("Scanning -infinity, expected no error, got %q", err)
+       }
+       if !resultT.Equal(y1500) {
+               t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
+       }
+
+       ym1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC)
+       y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC)
+       var s string
+       err = db.QueryRow("SELECT $1::timestamp::text", ym1500).Scan(&s)
+       if err != nil {
+               t.Errorf("Encoding -infinity, expected no error, got %q", err)
+       }
+       if s != "-infinity" {
+               t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
+       }
+       err = db.QueryRow("SELECT $1::timestamptz::text", ym1500).Scan(&s)
+       if err != nil {
+               t.Errorf("Encoding -infinity, expected no error, got %q", err)
+       }
+       if s != "-infinity" {
+               t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
+       }
+
+       err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s)
+       if err != nil {
+               t.Errorf("Encoding infinity, expected no error, got %q", err)
+       }
+       if s != "infinity" {
+               t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
+       }
+       err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s)
+       if err != nil {
+               t.Errorf("Encoding infinity, expected no error, got %q", err)
+       }
+       if s != "infinity" {
+               t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
+       }
+
+       disableInfinityTs()
+
+       var panicErrorString string
+       func() {
+               defer func() {
+                       panicErrorString, _ = recover().(string)
+               }()
+               EnableInfinityTs(y2500, y1500)
+       }()
+       if panicErrorString != infinityTsNegativeMustBeSmaller {
+               t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString)
+       }
+}
+
+func TestStringWithNul(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       hello0world := string("hello\x00world")
+       _, err := db.Query("SELECT $1::text", &hello0world)
+       if err == nil {
+               t.Fatal("Postgres accepts a string with nul in it; " +
+                       "injection attacks may be plausible")
+       }
+}
+
+func TestByteSliceToText(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       b := []byte("hello world")
+       row := db.QueryRow("SELECT $1::text", b)
+
+       var result []byte
+       err := row.Scan(&result)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if string(result) != string(b) {
+               t.Fatalf("expected %v but got %v", b, result)
+       }
+}
+
+func TestStringToBytea(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       b := "hello world"
+       row := db.QueryRow("SELECT $1::bytea", b)
+
+       var result []byte
+       err := row.Scan(&result)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if !bytes.Equal(result, []byte(b)) {
+               t.Fatalf("expected %v but got %v", b, result)
+       }
+}
+
+func TestTextByteSliceToUUID(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+       row := db.QueryRow("SELECT $1::uuid", b)
+
+       var result string
+       err := row.Scan(&result)
+       if forceBinaryParameters() {
+               pqErr := err.(*Error)
+               if pqErr == nil {
+                       t.Errorf("Expected to get error")
+               } else if pqErr.Code != "22P03" {
+                       t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+               }
+       } else {
+               if err != nil {
+                       t.Fatal(err)
+               }
+
+               if result != string(b) {
+                       t.Fatalf("expected %v but got %v", b, result)
+               }
+       }
+}
+
+func TestBinaryByteSlicetoUUID(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       b := []byte{'\xa0', '\xee', '\xbc', '\x99',
+               '\x9c', '\x0b',
+               '\x4e', '\xf8',
+               '\xbb', '\x00', '\x6b',
+               '\xb9', '\xbd', '\x38', '\x0a', '\x11'}
+       row := db.QueryRow("SELECT $1::uuid", b)
+
+       var result string
+       err := row.Scan(&result)
+       if forceBinaryParameters() {
+               if err != nil {
+                       t.Fatal(err)
+               }
+
+               if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") {
+                       t.Fatalf("expected %v but got %v", b, result)
+               }
+       } else {
+               pqErr := err.(*Error)
+               if pqErr == nil {
+                       t.Errorf("Expected to get error")
+               } else if pqErr.Code != "22021" {
+                       t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+               }
+       }
+}
+
+func TestStringToUUID(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
+       row := db.QueryRow("SELECT $1::uuid", s)
+
+       var result string
+       err := row.Scan(&result)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if result != s {
+               t.Fatalf("expected %v but got %v", s, result)
+       }
+}
+
+func TestTextByteSliceToInt(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       expected := 12345678
+       b := []byte(fmt.Sprintf("%d", expected))
+       row := db.QueryRow("SELECT $1::int", b)
+
+       var result int
+       err := row.Scan(&result)
+       if forceBinaryParameters() {
+               pqErr := err.(*Error)
+               if pqErr == nil {
+                       t.Errorf("Expected to get error")
+               } else if pqErr.Code != "22P03" {
+                       t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+               }
+       } else {
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if result != expected {
+                       t.Fatalf("expected %v but got %v", expected, result)
+               }
+       }
+}
+
+func TestBinaryByteSliceToInt(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       expected := 12345678
+       b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
+       row := db.QueryRow("SELECT $1::int", b)
+
+       var result int
+       err := row.Scan(&result)
+       if forceBinaryParameters() {
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if result != expected {
+                       t.Fatalf("expected %v but got %v", expected, result)
+               }
+       } else {
+               pqErr := err.(*Error)
+               if pqErr == nil {
+                       t.Errorf("Expected to get error")
+               } else if pqErr.Code != "22021" {
+                       t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+               }
+       }
+}
+
+func TestTextDecodeIntoString(t *testing.T) {
+       input := []byte("hello world")
+       want := string(input)
+       for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} {
+               got := decode(&parameterStatus{}, input, typ, formatText)
+               if got != want {
+                       t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want)
+               }
+       }
+}
+
+func TestByteaOutputFormatEncoding(t *testing.T) {
+       input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
+       want := []byte("\\x5c78000102fffe6162636465666730313233")
+       got := encode(&parameterStatus{serverVersion: 90000}, input, oid.T_bytea)
+       if !bytes.Equal(want, got) {
+               t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
+       }
+
+       want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
+       got = encode(&parameterStatus{serverVersion: 84000}, input, oid.T_bytea)
+       if !bytes.Equal(want, got) {
+               t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
+       }
+}
+
+func TestByteaOutputFormats(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       if getServerVersion(t, db) < 90000 {
+               // skip
+               return
+       }
+
+       testByteaOutputFormat := func(f string, usePrepared bool) {
+               expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
+               sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
+
+               var data []byte
+
+               // use a txn to avoid relying on getting the same connection
+               txn, err := db.Begin()
+               if err != nil {
+                       t.Fatal(err)
+               }
+               defer txn.Rollback()
+
+               _, err = txn.Exec("SET LOCAL bytea_output TO " + f)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               var rows *sql.Rows
+               var stmt *sql.Stmt
+               if usePrepared {
+                       stmt, err = txn.Prepare(sqlQuery)
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+                       rows, err = stmt.Query()
+               } else {
+                       // use Query; QueryRow would hide the actual error
+                       rows, err = txn.Query(sqlQuery)
+               }
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if !rows.Next() {
+                       if rows.Err() != nil {
+                               t.Fatal(rows.Err())
+                       }
+                       t.Fatal("shouldn't happen")
+               }
+               err = rows.Scan(&data)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               err = rows.Close()
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if stmt != nil {
+                       err = stmt.Close()
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+               }
+               if !bytes.Equal(data, expectedData) {
+                       t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
+               }
+       }
+
+       testByteaOutputFormat("hex", false)
+       testByteaOutputFormat("escape", false)
+       testByteaOutputFormat("hex", true)
+       testByteaOutputFormat("escape", true)
+}
+
+func TestAppendEncodedText(t *testing.T) {
+       var buf []byte
+
+       buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, int64(10))
+       buf = append(buf, '\t')
+       buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, 42.0000000001)
+       buf = append(buf, '\t')
+       buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, "hello\tworld")
+       buf = append(buf, '\t')
+       buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
+
+       if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
+               t.Fatal(string(buf))
+       }
+}
+
+func TestAppendEscapedText(t *testing.T) {
+       if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
+               t.Fatal(string(esc))
+       }
+       if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
+               t.Fatal(string(esc))
+       }
+       if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
+               t.Fatal(string(esc))
+       }
+}
+
+func TestAppendEscapedTextExistingBuffer(t *testing.T) {
+       var buf []byte
+       buf = []byte("123\t")
+       if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
+               t.Fatal(string(esc))
+       }
+       buf = []byte("123\t")
+       if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
+               t.Fatal(string(esc))
+       }
+       buf = []byte("123\t")
+       if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
+               t.Fatal(string(esc))
+       }
+}
+
+func BenchmarkAppendEscapedText(b *testing.B) {
+       longString := ""
+       for i := 0; i < 100; i++ {
+               longString += "123456789\n"
+       }
+       for i := 0; i < b.N; i++ {
+               appendEscapedText(nil, longString)
+       }
+}
+
+func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
+       longString := ""
+       for i := 0; i < 100; i++ {
+               longString += "1234567890"
+       }
+       for i := 0; i < b.N; i++ {
+               appendEscapedText(nil, longString)
+       }
+}
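
The tests above exercise NullTime scanning, timestamp parsing and formatting, and the
EnableInfinityTs mapping for PostgreSQL's "infinity"/"-infinity" timestamps. As
orientation for readers of this vendored code, here is a minimal usage sketch of those
features; the DSN is a placeholder and the query is illustrative, not taken from the
commit.

package main

import (
        "database/sql"
        "fmt"
        "log"
        "time"

        "github.com/lib/pq" // importing pq also registers the "postgres" driver
)

func main() {
        // Placeholder DSN; point this at a real server before running.
        db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
        if err != nil {
                log.Fatal(err)
        }
        defer db.Close()

        // Map '-infinity'/'infinity' to concrete times when scanning, mirroring
        // what TestInfinityTimestamp does with the years 1500 and 2500.
        pq.EnableInfinityTs(
                time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC),
                time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC),
        )

        // NullTime scans SQL NULL without an error; Valid reports whether a
        // non-NULL timestamp was present.
        var nt pq.NullTime
        if err := db.QueryRow("SELECT NULL::timestamptz").Scan(&nt); err != nil {
                log.Fatal(err)
        }
        fmt.Println("valid:", nt.Valid, "time:", nt.Time)
}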

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/error.go
----------------------------------------------------------------------
diff --git a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/error.go b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/error.go
new file mode 100644
index 0000000..b4bb44c
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/error.go
@@ -0,0 +1,508 @@
+package pq
+
+import (
+       "database/sql/driver"
+       "fmt"
+       "io"
+       "net"
+       "runtime"
+)
+
+// Error severities
+const (
+       Efatal   = "FATAL"
+       Epanic   = "PANIC"
+       Ewarning = "WARNING"
+       Enotice  = "NOTICE"
+       Edebug   = "DEBUG"
+       Einfo    = "INFO"
+       Elog     = "LOG"
+)
+
+// Error represents an error communicating with the server.
+//
+// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
+type Error struct {
+       Severity         string
+       Code             ErrorCode
+       Message          string
+       Detail           string
+       Hint             string
+       Position         string
+       InternalPosition string
+       InternalQuery    string
+       Where            string
+       Schema           string
+       Table            string
+       Column           string
+       DataTypeName     string
+       Constraint       string
+       File             string
+       Line             string
+       Routine          string
+}
+
+// ErrorCode is a five-character error code.
+type ErrorCode string
+
+// Name returns a more human friendly rendering of the error code, namely the
+// "condition name".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Name() string {
+       return errorCodeNames[ec]
+}
+
+// ErrorClass is only the class part of an error code.
+type ErrorClass string
+
+// Name returns the condition name of an error class.  It is equivalent to the
+// condition name of the "standard" error code (i.e. the one having the last
+// three characters "000").
+func (ec ErrorClass) Name() string {
+       return errorCodeNames[ErrorCode(ec+"000")]
+}
+
+// Class returns the error class, e.g. "28".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Class() ErrorClass {
+       return ErrorClass(ec[0:2])
+}
+
+// errorCodeNames is a mapping between the five-character error codes and the
+// human readable "condition names". It is derived from the list at
+// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
+var errorCodeNames = map[ErrorCode]string{
+       // Class 00 - Successful Completion
+       "00000": "successful_completion",
+       // Class 01 - Warning
+       "01000": "warning",
+       "0100C": "dynamic_result_sets_returned",
+       "01008": "implicit_zero_bit_padding",
+       "01003": "null_value_eliminated_in_set_function",
+       "01007": "privilege_not_granted",
+       "01006": "privilege_not_revoked",
+       "01004": "string_data_right_truncation",
+       "01P01": "deprecated_feature",
+       // Class 02 - No Data (this is also a warning class per the SQL standard)
+       "02000": "no_data",
+       "02001": "no_additional_dynamic_result_sets_returned",
+       // Class 03 - SQL Statement Not Yet Complete
+       "03000": "sql_statement_not_yet_complete",
+       // Class 08 - Connection Exception
+       "08000": "connection_exception",
+       "08003": "connection_does_not_exist",
+       "08006": "connection_failure",
+       "08001": "sqlclient_unable_to_establish_sqlconnection",
+       "08004": "sqlserver_rejected_establishment_of_sqlconnection",
+       "08007": "transaction_resolution_unknown",
+       "08P01": "protocol_violation",
+       // Class 09 - Triggered Action Exception
+       "09000": "triggered_action_exception",
+       // Class 0A - Feature Not Supported
+       "0A000": "feature_not_supported",
+       // Class 0B - Invalid Transaction Initiation
+       "0B000": "invalid_transaction_initiation",
+       // Class 0F - Locator Exception
+       "0F000": "locator_exception",
+       "0F001": "invalid_locator_specification",
+       // Class 0L - Invalid Grantor
+       "0L000": "invalid_grantor",
+       "0LP01": "invalid_grant_operation",
+       // Class 0P - Invalid Role Specification
+       "0P000": "invalid_role_specification",
+       // Class 0Z - Diagnostics Exception
+       "0Z000": "diagnostics_exception",
+       "0Z002": "stacked_diagnostics_accessed_without_active_handler",
+       // Class 20 - Case Not Found
+       "20000": "case_not_found",
+       // Class 21 - Cardinality Violation
+       "21000": "cardinality_violation",
+       // Class 22 - Data Exception
+       "22000": "data_exception",
+       "2202E": "array_subscript_error",
+       "22021": "character_not_in_repertoire",
+       "22008": "datetime_field_overflow",
+       "22012": "division_by_zero",
+       "22005": "error_in_assignment",
+       "2200B": "escape_character_conflict",
+       "22022": "indicator_overflow",
+       "22015": "interval_field_overflow",
+       "2201E": "invalid_argument_for_logarithm",
+       "22014": "invalid_argument_for_ntile_function",
+       "22016": "invalid_argument_for_nth_value_function",
+       "2201F": "invalid_argument_for_power_function",
+       "2201G": "invalid_argument_for_width_bucket_function",
+       "22018": "invalid_character_value_for_cast",
+       "22007": "invalid_datetime_format",
+       "22019": "invalid_escape_character",
+       "2200D": "invalid_escape_octet",
+       "22025": "invalid_escape_sequence",
+       "22P06": "nonstandard_use_of_escape_character",
+       "22010": "invalid_indicator_parameter_value",
+       "22023": "invalid_parameter_value",
+       "2201B": "invalid_regular_expression",
+       "2201W": "invalid_row_count_in_limit_clause",
+       "2201X": "invalid_row_count_in_result_offset_clause",
+       "22009": "invalid_time_zone_displacement_value",
+       "2200C": "invalid_use_of_escape_character",
+       "2200G": "most_specific_type_mismatch",
+       "22004": "null_value_not_allowed",
+       "22002": "null_value_no_indicator_parameter",
+       "22003": "numeric_value_out_of_range",
+       "22026": "string_data_length_mismatch",
+       "22001": "string_data_right_truncation",
+       "22011": "substring_error",
+       "22027": "trim_error",
+       "22024": "unterminated_c_string",
+       "2200F": "zero_length_character_string",
+       "22P01": "floating_point_exception",
+       "22P02": "invalid_text_representation",
+       "22P03": "invalid_binary_representation",
+       "22P04": "bad_copy_file_format",
+       "22P05": "untranslatable_character",
+       "2200L": "not_an_xml_document",
+       "2200M": "invalid_xml_document",
+       "2200N": "invalid_xml_content",
+       "2200S": "invalid_xml_comment",
+       "2200T": "invalid_xml_processing_instruction",
+       // Class 23 - Integrity Constraint Violation
+       "23000": "integrity_constraint_violation",
+       "23001": "restrict_violation",
+       "23502": "not_null_violation",
+       "23503": "foreign_key_violation",
+       "23505": "unique_violation",
+       "23514": "check_violation",
+       "23P01": "exclusion_violation",
+       // Class 24 - Invalid Cursor State
+       "24000": "invalid_cursor_state",
+       // Class 25 - Invalid Transaction State
+       "25000": "invalid_transaction_state",
+       "25001": "active_sql_transaction",
+       "25002": "branch_transaction_already_active",
+       "25008": "held_cursor_requires_same_isolation_level",
+       "25003": "inappropriate_access_mode_for_branch_transaction",
+       "25004": "inappropriate_isolation_level_for_branch_transaction",
+       "25005": "no_active_sql_transaction_for_branch_transaction",
+       "25006": "read_only_sql_transaction",
+       "25007": "schema_and_data_statement_mixing_not_supported",
+       "25P01": "no_active_sql_transaction",
+       "25P02": "in_failed_sql_transaction",
+       // Class 26 - Invalid SQL Statement Name
+       "26000": "invalid_sql_statement_name",
+       // Class 27 - Triggered Data Change Violation
+       "27000": "triggered_data_change_violation",
+       // Class 28 - Invalid Authorization Specification
+       "28000": "invalid_authorization_specification",
+       "28P01": "invalid_password",
+       // Class 2B - Dependent Privilege Descriptors Still Exist
+       "2B000": "dependent_privilege_descriptors_still_exist",
+       "2BP01": "dependent_objects_still_exist",
+       // Class 2D - Invalid Transaction Termination
+       "2D000": "invalid_transaction_termination",
+       // Class 2F - SQL Routine Exception
+       "2F000": "sql_routine_exception",
+       "2F005": "function_executed_no_return_statement",
+       "2F002": "modifying_sql_data_not_permitted",
+       "2F003": "prohibited_sql_statement_attempted",
+       "2F004": "reading_sql_data_not_permitted",
+       // Class 34 - Invalid Cursor Name
+       "34000": "invalid_cursor_name",
+       // Class 38 - External Routine Exception
+       "38000": "external_routine_exception",
+       "38001": "containing_sql_not_permitted",
+       "38002": "modifying_sql_data_not_permitted",
+       "38003": "prohibited_sql_statement_attempted",
+       "38004": "reading_sql_data_not_permitted",
+       // Class 39 - External Routine Invocation Exception
+       "39000": "external_routine_invocation_exception",
+       "39001": "invalid_sqlstate_returned",
+       "39004": "null_value_not_allowed",
+       "39P01": "trigger_protocol_violated",
+       "39P02": "srf_protocol_violated",
+       // Class 3B - Savepoint Exception
+       "3B000": "savepoint_exception",
+       "3B001": "invalid_savepoint_specification",
+       // Class 3D - Invalid Catalog Name
+       "3D000": "invalid_catalog_name",
+       // Class 3F - Invalid Schema Name
+       "3F000": "invalid_schema_name",
+       // Class 40 - Transaction Rollback
+       "40000": "transaction_rollback",
+       "40002": "transaction_integrity_constraint_violation",
+       "40001": "serialization_failure",
+       "40003": "statement_completion_unknown",
+       "40P01": "deadlock_detected",
+       // Class 42 - Syntax Error or Access Rule Violation
+       "42000": "syntax_error_or_access_rule_violation",
+       "42601": "syntax_error",
+       "42501": "insufficient_privilege",
+       "42846": "cannot_coerce",
+       "42803": "grouping_error",
+       "42P20": "windowing_error",
+       "42P19": "invalid_recursion",
+       "42830": "invalid_foreign_key",
+       "42602": "invalid_name",
+       "42622": "name_too_long",
+       "42939": "reserved_name",
+       "42804": "datatype_mismatch",
+       "42P18": "indeterminate_datatype",
+       "42P21": "collation_mismatch",
+       "42P22": "indeterminate_collation",
+       "42809": "wrong_object_type",
+       "42703": "undefined_column",
+       "42883": "undefined_function",
+       "42P01": "undefined_table",
+       "42P02": "undefined_parameter",
+       "42704": "undefined_object",
+       "42701": "duplicate_column",
+       "42P03": "duplicate_cursor",
+       "42P04": "duplicate_database",
+       "42723": "duplicate_function",
+       "42P05": "duplicate_prepared_statement",
+       "42P06": "duplicate_schema",
+       "42P07": "duplicate_table",
+       "42712": "duplicate_alias",
+       "42710": "duplicate_object",
+       "42702": "ambiguous_column",
+       "42725": "ambiguous_function",
+       "42P08": "ambiguous_parameter",
+       "42P09": "ambiguous_alias",
+       "42P10": "invalid_column_reference",
+       "42611": "invalid_column_definition",
+       "42P11": "invalid_cursor_definition",
+       "42P12": "invalid_database_definition",
+       "42P13": "invalid_function_definition",
+       "42P14": "invalid_prepared_statement_definition",
+       "42P15": "invalid_schema_definition",
+       "42P16": "invalid_table_definition",
+       "42P17": "invalid_object_definition",
+       // Class 44 - WITH CHECK OPTION Violation
+       "44000": "with_check_option_violation",
+       // Class 53 - Insufficient Resources
+       "53000": "insufficient_resources",
+       "53100": "disk_full",
+       "53200": "out_of_memory",
+       "53300": "too_many_connections",
+       "53400": "configuration_limit_exceeded",
+       // Class 54 - Program Limit Exceeded
+       "54000": "program_limit_exceeded",
+       "54001": "statement_too_complex",
+       "54011": "too_many_columns",
+       "54023": "too_many_arguments",
+       // Class 55 - Object Not In Prerequisite State
+       "55000": "object_not_in_prerequisite_state",
+       "55006": "object_in_use",
+       "55P02": "cant_change_runtime_param",
+       "55P03": "lock_not_available",
+       // Class 57 - Operator Intervention
+       "57000": "operator_intervention",
+       "57014": "query_canceled",
+       "57P01": "admin_shutdown",
+       "57P02": "crash_shutdown",
+       "57P03": "cannot_connect_now",
+       "57P04": "database_dropped",
+       // Class 58 - System Error (errors external to PostgreSQL itself)
+       "58000": "system_error",
+       "58030": "io_error",
+       "58P01": "undefined_file",
+       "58P02": "duplicate_file",
+       // Class F0 - Configuration File Error
+       "F0000": "config_file_error",
+       "F0001": "lock_file_exists",
+       // Class HV - Foreign Data Wrapper Error (SQL/MED)
+       "HV000": "fdw_error",
+       "HV005": "fdw_column_name_not_found",
+       "HV002": "fdw_dynamic_parameter_value_needed",
+       "HV010": "fdw_function_sequence_error",
+       "HV021": "fdw_inconsistent_descriptor_information",
+       "HV024": "fdw_invalid_attribute_value",
+       "HV007": "fdw_invalid_column_name",
+       "HV008": "fdw_invalid_column_number",
+       "HV004": "fdw_invalid_data_type",
+       "HV006": "fdw_invalid_data_type_descriptors",
+       "HV091": "fdw_invalid_descriptor_field_identifier",
+       "HV00B": "fdw_invalid_handle",
+       "HV00C": "fdw_invalid_option_index",
+       "HV00D": "fdw_invalid_option_name",
+       "HV090": "fdw_invalid_string_length_or_buffer_length",
+       "HV00A": "fdw_invalid_string_format",
+       "HV009": "fdw_invalid_use_of_null_pointer",
+       "HV014": "fdw_too_many_handles",
+       "HV001": "fdw_out_of_memory",
+       "HV00P": "fdw_no_schemas",
+       "HV00J": "fdw_option_name_not_found",
+       "HV00K": "fdw_reply_handle",
+       "HV00Q": "fdw_schema_not_found",
+       "HV00R": "fdw_table_not_found",
+       "HV00L": "fdw_unable_to_create_execution",
+       "HV00M": "fdw_unable_to_create_reply",
+       "HV00N": "fdw_unable_to_establish_connection",
+       // Class P0 - PL/pgSQL Error
+       "P0000": "plpgsql_error",
+       "P0001": "raise_exception",
+       "P0002": "no_data_found",
+       "P0003": "too_many_rows",
+       // Class XX - Internal Error
+       "XX000": "internal_error",
+       "XX001": "data_corrupted",
+       "XX002": "index_corrupted",
+}
+
+func parseError(r *readBuf) *Error {
+       err := new(Error)
+       for t := r.byte(); t != 0; t = r.byte() {
+               msg := r.string()
+               switch t {
+               case 'S':
+                       err.Severity = msg
+               case 'C':
+                       err.Code = ErrorCode(msg)
+               case 'M':
+                       err.Message = msg
+               case 'D':
+                       err.Detail = msg
+               case 'H':
+                       err.Hint = msg
+               case 'P':
+                       err.Position = msg
+               case 'p':
+                       err.InternalPosition = msg
+               case 'q':
+                       err.InternalQuery = msg
+               case 'W':
+                       err.Where = msg
+               case 's':
+                       err.Schema = msg
+               case 't':
+                       err.Table = msg
+               case 'c':
+                       err.Column = msg
+               case 'd':
+                       err.DataTypeName = msg
+               case 'n':
+                       err.Constraint = msg
+               case 'F':
+                       err.File = msg
+               case 'L':
+                       err.Line = msg
+               case 'R':
+                       err.Routine = msg
+               }
+       }
+       return err
+}
+
+// Fatal returns true if the Error Severity is fatal.
+func (err *Error) Fatal() bool {
+       return err.Severity == Efatal
+}
+
+// Get implements the legacy PGError interface. New code should use the fields
+// of the Error struct directly.
+func (err *Error) Get(k byte) (v string) {
+       switch k {
+       case 'S':
+               return err.Severity
+       case 'C':
+               return string(err.Code)
+       case 'M':
+               return err.Message
+       case 'D':
+               return err.Detail
+       case 'H':
+               return err.Hint
+       case 'P':
+               return err.Position
+       case 'p':
+               return err.InternalPosition
+       case 'q':
+               return err.InternalQuery
+       case 'W':
+               return err.Where
+       case 's':
+               return err.Schema
+       case 't':
+               return err.Table
+       case 'c':
+               return err.Column
+       case 'd':
+               return err.DataTypeName
+       case 'n':
+               return err.Constraint
+       case 'F':
+               return err.File
+       case 'L':
+               return err.Line
+       case 'R':
+               return err.Routine
+       }
+       return ""
+}
+
+func (err Error) Error() string {
+       return "pq: " + err.Message
+}
+
+// PGError is an interface used by previous versions of pq. It is provided
+// only to support legacy code. New code should use the Error type.
+type PGError interface {
+       Error() string
+       Fatal() bool
+       Get(k byte) (v string)
+}
+
+func errorf(s string, args ...interface{}) {
+       panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
+}
+
+func errRecoverNoErrBadConn(err *error) {
+       e := recover()
+       if e == nil {
+               // Do nothing
+               return
+       }
+       var ok bool
+       *err, ok = e.(error)
+       if !ok {
+               *err = fmt.Errorf("pq: unexpected error: %#v", e)
+       }
+}
+
+func (c *conn) errRecover(err *error) {
+       e := recover()
+       switch v := e.(type) {
+       case nil:
+               // Do nothing
+       case runtime.Error:
+               c.bad = true
+               panic(v)
+       case *Error:
+               if v.Fatal() {
+                       *err = driver.ErrBadConn
+               } else {
+                       *err = v
+               }
+       case *net.OpError:
+               *err = driver.ErrBadConn
+       case error:
+               if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
+                       *err = driver.ErrBadConn
+               } else {
+                       *err = v
+               }
+
+       default:
+               c.bad = true
+               panic(fmt.Sprintf("unknown error: %#v", e))
+       }
+
+       // Any time we return ErrBadConn, we need to remember it since *Tx doesn't
+       // mark the connection bad in database/sql.
+       if *err == driver.ErrBadConn {
+               c.bad = true
+       }
+}
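
For readers of error.go above: application code typically type-asserts to *pq.Error and
matches on Code (or Code.Name(), which looks up the errorCodeNames table). A short
sketch follows; the table name and statement are invented for illustration.

package main

import (
        "database/sql"
        "fmt"

        "github.com/lib/pq"
)

// isUniqueViolation reports whether err is a PostgreSQL unique_violation
// (SQLSTATE 23505), using the Error and ErrorCode types defined in error.go.
func isUniqueViolation(err error) bool {
        if pqErr, ok := err.(*pq.Error); ok {
                return pqErr.Code.Name() == "unique_violation" // equivalently: pqErr.Code == "23505"
        }
        return false
}

func main() {
        // Placeholder DSN and table; adjust for a real database.
        db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
        if err != nil {
                panic(err)
        }
        defer db.Close()

        if _, err := db.Exec("INSERT INTO users(id) VALUES (1)"); isUniqueViolation(err) {
                fmt.Println("duplicate key:", err)
        }
}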

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/go18_test.go
----------------------------------------------------------------------
diff --git a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/go18_test.go b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/go18_test.go
new file mode 100644
index 0000000..4bf6391
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/go18_test.go
@@ -0,0 +1,319 @@
+// +build go1.8
+
+package pq
+
+import (
+       "context"
+       "database/sql"
+       "runtime"
+       "strings"
+       "testing"
+       "time"
+)
+
+func TestMultipleSimpleQuery(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       rows, err := db.Query("select 1; set time zone default; select 2; select 3")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer rows.Close()
+
+       var i int
+       for rows.Next() {
+               if err := rows.Scan(&i); err != nil {
+                       t.Fatal(err)
+               }
+               if i != 1 {
+                       t.Fatalf("expected 1, got %d", i)
+               }
+       }
+       if !rows.NextResultSet() {
+               t.Fatal("expected more result sets", rows.Err())
+       }
+       for rows.Next() {
+               if err := rows.Scan(&i); err != nil {
+                       t.Fatal(err)
+               }
+               if i != 2 {
+                       t.Fatalf("expected 2, got %d", i)
+               }
+       }
+
+       // Make sure that if we ignore a result we can still query.
+
+       rows, err = db.Query("select 4; select 5")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer rows.Close()
+
+       for rows.Next() {
+               if err := rows.Scan(&i); err != nil {
+                       t.Fatal(err)
+               }
+               if i != 4 {
+                       t.Fatalf("expected 4, got %d", i)
+               }
+       }
+       if !rows.NextResultSet() {
+               t.Fatal("expected more result sets", rows.Err())
+       }
+       for rows.Next() {
+               if err := rows.Scan(&i); err != nil {
+                       t.Fatal(err)
+               }
+               if i != 5 {
+                       t.Fatalf("expected 5, got %d", i)
+               }
+       }
+       if rows.NextResultSet() {
+               t.Fatal("unexpected result set")
+       }
+}
+
+const contextRaceIterations = 100
+
+func TestContextCancelExec(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+
+       // Delay execution for just a bit until db.ExecContext has begun.
+       defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
+
+       // Not canceled until after the exec has started.
+       if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "pq: canceling statement due to user request" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       // Context is already canceled, so error should come before execution.
+       if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "context canceled" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       for i := 0; i < contextRaceIterations; i++ {
+               func() {
+                       ctx, cancel := context.WithCancel(context.Background())
+                       defer cancel()
+                       if _, err := db.ExecContext(ctx, "select 1"); err != nil {
+                               t.Fatal(err)
+                       }
+               }()
+
+               if _, err := db.Exec("select 1"); err != nil {
+                       t.Fatal(err)
+               }
+       }
+}
+
+func TestContextCancelQuery(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+
+       // Delay execution for just a bit until db.QueryContext has begun.
+       defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
+
+       // Not canceled until after the exec has started.
+       if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "pq: canceling statement due to user request" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       // Context is already canceled, so error should come before execution.
+       if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "context canceled" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       for i := 0; i < contextRaceIterations; i++ {
+               func() {
+                       ctx, cancel := context.WithCancel(context.Background())
+                       rows, err := db.QueryContext(ctx, "select 1")
+                       cancel()
+                       if err != nil {
+                               t.Fatal(err)
+                       } else if err := rows.Close(); err != nil {
+                               t.Fatal(err)
+                       }
+               }()
+
+               if rows, err := db.Query("select 1"); err != nil {
+                       t.Fatal(err)
+               } else if err := rows.Close(); err != nil {
+                       t.Fatal(err)
+               }
+       }
+}
+
+// TestIssue617 tests that a failed query in QueryContext doesn't lead to a
+// goroutine leak.
+func TestIssue617(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       const N = 10
+
+       numGoroutineStart := runtime.NumGoroutine()
+       for i := 0; i < N; i++ {
+               func() {
+                       ctx, cancel := context.WithCancel(context.Background())
+                       defer cancel()
+                       _, err := db.QueryContext(ctx, `SELECT * FROM DOESNOTEXIST`)
+                       pqErr, _ := err.(*Error)
+                       // Expecting "pq: relation \"doesnotexist\" does not exist" error.
+                       if err == nil || pqErr == nil || pqErr.Code != "42P01" {
+                               t.Fatalf("expected undefined table error, got %v", err)
+                       }
+               }()
+       }
+       numGoroutineFinish := runtime.NumGoroutine()
+
+       // We use N/2 and not N because the GC and other actors may increase or
+       // decrease the number of goroutines.
+       if numGoroutineFinish-numGoroutineStart >= N/2 {
+               t.Errorf("goroutine leak detected, was %d, now %d", numGoroutineStart, numGoroutineFinish)
+       }
+}
+
+func TestContextCancelBegin(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       tx, err := db.BeginTx(ctx, nil)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Delay execution for just a bit until tx.Exec has begun.
+       defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
+
+       // Not canceled until after the exec has started.
+       if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "pq: canceling statement due to user request" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       // Transaction is canceled, so expect an error.
+       if _, err := tx.Query("select pg_sleep(1)"); err == nil {
+               t.Fatal("expected error")
+       } else if err != sql.ErrTxDone {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       // Context is canceled, so cannot begin a transaction.
+       if _, err := db.BeginTx(ctx, nil); err == nil {
+               t.Fatal("expected error")
+       } else if err.Error() != "context canceled" {
+               t.Fatalf("unexpected error: %s", err)
+       }
+
+       for i := 0; i < contextRaceIterations; i++ {
+               func() {
+                       ctx, cancel := context.WithCancel(context.Background())
+                       tx, err := db.BeginTx(ctx, nil)
+                       cancel()
+                       if err != nil {
+                               t.Fatal(err)
+                       } else if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
+                               t.Fatal(err)
+                       }
+               }()
+
+               if tx, err := db.Begin(); err != nil {
+                       t.Fatal(err)
+               } else if err := tx.Rollback(); err != nil {
+                       t.Fatal(err)
+               }
+       }
+}
+
+func TestTxOptions(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+       ctx := context.Background()
+
+       tests := []struct {
+               level     sql.IsolationLevel
+               isolation string
+       }{
+               {
+                       level:     sql.LevelDefault,
+                       isolation: "",
+               },
+               {
+                       level:     sql.LevelReadUncommitted,
+                       isolation: "read uncommitted",
+               },
+               {
+                       level:     sql.LevelReadCommitted,
+                       isolation: "read committed",
+               },
+               {
+                       level:     sql.LevelRepeatableRead,
+                       isolation: "repeatable read",
+               },
+               {
+                       level:     sql.LevelSerializable,
+                       isolation: "serializable",
+               },
+       }
+
+       for _, test := range tests {
+               for _, ro := range []bool{true, false} {
+                       tx, err := db.BeginTx(ctx, &sql.TxOptions{
+                               Isolation: test.level,
+                               ReadOnly:  ro,
+                       })
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+
+                       var isolation string
+                       err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&isolation)
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+
+                       if test.isolation != "" && isolation != test.isolation {
+                               t.Errorf("wrong isolation level: %s != %s", isolation, test.isolation)
+                       }
+
+                       var isRO string
+                       err = tx.QueryRow("select current_setting('transaction_read_only')").Scan(&isRO)
+                       if err != nil {
+                               t.Fatal(err)
+                       }
+
+                       if ro != (isRO == "on") {
+                               t.Errorf("read/[write,only] not set: %t != %s for level %s",
+                                       ro, isRO, test.isolation)
+                       }
+
+                       tx.Rollback()
+               }
+       }
+
+       _, err := db.BeginTx(ctx, &sql.TxOptions{
+               Isolation: sql.LevelLinearizable,
+       })
+       if err == nil {
+               t.Fatal("expected LevelLinearizable to fail")
+       }
+       if !strings.Contains(err.Error(), "isolation level not supported") {
+               t.Errorf("Expected error to mention isolation level, got %q", err)
+       }
+}
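
For orientation, here is a minimal, self-contained sketch (not part of the vendored diff) of the
context-cancellation behaviour the tests above exercise: a statement that outlives its context is
cancelled server-side and the driver surfaces "pq: canceling statement due to user request". The
connection string is a placeholder.

    package main

    import (
        "context"
        "database/sql"
        "fmt"
        "time"

        _ "github.com/lib/pq"
    )

    func main() {
        // Placeholder DSN; adjust for your environment.
        db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        // Allow at most one second; pg_sleep(10) will be cancelled by the driver.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        if _, err := db.ExecContext(ctx, "select pg_sleep(10)"); err != nil {
            fmt.Println("statement cancelled:", err)
        }
    }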

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore.go
----------------------------------------------------------------------
diff --git 
a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore.go 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore.go
new file mode 100644
index 0000000..72d5abf
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore.go
@@ -0,0 +1,118 @@
+package hstore
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "strings"
+)
+
+// Hstore is a wrapper for transferring hstore values back and forth easily.
+type Hstore struct {
+       Map map[string]sql.NullString
+}
+
+// escapes and quotes hstore keys/values
+// s should be a sql.NullString or string
+func hQuote(s interface{}) string {
+       var str string
+       switch v := s.(type) {
+       case sql.NullString:
+               if !v.Valid {
+                       return "NULL"
+               }
+               str = v.String
+       case string:
+               str = v
+       default:
+               panic("not a string or sql.NullString")
+       }
+
+       str = strings.Replace(str, "\\", "\\\\", -1)
+       return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"`
+}
+
+// Scan implements the Scanner interface.
+//
+// Note h.Map is reallocated before the scan to clear existing values. If the
+// hstore column's database value is NULL, then h.Map is set to nil instead.
+func (h *Hstore) Scan(value interface{}) error {
+       if value == nil {
+               h.Map = nil
+               return nil
+       }
+       h.Map = make(map[string]sql.NullString)
+       var b byte
+       pair := [][]byte{{}, {}}
+       pi := 0
+       inQuote := false
+       didQuote := false
+       sawSlash := false
+       bindex := 0
+       for bindex, b = range value.([]byte) {
+               if sawSlash {
+                       pair[pi] = append(pair[pi], b)
+                       sawSlash = false
+                       continue
+               }
+
+               switch b {
+               case '\\':
+                       sawSlash = true
+                       continue
+               case '"':
+                       inQuote = !inQuote
+                       if !didQuote {
+                               didQuote = true
+                       }
+                       continue
+               default:
+                       if !inQuote {
+                               switch b {
+                               case ' ', '\t', '\n', '\r':
+                                       continue
+                               case '=':
+                                       continue
+                               case '>':
+                                       pi = 1
+                                       didQuote = false
+                                       continue
+                               case ',':
+                                       s := string(pair[1])
+                                       if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
+                                               h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
+                                       } else {
+                                               h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
+                                       }
+                                       pair[0] = []byte{}
+                                       pair[1] = []byte{}
+                                       pi = 0
+                                       continue
+                               }
+                       }
+               }
+               pair[pi] = append(pair[pi], b)
+       }
+       if bindex > 0 {
+               s := string(pair[1])
+               if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
+                       h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
+               } else {
+                       h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
+               }
+       }
+       return nil
+}
+
+// Value implements the driver Valuer interface. Note if h.Map is nil, the
+// database column value will be set to NULL.
+func (h Hstore) Value() (driver.Value, error) {
+       if h.Map == nil {
+               return nil, nil
+       }
+       parts := []string{}
+       for key, val := range h.Map {
+               thispart := hQuote(key) + "=>" + hQuote(val)
+               parts = append(parts, thispart)
+       }
+       return []byte(strings.Join(parts, ",")), nil
+}
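
As a rough usage sketch (separate from the vendored file), a map can be round-tripped through
Hstore's Value and Scan as follows; the DSN is a placeholder and the hstore extension is assumed to
be installable.

    package main

    import (
        "database/sql"
        "fmt"

        _ "github.com/lib/pq"
        "github.com/lib/pq/hstore"
    )

    func main() {
        // Placeholder DSN; adjust for your environment.
        db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        if _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore"); err != nil {
            panic(err)
        }

        in := hstore.Hstore{Map: map[string]sql.NullString{
            "color": {String: "blue", Valid: true},
            "size":  {String: "", Valid: false}, // stored as a NULL value inside the hstore
        }}

        // Value() encodes the parameter; Scan() decodes the result back into a map.
        var out hstore.Hstore
        if err := db.QueryRow("SELECT $1::hstore", in).Scan(&out); err != nil {
            panic(err)
        }
        fmt.Println(out.Map["color"].String) // "blue"
    }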

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore_test.go
----------------------------------------------------------------------
diff --git 
a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore_test.go 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore_test.go
new file mode 100644
index 0000000..1c9f2bd
--- /dev/null
+++ 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/hstore/hstore_test.go
@@ -0,0 +1,148 @@
+package hstore
+
+import (
+       "database/sql"
+       "os"
+       "testing"
+
+       _ "github.com/lib/pq"
+)
+
+type Fatalistic interface {
+       Fatal(args ...interface{})
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+       datname := os.Getenv("PGDATABASE")
+       sslmode := os.Getenv("PGSSLMODE")
+
+       if datname == "" {
+               os.Setenv("PGDATABASE", "pqgotest")
+       }
+
+       if sslmode == "" {
+               os.Setenv("PGSSLMODE", "disable")
+       }
+
+       conn, err := sql.Open("postgres", "")
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       return conn
+}
+
+func TestHstore(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       // quietly create hstore if it doesn't exist
+       _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
+       if err != nil {
+               t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
+       }
+
+       hs := Hstore{}
+
+       // test for null-valued hstores
+       err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if hs.Map != nil {
+               t.Fatalf("expected null map")
+       }
+
+       err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+       if err != nil {
+               t.Fatalf("re-query null map failed: %s", err.Error())
+       }
+       if hs.Map != nil {
+               t.Fatalf("expected null map")
+       }
+
+       // test for empty hstores
+       err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if hs.Map == nil {
+               t.Fatalf("expected empty map, got null map")
+       }
+       if len(hs.Map) != 0 {
+               t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+       }
+
+       err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+       if err != nil {
+               t.Fatalf("re-query empty map failed: %s", err.Error())
+       }
+       if hs.Map == nil {
+               t.Fatalf("expected empty map, got null map")
+       }
+       if len(hs.Map) != 0 {
+               t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+       }
+
+       // a few example maps to test out
+       hsOnePair := Hstore{
+               Map: map[string]sql.NullString{
+                       "key1": {String: "value1", Valid: true},
+               },
+       }
+
+       hsThreePairs := Hstore{
+               Map: map[string]sql.NullString{
+                       "key1": {String: "value1", Valid: true},
+                       "key2": {String: "value2", Valid: true},
+                       "key3": {String: "value3", Valid: true},
+               },
+       }
+
+       hsSmorgasbord := Hstore{
+               Map: map[string]sql.NullString{
+                       "nullstring":             {String: "NULL", Valid: true},
+                       "actuallynull":           {String: "", Valid: false},
+                       "NULL":                   {String: "NULL string key", Valid: true},
+                       "withbracket":            {String: "value>42", Valid: true},
+                       "withequal":              {String: "value=42", Valid: true},
+                       `"withquotes1"`:          {String: `this "should" be fine`, Valid: true},
+                       `"withquotes"2"`:         {String: `this "should\" also be fine`, Valid: true},
+                       "embedded1":              {String: "value1=>x1", Valid: true},
+                       "embedded2":              {String: `"value2"=>x2`, Valid: true},
+                       "withnewlines":           {String: "\n\nvalue\t=>2", Valid: true},
+                       "<<all sorts of crazy>>": {String: `this, "should,\" also, => be fine`, Valid: true},
+               },
+       }
+
+       // test encoding in query params, then decoding during Scan
+       testBidirectional := func(h Hstore) {
+               err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
+               if err != nil {
+                       t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
+               }
+               if hs.Map == nil {
+                       t.Fatalf("expected %d-pair map, got null map", len(h.Map))
+               }
+               if len(hs.Map) != len(h.Map) {
+                       t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
+               }
+
+               for key, val := range hs.Map {
+                       otherval, found := h.Map[key]
+                       if !found {
+                               t.Fatalf("  key '%v' not found in %d-pair map", key, len(h.Map))
+                       }
+                       if otherval.Valid != val.Valid {
+                               t.Fatalf("  value %v <> %v in %d-pair map", otherval, val, len(h.Map))
+                       }
+                       if otherval.String != val.String {
+                               t.Fatalf("  value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map))
+                       }
+               }
+       }
+
+       testBidirectional(hsOnePair)
+       testBidirectional(hsThreePairs)
+       testBidirectional(hsSmorgasbord)
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/issues_test.go
----------------------------------------------------------------------
diff --git 
a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/issues_test.go 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/issues_test.go
new file mode 100644
index 0000000..3a330a0
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/issues_test.go
@@ -0,0 +1,26 @@
+package pq
+
+import "testing"
+
+func TestIssue494(t *testing.T) {
+       db := openTestConn(t)
+       defer db.Close()
+
+       query := `CREATE TEMP TABLE t (i INT PRIMARY KEY)`
+       if _, err := db.Exec(query); err != nil {
+               t.Fatal(err)
+       }
+
+       txn, err := db.Begin()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if _, err := txn.Prepare(CopyIn("t", "i")); err != nil {
+               t.Fatal(err)
+       }
+
+       if _, err := txn.Query("SELECT 1"); err == nil {
+               t.Fatal("expected error")
+       }
+}
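
The regression test above exercises a prepared COPY; for context, a hedged sketch of the intended
pq.CopyIn flow follows (table t and column i are illustrative, the DSN is a placeholder): prepare
the COPY statement inside a transaction, Exec once per row, then a final empty Exec to flush before
committing.

    package main

    import (
        "database/sql"
        "log"

        "github.com/lib/pq"
    )

    // bulkLoad streams values into column i of table t via COPY FROM STDIN.
    func bulkLoad(db *sql.DB, values []int) error {
        txn, err := db.Begin()
        if err != nil {
            return err
        }
        defer txn.Rollback() // no-op once Commit has succeeded

        stmt, err := txn.Prepare(pq.CopyIn("t", "i")) // COPY "t" ("i") FROM STDIN
        if err != nil {
            return err
        }
        for _, v := range values {
            if _, err := stmt.Exec(v); err != nil { // buffers one row
                return err
            }
        }
        if _, err := stmt.Exec(); err != nil { // empty Exec flushes the buffered rows
            return err
        }
        if err := stmt.Close(); err != nil {
            return err
        }
        return txn.Commit()
    }

    func main() {
        // Placeholder DSN; adjust for your environment.
        db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t (i INT PRIMARY KEY)"); err != nil {
            log.Fatal(err)
        }
        if err := bulkLoad(db, []int{1, 2, 3}); err != nil {
            log.Fatal(err)
        }
    }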

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/listen_example/doc.go
----------------------------------------------------------------------
diff --git 
a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/listen_example/doc.go 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/listen_example/doc.go
new file mode 100644
index 0000000..80f0a9b
--- /dev/null
+++ 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/listen_example/doc.go
@@ -0,0 +1,96 @@
+/*
+
+Below you will find a self-contained Go program which uses the LISTEN / NOTIFY
+mechanism to avoid polling the database while waiting for more work to arrive.
+
+    //
+    // You can see the program in action by defining a function similar to
+    // the following:
+    //
+    // CREATE OR REPLACE FUNCTION public.get_work()
+    //   RETURNS bigint
+    //   LANGUAGE sql
+    //   AS $$
+    //     SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END
+    //   $$
+    // ;
+
+    package main
+
+    import (
+        "database/sql"
+        "fmt"
+        "time"
+
+        "github.com/lib/pq"
+    )
+
+    func doWork(db *sql.DB, work int64) {
+        // work here
+    }
+
+    func getWork(db *sql.DB) {
+        for {
+            // get work from the database here
+            var work sql.NullInt64
+            err := db.QueryRow("SELECT get_work()").Scan(&work)
+            if err != nil {
+                fmt.Println("call to get_work() failed: ", err)
+                time.Sleep(10 * time.Second)
+                continue
+            }
+            if !work.Valid {
+                // no more work to do
+                fmt.Println("ran out of work")
+                return
+            }
+
+            fmt.Println("starting work on ", work.Int64)
+            go doWork(db, work.Int64)
+        }
+    }
+
+    func waitForNotification(l *pq.Listener) {
+        select {
+            case <-l.Notify:
+                fmt.Println("received notification, new work available")
+            case <-time.After(90 * time.Second):
+                go l.Ping()
+                // Check if there's more work available, just in case it takes
+                // a while for the Listener to notice connection loss and
+                // reconnect.
+                fmt.Println("received no work for 90 seconds, checking for new work")
+        }
+    }
+
+    func main() {
+        var conninfo string = ""
+
+        db, err := sql.Open("postgres", conninfo)
+        if err != nil {
+            panic(err)
+        }
+
+        reportProblem := func(ev pq.ListenerEventType, err error) {
+            if err != nil {
+                fmt.Println(err.Error())
+            }
+        }
+
+        listener := pq.NewListener(conninfo, 10 * time.Second, time.Minute, reportProblem)
+        err = listener.Listen("getwork")
+        if err != nil {
+            panic(err)
+        }
+
+        fmt.Println("entering main loop")
+        for {
+            // process all available work before waiting for notifications
+            getWork(db)
+            waitForNotification(listener)
+        }
+    }
+
+
+*/
+package listen_example

http://git-wip-us.apache.org/repos/asf/incubator-trafficcontrol/blob/13fd7151/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/notify.go
----------------------------------------------------------------------
diff --git a/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/notify.go 
b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/notify.go
new file mode 100644
index 0000000..09f9424
--- /dev/null
+++ b/traffic_ops/traffic_ops_golang/vendor/github.com/lib/pq/notify.go
@@ -0,0 +1,782 @@
+package pq
+
+// Package pq is a pure Go Postgres driver for the database/sql package.
+// This module contains support for Postgres LISTEN/NOTIFY.
+
+import (
+       "errors"
+       "fmt"
+       "sync"
+       "sync/atomic"
+       "time"
+)
+
+// Notification represents a single notification from the database.
+type Notification struct {
+       // Process ID (PID) of the notifying postgres backend.
+       BePid int
+       // Name of the channel the notification was sent on.
+       Channel string
+       // Payload, or the empty string if unspecified.
+       Extra string
+}
+
+func recvNotification(r *readBuf) *Notification {
+       bePid := r.int32()
+       channel := r.string()
+       extra := r.string()
+
+       return &Notification{bePid, channel, extra}
+}
+
+const (
+       connStateIdle int32 = iota
+       connStateExpectResponse
+       connStateExpectReadyForQuery
+)
+
+type message struct {
+       typ byte
+       err error
+}
+
+var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
+
+// ListenerConn is a low-level interface for waiting for notifications.  You
+// should use Listener instead.
+type ListenerConn struct {
+       // guards cn and err
+       connectionLock sync.Mutex
+       cn             *conn
+       err            error
+
+       connState int32
+
+       // the sending goroutine will be holding this lock
+       senderLock sync.Mutex
+
+       notificationChan chan<- *Notification
+
+       replyChan chan message
+}
+
+// Creates a new ListenerConn.  Use NewListener instead.
+func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
+       return newDialListenerConn(defaultDialer{}, name, notificationChan)
+}
+
+func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
+       cn, err := DialOpen(d, name)
+       if err != nil {
+               return nil, err
+       }
+
+       l := &ListenerConn{
+               cn:               cn.(*conn),
+               notificationChan: c,
+               connState:        connStateIdle,
+               replyChan:        make(chan message, 2),
+       }
+
+       go l.listenerConnMain()
+
+       return l, nil
+}
+
+// We can only allow one goroutine at a time to be running a query on the
+// connection for various reasons, so the goroutine sending on the connection
+// must be holding senderLock.
+//
+// Returns an error if an unrecoverable error has occurred and the ListenerConn
+// should be abandoned.
+func (l *ListenerConn) acquireSenderLock() error {
+       // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
+       l.senderLock.Lock()
+
+       l.connectionLock.Lock()
+       err := l.err
+       l.connectionLock.Unlock()
+       if err != nil {
+               l.senderLock.Unlock()
+               return err
+       }
+       return nil
+}
+
+func (l *ListenerConn) releaseSenderLock() {
+       l.senderLock.Unlock()
+}
+
+// setState advances the protocol state to newState.  Returns false if moving
+// to that state from the current state is not allowed.
+func (l *ListenerConn) setState(newState int32) bool {
+       var expectedState int32
+
+       switch newState {
+       case connStateIdle:
+               expectedState = connStateExpectReadyForQuery
+       case connStateExpectResponse:
+               expectedState = connStateIdle
+       case connStateExpectReadyForQuery:
+               expectedState = connStateExpectResponse
+       default:
+               panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
+       }
+
+       return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
+}
+
+// Main logic is here: receive messages from the postgres backend, forward
+// notifications and query replies and keep the internal state in sync with the
+// protocol state.  Returns when the connection has been lost, is about to go
+// away or should be discarded because we couldn't agree on the state with the
+// server backend.
+func (l *ListenerConn) listenerConnLoop() (err error) {
+       defer errRecoverNoErrBadConn(&err)
+
+       r := &readBuf{}
+       for {
+               t, err := l.cn.recvMessage(r)
+               if err != nil {
+                       return err
+               }
+
+               switch t {
+               case 'A':
+                       // recvNotification copies all the data so we don't need to worry
+                       // about the scratch buffer being overwritten.
+                       l.notificationChan <- recvNotification(r)
+
+               case 'T', 'D':
+                       // only used by tests; ignore
+
+               case 'E':
+                       // We might receive an ErrorResponse even when not in a query; it
+                       // is expected that the server will close the connection after
+                       // that, but we should make sure that the error we display is the
+                       // one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
+                       if !l.setState(connStateExpectReadyForQuery) {
+                               return parseError(r)
+                       }
+                       l.replyChan <- message{t, parseError(r)}
+
+               case 'C', 'I':
+                       if !l.setState(connStateExpectReadyForQuery) {
+                               // protocol out of sync
+                               return fmt.Errorf("unexpected CommandComplete")
+                       }
+                       // ExecSimpleQuery doesn't need to know about this message
+
+               case 'Z':
+                       if !l.setState(connStateIdle) {
+                               // protocol out of sync
+                               return fmt.Errorf("unexpected ReadyForQuery")
+                       }
+                       l.replyChan <- message{t, nil}
+
+               case 'N', 'S':
+                       // ignore
+               default:
+                       return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
+               }
+       }
+}
+
+// This is the main routine for the goroutine receiving on the database
+// connection.  Most of the main logic is in listenerConnLoop.
+func (l *ListenerConn) listenerConnMain() {
+       err := l.listenerConnLoop()
+
+       // listenerConnLoop terminated; we're done, but we still have to clean up.
+       // Make sure nobody tries to start any new queries by making sure the err
+       // pointer is set.  It is important that we do not overwrite its value; a
+       // connection could be closed by either this goroutine or one sending on
+       // the connection -- whoever closes the connection is assumed to have the
+       // more meaningful error message (as the other one will probably get
+       // net.errClosed), so that goroutine sets the error we expose while the
+       // other error is discarded.  If the connection is lost while two
+       // goroutines are operating on the socket, it probably doesn't matter which
+       // error we expose so we don't try to do anything more complex.
+       l.connectionLock.Lock()
+       if l.err == nil {
+               l.err = err
+       }
+       l.cn.Close()
+       l.connectionLock.Unlock()
+
+       // There might be a query in-flight; make sure nobody's waiting for a
+       // response to it, since there's not going to be one.
+       close(l.replyChan)
+
+       // let the listener know we're done
+       close(l.notificationChan)
+
+       // this ListenerConn is done
+}
+
+// Send a LISTEN query to the server.  See ExecSimpleQuery.
+func (l *ListenerConn) Listen(channel string) (bool, error) {
+       return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
+}
+
+// Send an UNLISTEN query to the server.  See ExecSimpleQuery.
+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
+       return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
+}
+
+// Send `UNLISTEN *` to the server.  See ExecSimpleQuery.
+func (l *ListenerConn) UnlistenAll() (bool, error) {
+       return l.ExecSimpleQuery("UNLISTEN *")
+}
+
+// Ping the remote server to make sure it's alive.  Non-nil error means the
+// connection has failed and should be abandoned.
+func (l *ListenerConn) Ping() error {
+       sent, err := l.ExecSimpleQuery("")
+       if !sent {
+               return err
+       }
+       if err != nil {
+               // shouldn't happen
+               panic(err)
+       }
+       return nil
+}
+
+// Attempt to send a query on the connection.  Returns an error if sending the
+// query failed, and the caller should initiate closure of this connection.
+// The caller must be holding senderLock (see acquireSenderLock and
+// releaseSenderLock).
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+       defer errRecoverNoErrBadConn(&err)
+
+       // must set connection state before sending the query
+       if !l.setState(connStateExpectResponse) {
+               panic("two queries running at the same time")
+       }
+
+       // Can't use l.cn.writeBuf here because it uses the scratch buffer which
+       // might get overwritten by listenerConnLoop.
+       b := &writeBuf{
+               buf: []byte("Q\x00\x00\x00\x00"),
+               pos: 1,
+       }
+       b.string(q)
+       l.cn.send(b)
+
+       return nil
+}
+
+// Execute a "simple query" (i.e. one with no bindable parameters) on the
+// connection.  The possible return values are:
+//   1) "executed" is true; the query was executed to completion on the
+//      database server.  If the query failed, err will be set to the error
+//      returned by the database, otherwise err will be nil.
+//   2) If "executed" is false, the query could not be executed on the remote
+//      server.  err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+       if err = l.acquireSenderLock(); err != nil {
+               return false, err
+       }
+       defer l.releaseSenderLock()
+
+       err = l.sendSimpleQuery(q)
+       if err != nil {
+               // We can't know what state the protocol is in, so we need to abandon
+               // this connection.
+               l.connectionLock.Lock()
+               // Set the error pointer if it hasn't been set already; see
+               // listenerConnMain.
+               if l.err == nil {
+                       l.err = err
+               }
+               l.connectionLock.Unlock()
+               l.cn.c.Close()
+               return false, err
+       }
+
+       // now we just wait for a reply..
+       for {
+               m, ok := <-l.replyChan
+               if !ok {
+                       // We lost the connection to the server, don't bother waiting for
+                       // a response.  err should have been set already.
+                       l.connectionLock.Lock()
+                       err := l.err
+                       l.connectionLock.Unlock()
+                       return false, err
+               }
+               switch m.typ {
+               case 'Z':
+                       // sanity check
+                       if m.err != nil {
+                               panic("m.err != nil")
+                       }
+                       // done; err might or might not be set
+                       return true, err
+
+               case 'E':
+                       // sanity check
+                       if m.err == nil {
+                               panic("m.err == nil")
+                       }
+                       // server responded with an error; ReadyForQuery to follow
+                       err = m.err
+
+               default:
+                       return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+               }
+       }
+}
+
+func (l *ListenerConn) Close() error {
+       l.connectionLock.Lock()
+       if l.err != nil {
+               l.connectionLock.Unlock()
+               return errListenerConnClosed
+       }
+       l.err = errListenerConnClosed
+       l.connectionLock.Unlock()
+       // We can't send anything on the connection without holding senderLock.
+       // Simply close the net.Conn to wake up everyone operating on it.
+       return l.cn.c.Close()
+}
+
+// Err() returns the reason the connection was closed.  It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+       return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+type ListenerEventType int
+
+const (
+       // Emitted only when the database connection has been initially
+       // initialized.  err will always be nil.
+       ListenerEventConnected ListenerEventType = iota
+
+       // Emitted after a database connection has been lost, either because of an
+       // error or because Close has been called.  err will be set to the reason
+       // the database connection was lost.
+       ListenerEventDisconnected
+
+       // Emitted after a database connection has been re-established after
+       // connection loss.  err will always be nil.  After this event has been
+       // emitted, a nil pq.Notification is sent on the Listener.Notify channel.
+       ListenerEventReconnected
+
+       // Emitted after a connection to the database was attempted, but failed.
+       // err will be set to an error describing why the connection attempt did
+       // not succeed.
+       ListenerEventConnectionAttemptFailed
+)
+
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database.  For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+       // Channel for receiving notifications from the database.  In some cases a
+       // nil value will be sent.  See section "Notifications" above.
+       Notify chan *Notification
+
+       name                 string
+       minReconnectInterval time.Duration
+       maxReconnectInterval time.Duration
+       dialer               Dialer
+       eventCallback        EventCallbackType
+
+       lock                 sync.Mutex
+       isClosed             bool
+       reconnectCond        *sync.Cond
+       cn                   *ListenerConn
+       connNotificationChan <-chan *Notification
+       channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss.  After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached.  Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes.  This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+       minReconnectInterval time.Duration,
+       maxReconnectInterval time.Duration,
+       eventCallback EventCallbackType) *Listener {
+       return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
+}
+
+// NewDialListener is like NewListener but it takes a Dialer.
+func NewDialListener(d Dialer,
+       name string,
+       minReconnectInterval time.Duration,
+       maxReconnectInterval time.Duration,
+       eventCallback EventCallbackType) *Listener {
+
+       l := &Listener{
+               name:                 name,
+               minReconnectInterval: minReconnectInterval,
+               maxReconnectInterval: maxReconnectInterval,
+               dialer:               d,
+               eventCallback:        eventCallback,
+
+               channels: make(map[string]struct{}),
+
+               Notify: make(chan *Notification, 32),
+       }
+       l.reconnectCond = sync.NewCond(&l.lock)
+
+       go l.listenerMain()
+
+       return l
+}
+
+// Returns the notification channel for this listener.  This is the same
+// channel as Notify, and will not be recreated during the life time of the
+// Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+       return l.Notify
+}
+
+// Listen starts listening for notifications on a channel.  Calls to this
+// function will block until an acknowledgement has been received from the
+// server.  Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection can not be re-established.
+//
+// Listen will only fail in three conditions:
+//   1) The channel is already open.  The returned error will be
+//      ErrChannelAlreadyOpen.
+//   2) The query was executed on the remote server, but PostgreSQL returned an
+//      error message in response to the query.  The returned error will be a
+//      pq.Error containing the information the server supplied.
+//   3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       if l.isClosed {
+               return errListenerClosed
+       }
+
+       // The server allows you to issue a LISTEN on a channel which is already
+       // open, but it seems useful to be able to detect this case to spot for
+       // mistakes in application logic.  If the application genuinely doesn't
+       // care, it can check the exported error and ignore it.
+       _, exists := l.channels[channel]
+       if exists {
+               return ErrChannelAlreadyOpen
+       }
+
+       if l.cn != nil {
+               // If gotResponse is true but error is set, the query was executed on
+               // the remote server, but resulted in an error.  This should be
+               // relatively rare, so it's fine if we just pass the error to our
+               // caller.  However, if gotResponse is false, we could not complete the
+               // query on the remote server and our underlying connection is about
+               // to go away, so we only add relname to l.channels, and wait for
+               // resync() to take care of the rest.
+               gotResponse, err := l.cn.Listen(channel)
+               if gotResponse && err != nil {
+                       return err
+               }
+       }
+
+       l.channels[channel] = struct{}{}
+       for l.cn == nil {
+               l.reconnectCond.Wait()
+               // we let go of the mutex for a while
+               if l.isClosed {
+                       return errListenerClosed
+               }
+       }
+
+       return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list.  Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection.  Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       if l.isClosed {
+               return errListenerClosed
+       }
+
+       // Similarly to LISTEN, this is not an error in Postgres, but it seems
+       // useful to distinguish from the normal conditions.
+       _, exists := l.channels[channel]
+       if !exists {
+               return ErrChannelNotOpen
+       }
+
+       if l.cn != nil {
+               // Similarly to Listen (see comment in that function), the caller
+               // should only be bothered with an error if it came from the backend as
+               // a response to our query.
+               gotResponse, err := l.cn.Unlisten(channel)
+               if gotResponse && err != nil {
+                       return err
+               }
+       }
+
+       // Don't bother waiting for resync if there's no connection.
+       delete(l.channels, channel)
+       return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list.  Returns
+// immediately with no error if there is no connection.  Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       if l.isClosed {
+               return errListenerClosed
+       }
+
+       if l.cn != nil {
+               // Similarly to Listen (see comment in that function), the caller
+               // should only be bothered with an error if it came from the backend as
+               // a response to our query.
+               gotResponse, err := l.cn.UnlistenAll()
+               if gotResponse && err != nil {
+                       return err
+               }
+       }
+
+       // Don't bother waiting for resync if there's no connection.
+       l.channels = make(map[string]struct{})
+       return nil
+}
+
+// Ping the remote server to make sure it's alive.  Non-nil return value means
+// that there is no active connection.
+func (l *Listener) Ping() error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       if l.isClosed {
+               return errListenerClosed
+       }
+       if l.cn == nil {
+               return errors.New("no connection")
+       }
+
+       return l.cn.Ping()
+}
+
+// Clean up after losing the server connection.  Returns l.cn.Err(), which
+// should have the reason the connection was lost.
+func (l *Listener) disconnectCleanup() error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       // sanity check; can't look at Err() until the channel has been closed
+       select {
+       case _, ok := <-l.connNotificationChan:
+               if ok {
+                       panic("connNotificationChan not closed")
+               }
+       default:
+               panic("connNotificationChan not closed")
+       }
+
+       err := l.cn.Err()
+       l.cn.Close()
+       l.cn = nil
+       return err
+}
+
+// Synchronize the list of channels we want to be listening on with the server
+// after the connection has been established.
+func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
+       doneChan := make(chan error)
+       go func() {
+               for channel := range l.channels {
+                       // If we got a response, return that error to our caller as it's
+                       // going to be more descriptive than cn.Err().
+                       gotResponse, err := cn.Listen(channel)
+                       if gotResponse && err != nil {
+                               doneChan <- err
+                               return
+                       }
+
+                       // If we couldn't reach the server, wait for notificationChan to
+                       // close and then return the error message from the connection, as
+                       // per ListenerConn's interface.
+                       if err != nil {
+                               for _ = range notificationChan {
+                               }
+                               doneChan <- cn.Err()
+                               return
+                       }
+               }
+               doneChan <- nil
+       }()
+
+       // Ignore notifications while synchronization is going on to avoid
+       // deadlocks.  We have to send a nil notification over Notify anyway as
+       // we can't possibly know which notifications (if any) were lost while
+       // the connection was down, so there's no reason to try and process
+       // these messages at all.
+       for {
+               select {
+               case _, ok := <-notificationChan:
+                       if !ok {
+                               notificationChan = nil
+                       }
+
+               case err := <-doneChan:
+                       return err
+               }
+       }
+}
+
+// caller should NOT be holding l.lock
+func (l *Listener) closed() bool {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       return l.isClosed
+}
+
+func (l *Listener) connect() error {
+       notificationChan := make(chan *Notification, 32)
+       cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
+       if err != nil {
+               return err
+       }
+
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       err = l.resync(cn, notificationChan)
+       if err != nil {
+               cn.Close()
+               return err
+       }
+
+       l.cn = cn
+       l.connNotificationChan = notificationChan
+       l.reconnectCond.Broadcast()
+
+       return nil
+}
+
+// Close disconnects the Listener from the database and shuts it down.
+// Subsequent calls to its methods will return an error.  Close returns an
+// error if the connection has already been closed.
+func (l *Listener) Close() error {
+       l.lock.Lock()
+       defer l.lock.Unlock()
+
+       if l.isClosed {
+               return errListenerClosed
+       }
+
+       if l.cn != nil {
+               l.cn.Close()
+       }
+       l.isClosed = true
+
+       return nil
+}
+
+func (l *Listener) emitEvent(event ListenerEventType, err error) {
+       if l.eventCallback != nil {
+               l.eventCallback(event, err)
+       }
+}
+
+// Main logic here: maintain a connection to the server when possible, wait
+// for notifications and emit events.
+func (l *Listener) listenerConnLoop() {
+       var nextReconnect time.Time
+
+       reconnectInterval := l.minReconnectInterval
+       for {
+               for {
+                       err := l.connect()
+                       if err == nil {
+                               break
+                       }
+
+                       if l.closed() {
+                               return
+                       }
+                       l.emitEvent(ListenerEventConnectionAttemptFailed, err)
+
+                       time.Sleep(reconnectInterval)
+                       reconnectInterval *= 2
+                       if reconnectInterval > l.maxReconnectInterval {
+                               reconnectInterval = l.maxReconnectInterval
+                       }
+               }
+
+               if nextReconnect.IsZero() {
+                       l.emitEvent(ListenerEventConnected, nil)
+               } else {
+                       l.emitEvent(ListenerEventReconnected, nil)
+                       l.Notify <- nil
+               }
+
+               reconnectInterval = l.minReconnectInterval
+               nextReconnect = time.Now().Add(reconnectInterval)
+
+               for {
+                       notification, ok := <-l.connNotificationChan
+                       if !ok {
+                               // lost connection, loop again
+                               break
+                       }
+                       l.Notify <- notification
+               }
+
+               err := l.disconnectCleanup()
+               if l.closed() {
+                       return
+               }
+               l.emitEvent(ListenerEventDisconnected, err)
+
+               time.Sleep(nextReconnect.Sub(time.Now()))
+       }
+}
+
+func (l *Listener) listenerMain() {
+       l.listenerConnLoop()
+       close(l.Notify)
+}
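
To tie the pieces above together, a short sketch (separate from the vendored file, and simpler than
the listen_example program) of consuming Listener.Notify, including the nil notification sent after
a reconnect as described for ListenerEventReconnected; the channel name and DSN are placeholders.

    package main

    import (
        "fmt"
        "time"

        "github.com/lib/pq"
    )

    func main() {
        // Placeholder DSN; the event callback may be nil if events are not needed.
        l := pq.NewListener("dbname=pqgotest sslmode=disable",
            10*time.Second, time.Minute, nil)
        if err := l.Listen("events"); err != nil {
            panic(err)
        }

        for n := range l.Notify {
            if n == nil {
                // Sent after a reconnect: notifications may have been lost while the
                // connection was down, so re-check application state in the database.
                fmt.Println("reconnected; resyncing state")
                continue
            }
            fmt.Printf("channel=%s payload=%q\n", n.Channel, n.Extra)
        }
    }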
