http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index 4e4c97a..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,549 +0,0 @@
-package toml
-
-import (
-       "bufio"
-       "errors"
-       "fmt"
-       "io"
-       "reflect"
-       "sort"
-       "strconv"
-       "strings"
-       "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
-       errArrayMixedElementTypes = errors.New(
-               "can't encode array with mixed element types")
-       errArrayNilElement = errors.New(
-               "can't encode array with nil element")
-       errNonString = errors.New(
-               "can't encode a map with non-string key type")
-       errAnonNonStruct = errors.New(
-               "can't encode an anonymous field that is not a struct")
-       errArrayNoTable = errors.New(
-               "TOML array element can't contain a table")
-       errNoKey = errors.New(
-               "top-level values must be a Go map or struct")
-       errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
-       "\t", "\\t",
-       "\n", "\\n",
-       "\r", "\\r",
-       "\"", "\\\"",
-       "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document written to
-// some io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
-       // A single indentation level. By default it is two spaces.
-       Indent string
-
-       // hasWritten is whether we have written any output to w yet.
-       hasWritten bool
-       w          *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
-       return &Encoder{
-               w:      bufio.NewWriter(w),
-               Indent: "  ",
-       }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
-       rv := eindirect(reflect.ValueOf(v))
-       if err := enc.safeEncode(Key([]string{}), rv); err != nil {
-               return err
-       }
-       return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
-       defer func() {
-               if r := recover(); r != nil {
-                       if terr, ok := r.(tomlEncodeError); ok {
-                               err = terr.error
-                               return
-                       }
-                       panic(r)
-               }
-       }()
-       enc.encode(key, rv)
-       return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
-       // Special case. Time needs to be in ISO8601 format.
-       // Special case. If we can marshal the type to text, then we use that.
-       // Basically, this prevents the encoder from handling these types as
-       // generic structs (or whatever the underlying type of a TextMarshaler is).
-       switch rv.Interface().(type) {
-       case time.Time, TextMarshaler:
-               enc.keyEqElement(key, rv)
-               return
-       }
-
-       k := rv.Kind()
-       switch k {
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-               reflect.Int64,
-               reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-               reflect.Uint64,
-               reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
-               enc.keyEqElement(key, rv)
-       case reflect.Array, reflect.Slice:
-               if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
-                       enc.eArrayOfTables(key, rv)
-               } else {
-                       enc.keyEqElement(key, rv)
-               }
-       case reflect.Interface:
-               if rv.IsNil() {
-                       return
-               }
-               enc.encode(key, rv.Elem())
-       case reflect.Map:
-               if rv.IsNil() {
-                       return
-               }
-               enc.eTable(key, rv)
-       case reflect.Ptr:
-               if rv.IsNil() {
-                       return
-               }
-               enc.encode(key, rv.Elem())
-       case reflect.Struct:
-               enc.eTable(key, rv)
-       default:
-               panic(e("Unsupported type for key '%s': %s", key, k))
-       }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
-       switch v := rv.Interface().(type) {
-       case time.Time:
-               // Special case time.Time as a primitive. Has to come before
-               // TextMarshaler below because time.Time implements
-               // encoding.TextMarshaler, but we need to always use UTC.
-               enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
-               return
-       case TextMarshaler:
-               // Special case. Use text marshaler if it's available for this value.
-               if s, err := v.MarshalText(); err != nil {
-                       encPanic(err)
-               } else {
-                       enc.writeQuoted(string(s))
-               }
-               return
-       }
-       switch rv.Kind() {
-       case reflect.Bool:
-               enc.wf(strconv.FormatBool(rv.Bool()))
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-               reflect.Int64:
-               enc.wf(strconv.FormatInt(rv.Int(), 10))
-       case reflect.Uint, reflect.Uint8, reflect.Uint16,
-               reflect.Uint32, reflect.Uint64:
-               enc.wf(strconv.FormatUint(rv.Uint(), 10))
-       case reflect.Float32:
-               enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
-       case reflect.Float64:
-               enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
-       case reflect.Array, reflect.Slice:
-               enc.eArrayOrSliceElement(rv)
-       case reflect.Interface:
-               enc.eElement(rv.Elem())
-       case reflect.String:
-               enc.writeQuoted(rv.String())
-       default:
-               panic(e("Unexpected primitive type: %s", rv.Kind()))
-       }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
-       if !strings.Contains(fstr, ".") {
-               return fstr + ".0"
-       }
-       return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
-       enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
-       length := rv.Len()
-       enc.wf("[")
-       for i := 0; i < length; i++ {
-               elem := rv.Index(i)
-               enc.eElement(elem)
-               if i != length-1 {
-                       enc.wf(", ")
-               }
-       }
-       enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
-       if len(key) == 0 {
-               encPanic(errNoKey)
-       }
-       for i := 0; i < rv.Len(); i++ {
-               trv := rv.Index(i)
-               if isNil(trv) {
-                       continue
-               }
-               panicIfInvalidKey(key)
-               enc.newline()
-               enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
-               enc.newline()
-               enc.eMapOrStruct(key, trv)
-       }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
-       panicIfInvalidKey(key)
-       if len(key) == 1 {
-               // Output an extra new line between top-level tables.
-               // (The newline isn't written if nothing else has been written though.)
-               enc.newline()
-       }
-       if len(key) > 0 {
-               enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
-               enc.newline()
-       }
-       enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
-       switch rv := eindirect(rv); rv.Kind() {
-       case reflect.Map:
-               enc.eMap(key, rv)
-       case reflect.Struct:
-               enc.eStruct(key, rv)
-       default:
-               panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
-       }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
-       rt := rv.Type()
-       if rt.Key().Kind() != reflect.String {
-               encPanic(errNonString)
-       }
-
-       // Sort keys so that we have deterministic output. And write keys directly
-       // underneath this key first, before writing sub-structs or sub-maps.
-       var mapKeysDirect, mapKeysSub []string
-       for _, mapKey := range rv.MapKeys() {
-               k := mapKey.String()
-               if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
-                       mapKeysSub = append(mapKeysSub, k)
-               } else {
-                       mapKeysDirect = append(mapKeysDirect, k)
-               }
-       }
-
-       var writeMapKeys = func(mapKeys []string) {
-               sort.Strings(mapKeys)
-               for _, mapKey := range mapKeys {
-                       mrv := rv.MapIndex(reflect.ValueOf(mapKey))
-                       if isNil(mrv) {
-                               // Don't write anything for nil fields.
-                               continue
-                       }
-                       enc.encode(key.add(mapKey), mrv)
-               }
-       }
-       writeMapKeys(mapKeysDirect)
-       writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
-       // Write keys for fields directly under this key first, because if we write
-       // a field that creates a new table, then all keys under it will be in that
-       // table (not the one we're writing here).
-       rt := rv.Type()
-       var fieldsDirect, fieldsSub [][]int
-       var addFields func(rt reflect.Type, rv reflect.Value, start []int)
-       addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
-               for i := 0; i < rt.NumField(); i++ {
-                       f := rt.Field(i)
-                       // skip unexported fields
-                       if f.PkgPath != "" && !f.Anonymous {
-                               continue
-                       }
-                       frv := rv.Field(i)
-                       if f.Anonymous {
-                               t := f.Type
-                               switch t.Kind() {
-                               case reflect.Struct:
-                                       addFields(t, frv, f.Index)
-                                       continue
-                               case reflect.Ptr:
-                                       if t.Elem().Kind() == reflect.Struct {
-                                               if !frv.IsNil() {
-                                                       addFields(t.Elem(), frv.Elem(), f.Index)
-                                               }
-                                               continue
-                                       }
-                                       // Fall through to the normal field encoding logic below
-                                       // for non-struct anonymous fields.
-                               }
-                       }
-
-                       if typeIsHash(tomlTypeOfGo(frv)) {
-                               fieldsSub = append(fieldsSub, append(start, f.Index...))
-                       } else {
-                               fieldsDirect = append(fieldsDirect, append(start, f.Index...))
-                       }
-               }
-       }
-       addFields(rt, rv, nil)
-
-       var writeFields = func(fields [][]int) {
-               for _, fieldIndex := range fields {
-                       sft := rt.FieldByIndex(fieldIndex)
-                       sf := rv.FieldByIndex(fieldIndex)
-                       if isNil(sf) {
-                               // Don't write anything for nil fields.
-                               continue
-                       }
-
-                       tag := sft.Tag.Get("toml")
-                       if tag == "-" {
-                               continue
-                       }
-                       keyName, opts := getOptions(tag)
-                       if keyName == "" {
-                               keyName = sft.Name
-                       }
-                       if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
-                               continue
-                       } else if _, ok := opts["omitzero"]; ok && isZero(sf) {
-                               continue
-                       }
-
-                       enc.encode(key.add(keyName), sf)
-               }
-       }
-       writeFields(fieldsDirect)
-       writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
-       if isNil(rv) || !rv.IsValid() {
-               return nil
-       }
-       switch rv.Kind() {
-       case reflect.Bool:
-               return tomlBool
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-               reflect.Int64,
-               reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-               reflect.Uint64:
-               return tomlInteger
-       case reflect.Float32, reflect.Float64:
-               return tomlFloat
-       case reflect.Array, reflect.Slice:
-               if typeEqual(tomlHash, tomlArrayType(rv)) {
-                       return tomlArrayHash
-               } else {
-                       return tomlArray
-               }
-       case reflect.Ptr, reflect.Interface:
-               return tomlTypeOfGo(rv.Elem())
-       case reflect.String:
-               return tomlString
-       case reflect.Map:
-               return tomlHash
-       case reflect.Struct:
-               switch rv.Interface().(type) {
-               case time.Time:
-                       return tomlDatetime
-               case TextMarshaler:
-                       return tomlString
-               default:
-                       return tomlHash
-               }
-       default:
-               panic("unexpected reflect.Kind: " + rv.Kind().String())
-       }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slice). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
-       if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
-               return nil
-       }
-       firstType := tomlTypeOfGo(rv.Index(0))
-       if firstType == nil {
-               encPanic(errArrayNilElement)
-       }
-
-       rvlen := rv.Len()
-       for i := 1; i < rvlen; i++ {
-               elem := rv.Index(i)
-               switch elemType := tomlTypeOfGo(elem); {
-               case elemType == nil:
-                       encPanic(errArrayNilElement)
-               case !typeEqual(firstType, elemType):
-                       encPanic(errArrayMixedElementTypes)
-               }
-       }
-       // If we have a nested array, then we must make sure that the nested
-       // array contains ONLY primitives.
-       // This checks arbitrarily nested arrays.
-       if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
-               nest := tomlArrayType(eindirect(rv.Index(0)))
-               if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
-                       encPanic(errArrayNoTable)
-               }
-       }
-       return firstType
-}
-
-func getOptions(keyName string) (string, map[string]struct{}) {
-       opts := make(map[string]struct{})
-       ss := strings.Split(keyName, ",")
-       name := ss[0]
-       if len(ss) > 1 {
-               for _, opt := range ss {
-                       opts[opt] = struct{}{}
-               }
-       }
-
-       return name, opts
-}
-
-func isZero(rv reflect.Value) bool {
-       switch rv.Kind() {
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-               return rv.Int() == 0
-       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-               return rv.Uint() == 0
-       case reflect.Float32, reflect.Float64:
-               return rv.Float() == 0.0
-       }
-       return false
-}
-
-func isEmpty(rv reflect.Value) bool {
-       switch rv.Kind() {
-       case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
-               return rv.Len() == 0
-       case reflect.Bool:
-               return !rv.Bool()
-       }
-       return false
-}
-
-func (enc *Encoder) newline() {
-       if enc.hasWritten {
-               enc.wf("\n")
-       }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
-       if len(key) == 0 {
-               encPanic(errNoKey)
-       }
-       panicIfInvalidKey(key)
-       enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
-       enc.eElement(val)
-       enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
-       if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
-               encPanic(err)
-       }
-       enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
-       return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
-       panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
-       switch v.Kind() {
-       case reflect.Ptr, reflect.Interface:
-               return eindirect(v.Elem())
-       default:
-               return v
-       }
-}
-
-func isNil(rv reflect.Value) bool {
-       switch rv.Kind() {
-       case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-               return rv.IsNil()
-       default:
-               return false
-       }
-}
-
-func panicIfInvalidKey(key Key) {
-       for _, k := range key {
-               if len(k) == 0 {
-                       encPanic(e("Key '%s' is not a valid table name. Key names "+
-                               "cannot be empty.", key.maybeQuotedAll()))
-               }
-       }
-}
-
-func isValidKeyName(s string) bool {
-       return len(s) != 0
-}
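
For context, the public surface of the deleted encode.go is NewEncoder, the Indent
field, and Encode (see the doc comments above: map keys are sorted, and keys without
sub-tables are written before sub-tables). A minimal usage sketch, assuming the
upstream github.com/BurntSushi/toml import path that this vendored copy mirrors:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/BurntSushi/toml"
    )

    func main() {
        // Map keys are sorted and simple keys are written before sub-tables,
        // per the Encode doc comment in the deleted encode.go.
        cfg := map[string]interface{}{
            "name":  "newt",
            "ports": []int{8080, 8443},
            "owner": map[string]string{"org": "apache"},
        }

        var buf bytes.Buffer
        enc := toml.NewEncoder(&buf)
        enc.Indent = "    " // default is two spaces
        if err := enc.Encode(cfg); err != nil {
            log.Fatal(err)
        }
        fmt.Print(buf.String())
    }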

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go
deleted file mode 100644
index ef7acdd..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go
+++ /dev/null
@@ -1,590 +0,0 @@
-package toml
-
-import (
-       "bytes"
-       "fmt"
-       "log"
-       "net"
-       "testing"
-       "time"
-)
-
-func TestEncodeRoundTrip(t *testing.T) {
-       type Config struct {
-               Age        int
-               Cats       []string
-               Pi         float64
-               Perfection []int
-               DOB        time.Time
-               Ipaddress  net.IP
-       }
-
-       var inputs = Config{
-               13,
-               []string{"one", "two", "three"},
-               3.145,
-               []int{11, 2, 3, 4},
-               time.Now(),
-               net.ParseIP("192.168.59.254"),
-       }
-
-       var firstBuffer bytes.Buffer
-       e := NewEncoder(&firstBuffer)
-       err := e.Encode(inputs)
-       if err != nil {
-               t.Fatal(err)
-       }
-       var outputs Config
-       if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
-               log.Printf("Could not decode:\n-----\n%s\n-----\n",
-                       firstBuffer.String())
-               t.Fatal(err)
-       }
-
-       // could test each value individually, but I'm lazy
-       var secondBuffer bytes.Buffer
-       e2 := NewEncoder(&secondBuffer)
-       err = e2.Encode(outputs)
-       if err != nil {
-               t.Fatal(err)
-       }
-       if firstBuffer.String() != secondBuffer.String() {
-               t.Error(
-                       firstBuffer.String(),
-                       "\n\n is not identical to\n\n",
-                       secondBuffer.String())
-       }
-}
-
-// XXX(burntsushi)
-// I think these tests probably should be removed. They are good, but they
-// ought to be obsolete by toml-test.
-func TestEncode(t *testing.T) {
-       type Embedded struct {
-               Int int `toml:"_int"`
-       }
-       type NonStruct int
-
-       date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
-       dateStr := "2014-05-11T19:30:40Z"
-
-       tests := map[string]struct {
-               input      interface{}
-               wantOutput string
-               wantError  error
-       }{
-               "bool field": {
-                       input: struct {
-                               BoolTrue  bool
-                               BoolFalse bool
-                       }{true, false},
-                       wantOutput: "BoolTrue = true\nBoolFalse = false\n",
-               },
-               "int fields": {
-                       input: struct {
-                               Int   int
-                               Int8  int8
-                               Int16 int16
-                               Int32 int32
-                               Int64 int64
-                       }{1, 2, 3, 4, 5},
-                       wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
-               },
-               "uint fields": {
-                       input: struct {
-                               Uint   uint
-                               Uint8  uint8
-                               Uint16 uint16
-                               Uint32 uint32
-                               Uint64 uint64
-                       }{1, 2, 3, 4, 5},
-                       wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
-                               "\nUint64 = 5\n",
-               },
-               "float fields": {
-                       input: struct {
-                               Float32 float32
-                               Float64 float64
-                       }{1.5, 2.5},
-                       wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
-               },
-               "string field": {
-                       input:      struct{ String string }{"foo"},
-                       wantOutput: "String = \"foo\"\n",
-               },
-               "string field and unexported field": {
-                       input: struct {
-                               String     string
-                               unexported int
-                       }{"foo", 0},
-                       wantOutput: "String = \"foo\"\n",
-               },
-               "datetime field in UTC": {
-                       input:      struct{ Date time.Time }{date},
-                       wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
-               },
-               "datetime field as primitive": {
-                       // Using a map here to fail if isStructOrMap() returns true for
-                       // time.Time.
-                       input: map[string]interface{}{
-                               "Date": date,
-                               "Int":  1,
-                       },
-                       wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
-               },
-               "array fields": {
-                       input: struct {
-                               IntArray0 [0]int
-                               IntArray3 [3]int
-                       }{[0]int{}, [3]int{1, 2, 3}},
-                       wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
-               },
-               "slice fields": {
-                       input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
-                               nil, []int{}, []int{1, 2, 3},
-                       },
-                       wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
-               },
-               "datetime slices": {
-                       input: struct{ DatetimeSlice []time.Time }{
-                               []time.Time{date, date},
-                       },
-                       wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
-                               dateStr, dateStr),
-               },
-               "nested arrays and slices": {
-                       input: struct {
-                               SliceOfArrays         [][2]int
-                               ArrayOfSlices         [2][]int
-                               SliceOfArraysOfSlices [][2][]int
-                               ArrayOfSlicesOfArrays [2][][2]int
-                               SliceOfMixedArrays    [][2]interface{}
-                               ArrayOfMixedSlices    [2][]interface{}
-                       }{
-                               [][2]int{{1, 2}, {3, 4}},
-                               [2][]int{{1, 2}, {3, 4}},
-                               [][2][]int{
-                                       {
-                                               {1, 2}, {3, 4},
-                                       },
-                                       {
-                                               {5, 6}, {7, 8},
-                                       },
-                               },
-                               [2][][2]int{
-                                       {
-                                               {1, 2}, {3, 4},
-                                       },
-                                       {
-                                               {5, 6}, {7, 8},
-                                       },
-                               },
-                               [][2]interface{}{
-                                       {1, 2}, {"a", "b"},
-                               },
-                               [2][]interface{}{
-                                       {1, 2}, {"a", "b"},
-                               },
-                       },
-                       wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
-ArrayOfSlices = [[1, 2], [3, 4]]
-SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-SliceOfMixedArrays = [[1, 2], ["a", "b"]]
-ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
-`,
-               },
-               "empty slice": {
-                       input:      struct{ Empty []interface{} }{[]interface{}{}},
-                       wantOutput: "Empty = []\n",
-               },
-               "(error) slice with element type mismatch (string and integer)": {
-                       input:     struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
-                       wantError: errArrayMixedElementTypes,
-               },
-               "(error) slice with element type mismatch (integer and float)": {
-                       input:     struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
-                       wantError: errArrayMixedElementTypes,
-               },
-               "slice with elems of differing Go types, same TOML types": {
-                       input: struct {
-                               MixedInts   []interface{}
-                               MixedFloats []interface{}
-                       }{
-                               []interface{}{
-                                       int(1), int8(2), int16(3), int32(4), int64(5),
-                                       uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
-                               },
-                               []interface{}{float32(1.5), float64(2.5)},
-                       },
-                       wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
-                               "MixedFloats = [1.5, 2.5]\n",
-               },
-               "(error) slice w/ element type mismatch (one is nested array)": {
-                       input: struct{ Mixed []interface{} }{
-                               []interface{}{1, []interface{}{2}},
-                       },
-                       wantError: errArrayMixedElementTypes,
-               },
-               "(error) slice with 1 nil element": {
-                       input:     struct{ NilElement1 []interface{} }{[]interface{}{nil}},
-                       wantError: errArrayNilElement,
-               },
-               "(error) slice with 1 nil element (and other non-nil elements)": {
-                       input: struct{ NilElement []interface{} }{
-                               []interface{}{1, nil},
-                       },
-                       wantError: errArrayNilElement,
-               },
-               "simple map": {
-                       input:      map[string]int{"a": 1, "b": 2},
-                       wantOutput: "a = 1\nb = 2\n",
-               },
-               "map with interface{} value type": {
-                       input:      map[string]interface{}{"a": 1, "b": "c"},
-                       wantOutput: "a = 1\nb = \"c\"\n",
-               },
-               "map with interface{} value type, some of which are structs": {
-                       input: map[string]interface{}{
-                               "a": struct{ Int int }{2},
-                               "b": 1,
-                       },
-                       wantOutput: "b = 1\n\n[a]\n  Int = 2\n",
-               },
-               "nested map": {
-                       input: map[string]map[string]int{
-                               "a": {"b": 1},
-                               "c": {"d": 2},
-                       },
-                       wantOutput: "[a]\n  b = 1\n\n[c]\n  d = 2\n",
-               },
-               "nested struct": {
-                       input: struct{ Struct struct{ Int int } }{
-                               struct{ Int int }{1},
-                       },
-                       wantOutput: "[Struct]\n  Int = 1\n",
-               },
-               "nested struct and non-struct field": {
-                       input: struct {
-                               Struct struct{ Int int }
-                               Bool   bool
-                       }{struct{ Int int }{1}, true},
-                       wantOutput: "Bool = true\n\n[Struct]\n  Int = 1\n",
-               },
-               "2 nested structs": {
-                       input: struct{ Struct1, Struct2 struct{ Int int } }{
-                               struct{ Int int }{1}, struct{ Int int }{2},
-                       },
-                       wantOutput: "[Struct1]\n  Int = 1\n\n[Struct2]\n  Int = 2\n",
-               },
-               "deeply nested structs": {
-                       input: struct {
-                               Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
-                       }{
-                               struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
-                               struct{ Struct3 *struct{ Int int } }{nil},
-                       },
-                       wantOutput: "[Struct1]\n  [Struct1.Struct3]\n    Int = 1" +
-                               "\n\n[Struct2]\n",
-               },
-               "nested struct with nil struct elem": {
-                       input: struct {
-                               Struct struct{ Inner *struct{ Int int } }
-                       }{
-                               struct{ Inner *struct{ Int int } }{nil},
-                       },
-                       wantOutput: "[Struct]\n",
-               },
-               "nested struct with no fields": {
-                       input: struct {
-                               Struct struct{ Inner struct{} }
-                       }{
-                               struct{ Inner struct{} }{struct{}{}},
-                       },
-                       wantOutput: "[Struct]\n  [Struct.Inner]\n",
-               },
-               "struct with tags": {
-                       input: struct {
-                               Struct struct {
-                                       Int int `toml:"_int"`
-                               } `toml:"_struct"`
-                               Bool bool `toml:"_bool"`
-                       }{
-                               struct {
-                                       Int int `toml:"_int"`
-                               }{1}, true,
-                       },
-                       wantOutput: "_bool = true\n\n[_struct]\n  _int = 1\n",
-               },
-               "embedded struct": {
-                       input:      struct{ Embedded }{Embedded{1}},
-                       wantOutput: "_int = 1\n",
-               },
-               "embedded *struct": {
-                       input:      struct{ *Embedded }{&Embedded{1}},
-                       wantOutput: "_int = 1\n",
-               },
-               "nested embedded struct": {
-                       input: struct {
-                               Struct struct{ Embedded } `toml:"_struct"`
-                       }{struct{ Embedded }{Embedded{1}}},
-                       wantOutput: "[_struct]\n  _int = 1\n",
-               },
-               "nested embedded *struct": {
-                       input: struct {
-                               Struct struct{ *Embedded } `toml:"_struct"`
-                       }{struct{ *Embedded }{&Embedded{1}}},
-                       wantOutput: "[_struct]\n  _int = 1\n",
-               },
-               "embedded non-struct": {
-                       input:      struct{ NonStruct }{5},
-                       wantOutput: "NonStruct = 5\n",
-               },
-               "array of tables": {
-                       input: struct {
-                               Structs []*struct{ Int int } `toml:"struct"`
-                       }{
-                               []*struct{ Int int }{{1}, {3}},
-                       },
-                       wantOutput: "[[struct]]\n  Int = 1\n\n[[struct]]\n  Int = 3\n",
-               },
-               "array of tables order": {
-                       input: map[string]interface{}{
-                               "map": map[string]interface{}{
-                                       "zero": 5,
-                                       "arr": []map[string]int{
-                                               {
-                                                       "friend": 5,
-                                               },
-                                       },
-                               },
-                       },
-                       wantOutput: "[map]\n  zero = 5\n\n  [[map.arr]]\n    friend = 5\n",
-               },
-               "(error) top-level slice": {
-                       input:     []struct{ Int int }{{1}, {2}, {3}},
-                       wantError: errNoKey,
-               },
-               "(error) slice of slice": {
-                       input: struct {
-                               Slices [][]struct{ Int int }
-                       }{
-                               [][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
-                       },
-                       wantError: errArrayNoTable,
-               },
-               "(error) map no string key": {
-                       input:     map[int]string{1: ""},
-                       wantError: errNonString,
-               },
-               "(error) empty key name": {
-                       input:     map[string]int{"": 1},
-                       wantError: errAnything,
-               },
-               "(error) empty map name": {
-                       input: map[string]interface{}{
-                               "": map[string]int{"v": 1},
-                       },
-                       wantError: errAnything,
-               },
-       }
-       for label, test := range tests {
-               encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
-       }
-}
-
-func TestEncodeNestedTableArrays(t *testing.T) {
-       type song struct {
-               Name string `toml:"name"`
-       }
-       type album struct {
-               Name  string `toml:"name"`
-               Songs []song `toml:"songs"`
-       }
-       type springsteen struct {
-               Albums []album `toml:"albums"`
-       }
-       value := springsteen{
-               []album{
-                       {"Born to Run",
-                               []song{{"Jungleland"}, {"Meeting Across the River"}}},
-                       {"Born in the USA",
-                               []song{{"Glory Days"}, {"Dancing in the Dark"}}},
-               },
-       }
-       expected := `[[albums]]
-  name = "Born to Run"
-
-  [[albums.songs]]
-    name = "Jungleland"
-
-  [[albums.songs]]
-    name = "Meeting Across the River"
-
-[[albums]]
-  name = "Born in the USA"
-
-  [[albums.songs]]
-    name = "Glory Days"
-
-  [[albums.songs]]
-    name = "Dancing in the Dark"
-`
-       encodeExpected(t, "nested table arrays", value, expected, nil)
-}
-
-func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
-       type Alpha struct {
-               V int
-       }
-       type Beta struct {
-               V int
-       }
-       type Conf struct {
-               V int
-               A Alpha
-               B []Beta
-       }
-
-       val := Conf{
-               V: 1,
-               A: Alpha{2},
-               B: []Beta{{3}},
-       }
-       expected := "V = 1\n\n[A]\n  V = 2\n\n[[B]]\n  V = 3\n"
-       encodeExpected(t, "array hash with normal hash order", val, expected, nil)
-}
-
-func TestEncodeWithOmitEmpty(t *testing.T) {
-       type simple struct {
-               Bool   bool              `toml:"bool,omitempty"`
-               String string            `toml:"string,omitempty"`
-               Array  [0]byte           `toml:"array,omitempty"`
-               Slice  []int             `toml:"slice,omitempty"`
-               Map    map[string]string `toml:"map,omitempty"`
-       }
-
-       var v simple
-       encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil)
-       v = simple{
-               Bool:   true,
-               String: " ",
-               Slice:  []int{2, 3, 4},
-               Map:    map[string]string{"foo": "bar"},
-       }
-       expected := `bool = true
-string = " "
-slice = [2, 3, 4]
-
-[map]
-  foo = "bar"
-`
-       encodeExpected(t, "fields with omitempty are not omitted when non-empty",
-               v, expected, nil)
-}
-
-func TestEncodeWithOmitZero(t *testing.T) {
-       type simple struct {
-               Number   int     `toml:"number,omitzero"`
-               Real     float64 `toml:"real,omitzero"`
-               Unsigned uint    `toml:"unsigned,omitzero"`
-       }
-
-       value := simple{0, 0.0, uint(0)}
-       expected := ""
-
-       encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
-
-       value.Number = 10
-       value.Real = 20
-       value.Unsigned = 5
-       expected = `number = 10
-real = 20.0
-unsigned = 5
-`
-       encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
-}
-
-func TestEncodeOmitemptyWithEmptyName(t *testing.T) {
-       type simple struct {
-               S []int `toml:",omitempty"`
-       }
-       v := simple{[]int{1, 2, 3}}
-       expected := "S = [1, 2, 3]\n"
-       encodeExpected(t, "simple with omitempty, no name, non-empty field",
-               v, expected, nil)
-}
-
-func TestEncodeAnonymousStructPointerField(t *testing.T) {
-       type Sub struct{}
-       type simple struct {
-               *Sub
-       }
-
-       value := simple{}
-       expected := ""
-       encodeExpected(t, "nil anonymous struct pointer field", value, expected, nil)
-
-       value = simple{Sub: &Sub{}}
-       expected = ""
-       encodeExpected(t, "non-nil anonymous struct pointer field", value, expected, nil)
-}
-
-func TestEncodeIgnoredFields(t *testing.T) {
-       type simple struct {
-               Number int `toml:"-"`
-       }
-       value := simple{}
-       expected := ""
-       encodeExpected(t, "ignored field", value, expected, nil)
-}
-
-func encodeExpected(
-       t *testing.T, label string, val interface{}, wantStr string, wantErr error,
-) {
-       var buf bytes.Buffer
-       enc := NewEncoder(&buf)
-       err := enc.Encode(val)
-       if err != wantErr {
-               if wantErr != nil {
-                       if wantErr == errAnything && err != nil {
-                               return
-                       }
-                       t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
-               } else {
-                       t.Errorf("%s: Encode failed: %s", label, err)
-               }
-       }
-       if err != nil {
-               return
-       }
-       if got := buf.String(); wantStr != got {
-               t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
-                       label, wantStr, got)
-       }
-}
-
-func ExampleEncoder_Encode() {
-       date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
-       var config = map[string]interface{}{
-               "date":   date,
-               "counts": []int{1, 1, 2, 3, 5, 8},
-               "hash": map[string]string{
-                       "key1": "val1",
-                       "key2": "val2",
-               },
-       }
-       buf := new(bytes.Buffer)
-       if err := NewEncoder(buf).Encode(config); err != nil {
-               log.Fatal(err)
-       }
-       fmt.Println(buf.String())
-
-       // Output:
-       // counts = [1, 1, 2, 3, 5, 8]
-       // date = 2010-03-14T18:00:00Z
-       //
-       // [hash]
-       //   key1 = "val1"
-       //   key2 = "val2"
-}
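
The omitempty/omitzero and ignored-field tests above document the struct-tag handling
implemented by getOptions and eStruct in the deleted encode.go. A small sketch of those
tag forms on a hypothetical config struct, again assuming the upstream import path:

    package main

    import (
        "log"
        "os"

        "github.com/BurntSushi/toml"
    )

    // ServerConfig is a hypothetical struct illustrating the tag forms exercised
    // by the deleted tests: key renaming, omitempty, omitzero and "-".
    type ServerConfig struct {
        Name    string   `toml:"name"`             // renamed key
        Debug   bool     `toml:"debug,omitempty"`  // dropped while false
        Retries int      `toml:"retries,omitzero"` // dropped while zero
        Hosts   []string `toml:"hosts"`
        Scratch string   `toml:"-"` // never encoded
    }

    func main() {
        cfg := ServerConfig{Name: "newt", Retries: 3, Hosts: []string{"a", "b"}}
        if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
            log.Fatal(err)
        }
        // Expected output, mirroring TestEncodeWithOmitEmpty/TestEncodeWithOmitZero:
        // name = "newt"
        // retries = 3
        // hosts = ["a", "b"]
    }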

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
-       "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
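
Since TextMarshaler is just an alias for encoding.TextMarshaler on Go 1.2+, and the
deleted encode.go special-cases it in eElement, any type with a MarshalText method is
emitted as a quoted TOML string rather than a table. A sketch of that hook, using a
hypothetical Version type and the upstream import path:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/BurntSushi/toml"
    )

    // Version is a hypothetical type; its MarshalText method is what makes the
    // encoder write it as a quoted string instead of a [table].
    type Version struct{ Major, Minor int }

    func (v Version) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("%d.%d", v.Major, v.Minor)), nil
    }

    func main() {
        type Pkg struct {
            Name string  `toml:"name"`
            Vers Version `toml:"vers"`
        }
        pkg := Pkg{Name: "newt", Vers: Version{1, 2}}
        if err := toml.NewEncoder(os.Stdout).Encode(pkg); err != nil {
            log.Fatal(err)
        }
        // Expected output:
        // name = "newt"
        // vers = "1.2"
    }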

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
-       MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
-       UnmarshalText(text []byte) error
-}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index 9b20b3a..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,871 +0,0 @@
-package toml
-
-import (
-       "fmt"
-       "strings"
-       "unicode/utf8"
-)
-
-type itemType int
-
-const (
-       itemError itemType = iota
-       itemNIL            // used in the parser to indicate no type
-       itemEOF
-       itemText
-       itemString
-       itemRawString
-       itemMultilineString
-       itemRawMultilineString
-       itemBool
-       itemInteger
-       itemFloat
-       itemDatetime
-       itemArray // the start of an array
-       itemArrayEnd
-       itemTableStart
-       itemTableEnd
-       itemArrayTableStart
-       itemArrayTableEnd
-       itemKeyStart
-       itemCommentStart
-)
-
-const (
-       eof             = 0
-       tableStart      = '['
-       tableEnd        = ']'
-       arrayTableStart = '['
-       arrayTableEnd   = ']'
-       tableSep        = '.'
-       keySep          = '='
-       arrayStart      = '['
-       arrayEnd        = ']'
-       arrayValTerm    = ','
-       commentStart    = '#'
-       stringStart     = '"'
-       stringEnd       = '"'
-       rawStringStart  = '\''
-       rawStringEnd    = '\''
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
-       input string
-       start int
-       pos   int
-       width int
-       line  int
-       state stateFn
-       items chan item
-
-       // A stack of state functions used to maintain context.
-       // The idea is to reuse parts of the state machine in various places.
-       // For example, values can appear at the top level or within arbitrarily
-       // nested arrays. The last state on the stack is used after a value has
-       // been lexed. Similarly for comments.
-       stack []stateFn
-}
-
-type item struct {
-       typ  itemType
-       val  string
-       line int
-}
-
-func (lx *lexer) nextItem() item {
-       for {
-               select {
-               case item := <-lx.items:
-                       return item
-               default:
-                       lx.state = lx.state(lx)
-               }
-       }
-}
-
-func lex(input string) *lexer {
-       lx := &lexer{
-               input: input + "\n",
-               state: lexTop,
-               line:  1,
-               items: make(chan item, 10),
-               stack: make([]stateFn, 0, 10),
-       }
-       return lx
-}
-
-func (lx *lexer) push(state stateFn) {
-       lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
-       if len(lx.stack) == 0 {
-               return lx.errorf("BUG in lexer: no states to pop.")
-       }
-       last := lx.stack[len(lx.stack)-1]
-       lx.stack = lx.stack[0 : len(lx.stack)-1]
-       return last
-}
-
-func (lx *lexer) current() string {
-       return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
-       lx.items <- item{typ, lx.current(), lx.line}
-       lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
-       lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
-       lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
-       if lx.pos >= len(lx.input) {
-               lx.width = 0
-               return eof
-       }
-
-       if lx.input[lx.pos] == '\n' {
-               lx.line++
-       }
-       r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
-       lx.pos += lx.width
-       return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
-       lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only once per call of next.
-func (lx *lexer) backup() {
-       lx.pos -= lx.width
-       if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
-               lx.line--
-       }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
-       if lx.next() == valid {
-               return true
-       }
-       lx.backup()
-       return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
-       r := lx.next()
-       lx.backup()
-       return r
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (new lines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
-       lx.items <- item{
-               itemError,
-               fmt.Sprintf(format, values...),
-               lx.line,
-       }
-       return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
-       r := lx.next()
-       if isWhitespace(r) || isNL(r) {
-               return lexSkip(lx, lexTop)
-       }
-
-       switch r {
-       case commentStart:
-               lx.push(lexTop)
-               return lexCommentStart
-       case tableStart:
-               return lexTableStart
-       case eof:
-               if lx.pos > lx.start {
-                       return lx.errorf("Unexpected EOF.")
-               }
-               lx.emit(itemEOF)
-               return nil
-       }
-
-       // At this point, the only valid item can be a key, so we back up
-       // and let the key lexer do the rest.
-       lx.backup()
-       lx.push(lexTopEnd)
-       return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a new line. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case r == commentStart:
-               // a comment will read to a new line for us.
-               lx.push(lexTop)
-               return lexCommentStart
-       case isWhitespace(r):
-               return lexTopEnd
-       case isNL(r):
-               lx.ignore()
-               return lexTop
-       case r == eof:
-               lx.ignore()
-               return lexTop
-       }
-       return lx.errorf("Expected a top-level item to end with a new line, "+
-               "comment or EOF, but got %q instead.", r)
-}
-
-// lexTableStart lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
-       if lx.peek() == arrayTableStart {
-               lx.next()
-               lx.emit(itemArrayTableStart)
-               lx.push(lexArrayTableEnd)
-       } else {
-               lx.emit(itemTableStart)
-               lx.push(lexTableEnd)
-       }
-       return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
-       lx.emit(itemTableEnd)
-       return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
-       if r := lx.next(); r != arrayTableEnd {
-               return lx.errorf("Expected end of table array name delimiter %q, "+
-                       "but got %q instead.", arrayTableEnd, r)
-       }
-       lx.emit(itemArrayTableEnd)
-       return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
-       switch r := lx.peek(); {
-       case r == tableEnd || r == eof:
-               return lx.errorf("Unexpected end of table name. (Table names cannot " +
-                       "be empty.)")
-       case r == tableSep:
-               return lx.errorf("Unexpected table separator. (Table names cannot " +
-                       "be empty.)")
-       case r == stringStart || r == rawStringStart:
-               lx.ignore()
-               lx.push(lexTableNameEnd)
-               return lexValue // reuse string lexing
-       default:
-               return lexBareTableName
-       }
-}
-
-// lexBareTableName lexes the name of a bare table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
-       switch r := lx.next(); {
-       case isBareKeyChar(r):
-               return lexBareTableName
-       case r == tableSep || r == tableEnd:
-               lx.backup()
-               lx.emitTrim(itemText)
-               return lexTableNameEnd
-       default:
-               return lx.errorf("Bare keys cannot contain %q.", r)
-       }
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
-       switch r := lx.next(); {
-       case isWhitespace(r):
-               return lexTableNameEnd
-       case r == tableSep:
-               lx.ignore()
-               return lexTableNameStart
-       case r == tableEnd:
-               return lx.pop()
-       default:
-               return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
-                       "instead.", r)
-       }
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
-       r := lx.peek()
-       switch {
-       case r == keySep:
-               return lx.errorf("Unexpected key separator %q.", keySep)
-       case isWhitespace(r) || isNL(r):
-               lx.next()
-               return lexSkip(lx, lexKeyStart)
-       case r == stringStart || r == rawStringStart:
-               lx.ignore()
-               lx.emit(itemKeyStart)
-               lx.push(lexKeyEnd)
-               return lexValue // reuse string lexing
-       default:
-               lx.ignore()
-               lx.emit(itemKeyStart)
-               return lexBareKey
-       }
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
-       switch r := lx.next(); {
-       case isBareKeyChar(r):
-               return lexBareKey
-       case isWhitespace(r):
-               lx.emitTrim(itemText)
-               return lexKeyEnd
-       case r == keySep:
-               lx.backup()
-               lx.emitTrim(itemText)
-               return lexKeyEnd
-       default:
-               return lx.errorf("Bare keys cannot contain %q.", r)
-       }
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
-       switch r := lx.next(); {
-       case r == keySep:
-               return lexSkip(lx, lexValue)
-       case isWhitespace(r):
-               return lexSkip(lx, lexKeyEnd)
-       default:
-               return lx.errorf("Expected key separator %q, but got %q instead.",
-                       keySep, r)
-       }
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
-       // We allow whitespace to precede a value, but NOT new lines.
-       // In array syntax, the array states are responsible for ignoring new
-       // lines.
-       r := lx.next()
-       if isWhitespace(r) {
-               return lexSkip(lx, lexValue)
-       }
-
-       switch {
-       case r == arrayStart:
-               lx.ignore()
-               lx.emit(itemArray)
-               return lexArrayValue
-       case r == stringStart:
-               if lx.accept(stringStart) {
-                       if lx.accept(stringStart) {
-                               lx.ignore() // Ignore """
-                               return lexMultilineString
-                       }
-                       lx.backup()
-               }
-               lx.ignore() // ignore the '"'
-               return lexString
-       case r == rawStringStart:
-               if lx.accept(rawStringStart) {
-                       if lx.accept(rawStringStart) {
-                               lx.ignore() // Ignore '''
-                               return lexMultilineRawString
-                       }
-                       lx.backup()
-               }
-               lx.ignore() // ignore the "'"
-               return lexRawString
-       case r == 't':
-               return lexTrue
-       case r == 'f':
-               return lexFalse
-       case r == '-':
-               return lexNumberStart
-       case isDigit(r):
-               lx.backup() // avoid an extra state and use the same as above
-               return lexNumberOrDateStart
-       case r == '.': // special error case, be kind to users
-               return lx.errorf("Floats must start with a digit, not '.'.")
-       }
-       return lx.errorf("Expected value but found %q instead.", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and new lines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case isWhitespace(r) || isNL(r):
-               return lexSkip(lx, lexArrayValue)
-       case r == commentStart:
-               lx.push(lexArrayValue)
-               return lexCommentStart
-       case r == arrayValTerm:
-               return lx.errorf("Unexpected array value terminator %q.",
-                       arrayValTerm)
-       case r == arrayEnd:
-               return lexArrayEnd
-       }
-
-       lx.backup()
-       lx.push(lexArrayValueEnd)
-       return lexValue
-}
-
-// lexArrayValueEnd consumes the cruft between values of an array. Namely,
-// it ignores whitespace and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case isWhitespace(r) || isNL(r):
-               return lexSkip(lx, lexArrayValueEnd)
-       case r == commentStart:
-               lx.push(lexArrayValueEnd)
-               return lexCommentStart
-       case r == arrayValTerm:
-               lx.ignore()
-               return lexArrayValue // move on to the next value
-       case r == arrayEnd:
-               return lexArrayEnd
-       }
-       return lx.errorf("Expected an array value terminator %q or an array "+
-               "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
-}
-
-// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
-// just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
-       lx.ignore()
-       lx.emit(itemArrayEnd)
-       return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case isNL(r):
-               return lx.errorf("Strings cannot contain new lines.")
-       case r == '\\':
-               lx.push(lexString)
-               return lexStringEscape
-       case r == stringEnd:
-               lx.backup()
-               lx.emit(itemString)
-               lx.next()
-               lx.ignore()
-               return lx.pop()
-       }
-       return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case r == '\\':
-               return lexMultilineStringEscape
-       case r == stringEnd:
-               if lx.accept(stringEnd) {
-                       if lx.accept(stringEnd) {
-                               lx.backup()
-                               lx.backup()
-                               lx.backup()
-                               lx.emit(itemMultilineString)
-                               lx.next()
-                               lx.next()
-                               lx.next()
-                               lx.ignore()
-                               return lx.pop()
-                       }
-                       lx.backup()
-               }
-       }
-       return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case isNL(r):
-               return lx.errorf("Strings cannot contain new lines.")
-       case r == rawStringEnd:
-               lx.backup()
-               lx.emit(itemRawString)
-               lx.next()
-               lx.ignore()
-               return lx.pop()
-       }
-       return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case r == rawStringEnd:
-               if lx.accept(rawStringEnd) {
-                       if lx.accept(rawStringEnd) {
-                               lx.backup()
-                               lx.backup()
-                               lx.backup()
-                               lx.emit(itemRawMultilineString)
-                               lx.next()
-                               lx.next()
-                               lx.next()
-                               lx.ignore()
-                               return lx.pop()
-                       }
-                       lx.backup()
-               }
-       }
-       return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
-       // Handle the special case first:
-       if isNL(lx.next()) {
-               return lexMultilineString
-       } else {
-               lx.backup()
-               lx.push(lexMultilineString)
-               return lexStringEscape(lx)
-       }
-}
-
-func lexStringEscape(lx *lexer) stateFn {
-       r := lx.next()
-       switch r {
-       case 'b':
-               fallthrough
-       case 't':
-               fallthrough
-       case 'n':
-               fallthrough
-       case 'f':
-               fallthrough
-       case 'r':
-               fallthrough
-       case '"':
-               fallthrough
-       case '\\':
-               return lx.pop()
-       case 'u':
-               return lexShortUnicodeEscape
-       case 'U':
-               return lexLongUnicodeEscape
-       }
-       return lx.errorf("Invalid escape character %q. Only the following "+
-               "escape characters are allowed: "+
-               "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
-               "\\uXXXX and \\UXXXXXXXX.", r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
-       var r rune
-       for i := 0; i < 4; i++ {
-               r = lx.next()
-               if !isHexadecimal(r) {
-                       return lx.errorf("Expected four hexadecimal digits after '\\u', "+
-                               "but got '%s' instead.", lx.current())
-               }
-       }
-       return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
-       var r rune
-       for i := 0; i < 8; i++ {
-               r = lx.next()
-               if !isHexadecimal(r) {
-                       return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
-                               "but got '%s' instead.", lx.current())
-               }
-       }
-       return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either a (positive) integer, float or
-// datetime. It assumes that NO negative sign has been consumed.
-func lexNumberOrDateStart(lx *lexer) stateFn {
-       r := lx.next()
-       if !isDigit(r) {
-               if r == '.' {
-                       return lx.errorf("Floats must start with a digit, not '.'.")
-               } else {
-                       return lx.errorf("Expected a digit but got %q.", r)
-               }
-       }
-       return lexNumberOrDate
-}
-
-// lexNumberOrDate consumes either a (positive) integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case r == '-':
-               if lx.pos-lx.start != 5 {
-                       return lx.errorf("All ISO8601 dates must be in full Zulu form.")
-               }
-               return lexDateAfterYear
-       case isDigit(r):
-               return lexNumberOrDate
-       case r == '.':
-               return lexFloatStart
-       }
-
-       lx.backup()
-       lx.emit(itemInteger)
-       return lx.pop()
-}
-
-// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
-// It assumes that "YYYY-" has already been consumed.
-func lexDateAfterYear(lx *lexer) stateFn {
-       formats := []rune{
-               // digits are '0'.
-               // everything else is direct equality.
-               '0', '0', '-', '0', '0',
-               'T',
-               '0', '0', ':', '0', '0', ':', '0', '0',
-               'Z',
-       }
-       for _, f := range formats {
-               r := lx.next()
-               if f == '0' {
-                       if !isDigit(r) {
-                               return lx.errorf("Expected digit in ISO8601 datetime, "+
-                                       "but found %q instead.", r)
-                       }
-               } else if f != r {
-                       return lx.errorf("Expected %q in ISO8601 datetime, "+
-                               "but found %q instead.", f, r)
-               }
-       }
-       lx.emit(itemDatetime)
-       return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that
-// a negative sign has already been read, but that *no* digits have been
-// consumed. lexNumberStart will move to the appropriate integer or float
-// states.
-func lexNumberStart(lx *lexer) stateFn {
-       // we MUST see a digit. Even floats have to start with a digit.
-       r := lx.next()
-       if !isDigit(r) {
-               if r == '.' {
-                       return lx.errorf("Floats must start with a digit, not '.'.")
-               } else {
-                       return lx.errorf("Expected a digit but got %q.", r)
-               }
-       }
-       return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
-       r := lx.next()
-       switch {
-       case isDigit(r):
-               return lexNumber
-       case r == '.':
-               return lexFloatStart
-       }
-
-       lx.backup()
-       lx.emit(itemInteger)
-       return lx.pop()
-}
-
-// lexFloatStart starts the consumption of digits of a float after a '.'.
-// Namely, at least one digit is required.
-func lexFloatStart(lx *lexer) stateFn {
-       r := lx.next()
-       if !isDigit(r) {
-               return lx.errorf("Floats must have a digit after the '.', but got "+
-                       "%q instead.", r)
-       }
-       return lexFloat
-}
-
-// lexFloat consumes the digits of a float after a '.'.
-// Assumes that one digit has been consumed after a '.' already.
-func lexFloat(lx *lexer) stateFn {
-       r := lx.next()
-       if isDigit(r) {
-               return lexFloat
-       }
-
-       lx.backup()
-       lx.emit(itemFloat)
-       return lx.pop()
-}
-
-// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
-// consumed.
-func lexConst(lx *lexer, s string) stateFn {
-       for i := range s[1:] {
-               if r := lx.next(); r != rune(s[i+1]) {
-                       return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
-                               s[:i]+string(r))
-               }
-       }
-       return nil
-}
-
-// lexTrue consumes the "rue" in "true". It assumes that 't' has already
-// been consumed.
-func lexTrue(lx *lexer) stateFn {
-       if fn := lexConst(lx, "true"); fn != nil {
-               return fn
-       }
-       lx.emit(itemBool)
-       return lx.pop()
-}
-
-// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
-// been consumed.
-func lexFalse(lx *lexer) stateFn {
-       if fn := lexConst(lx, "false"); fn != nil {
-               return fn
-       }
-       lx.emit(itemBool)
-       return lx.pop()
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
-       lx.ignore()
-       lx.emit(itemCommentStart)
-       return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first new line character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
-       r := lx.peek()
-       if isNL(r) || r == eof {
-               lx.emit(itemText)
-               return lx.pop()
-       }
-       lx.next()
-       return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
-       return func(lx *lexer) stateFn {
-               lx.ignore()
-               return nextState
-       }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
-       return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
-       return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
-       return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
-       return (r >= '0' && r <= '9') ||
-               (r >= 'a' && r <= 'f') ||
-               (r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
-       return (r >= 'A' && r <= 'Z') ||
-               (r >= 'a' && r <= 'z') ||
-               (r >= '0' && r <= '9') ||
-               r == '_' ||
-               r == '-'
-}
-
-func (itype itemType) String() string {
-       switch itype {
-       case itemError:
-               return "Error"
-       case itemNIL:
-               return "NIL"
-       case itemEOF:
-               return "EOF"
-       case itemText:
-               return "Text"
-       case itemString:
-               return "String"
-       case itemRawString:
-               return "String"
-       case itemMultilineString:
-               return "String"
-       case itemRawMultilineString:
-               return "String"
-       case itemBool:
-               return "Bool"
-       case itemInteger:
-               return "Integer"
-       case itemFloat:
-               return "Float"
-       case itemDatetime:
-               return "DateTime"
-       case itemTableStart:
-               return "TableStart"
-       case itemTableEnd:
-               return "TableEnd"
-       case itemKeyStart:
-               return "KeyStart"
-       case itemArray:
-               return "Array"
-       case itemArrayEnd:
-               return "ArrayEnd"
-       case itemCommentStart:
-               return "CommentStart"
-       }
-       panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
-       return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}

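The deleted lex.go above is built around the "state function" lexer pattern: each lexX function consumes input, emits typed items, and returns the next state, while a small stack (lx.push/lx.pop) lets string and value lexing be reused from table, key, and array contexts. A minimal, self-contained sketch of that control flow is shown below; the names miniLexer, miniItem, lexKey, and lexSep are illustrative only and are not part of the library.

package main

import "fmt"

type miniItem struct {
	typ string
	val string
}

// miniStateFn mirrors the stateFn idea: a state consumes input and returns
// the next state, or nil when lexing is done.
type miniStateFn func(*miniLexer) miniStateFn

type miniLexer struct {
	input string
	start int
	pos   int
	items []miniItem
}

// emit records the text consumed since the last emit as a typed item.
func (lx *miniLexer) emit(typ string) {
	lx.items = append(lx.items, miniItem{typ, lx.input[lx.start:lx.pos]})
	lx.start = lx.pos
}

// lexKey consumes bare-key characters up to '=' or end of input.
func lexKey(lx *miniLexer) miniStateFn {
	for lx.pos < len(lx.input) && lx.input[lx.pos] != '=' {
		lx.pos++
	}
	lx.emit("key")
	if lx.pos < len(lx.input) {
		return lexSep
	}
	return nil
}

// lexSep consumes the '=' separator and ends the sketch.
func lexSep(lx *miniLexer) miniStateFn {
	lx.pos++
	lx.emit("sep")
	return nil
}

func main() {
	lx := &miniLexer{input: "name="}
	// Drive the machine: run each state until one returns nil.
	for state := miniStateFn(lexKey); state != nil; {
		state = state(lx)
	}
	fmt.Println(lx.items) // [{key name} {sep =}]
}

The real lexer adds the push/pop stack on top of this loop so that, for example, string lexing can return to whichever context invoked it.
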
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go 
b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 6a82e84..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,493 +0,0 @@
-package toml
-
-import (
-       "fmt"
-       "log"
-       "strconv"
-       "strings"
-       "time"
-       "unicode"
-       "unicode/utf8"
-)
-
-type parser struct {
-       mapping map[string]interface{}
-       types   map[string]tomlType
-       lx      *lexer
-
-       // A list of keys in the order that they appear in the TOML data.
-       ordered []Key
-
-       // the full key for the current hash in scope
-       context Key
-
-       // the base key name for everything except hashes
-       currentKey string
-
-       // rough approximation of line number
-       approxLine int
-
-       // A map of 'key.group.names' to whether they were created implicitly.
-       implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
-       return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
-       defer func() {
-               if r := recover(); r != nil {
-                       var ok bool
-                       if err, ok = r.(parseError); ok {
-                               return
-                       }
-                       panic(r)
-               }
-       }()
-
-       p = &parser{
-               mapping:   make(map[string]interface{}),
-               types:     make(map[string]tomlType),
-               lx:        lex(data),
-               ordered:   make([]Key, 0),
-               implicits: make(map[string]bool),
-       }
-       for {
-               item := p.next()
-               if item.typ == itemEOF {
-                       break
-               }
-               p.topLevel(item)
-       }
-
-       return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
-       msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
-               p.approxLine, p.current(), fmt.Sprintf(format, v...))
-       panic(parseError(msg))
-}
-
-func (p *parser) next() item {
-       it := p.lx.nextItem()
-       if it.typ == itemError {
-               p.panicf("%s", it.val)
-       }
-       return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
-       log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
-}
-
-func (p *parser) expect(typ itemType) item {
-       it := p.next()
-       p.assertEqual(typ, it.typ)
-       return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
-       if expected != got {
-               p.bug("Expected '%s' but got '%s'.", expected, got)
-       }
-}
-
-func (p *parser) topLevel(item item) {
-       switch item.typ {
-       case itemCommentStart:
-               p.approxLine = item.line
-               p.expect(itemText)
-       case itemTableStart:
-               kg := p.next()
-               p.approxLine = kg.line
-
-               var key Key
-               for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
-                       key = append(key, p.keyString(kg))
-               }
-               p.assertEqual(itemTableEnd, kg.typ)
-
-               p.establishContext(key, false)
-               p.setType("", tomlHash)
-               p.ordered = append(p.ordered, key)
-       case itemArrayTableStart:
-               kg := p.next()
-               p.approxLine = kg.line
-
-               var key Key
-               for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
-                       key = append(key, p.keyString(kg))
-               }
-               p.assertEqual(itemArrayTableEnd, kg.typ)
-
-               p.establishContext(key, true)
-               p.setType("", tomlArrayHash)
-               p.ordered = append(p.ordered, key)
-       case itemKeyStart:
-               kname := p.next()
-               p.approxLine = kname.line
-               p.currentKey = p.keyString(kname)
-
-               val, typ := p.value(p.next())
-               p.setValue(p.currentKey, val)
-               p.setType(p.currentKey, typ)
-               p.ordered = append(p.ordered, p.context.add(p.currentKey))
-               p.currentKey = ""
-       default:
-               p.bug("Unexpected type at top level: %s", item.typ)
-       }
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
-       switch it.typ {
-       case itemText:
-               return it.val
-       case itemString, itemMultilineString,
-               itemRawString, itemRawMultilineString:
-               s, _ := p.value(it)
-               return s.(string)
-       default:
-               p.bug("Unexpected key type: %s", it.typ)
-               panic("unreachable")
-       }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
-       switch it.typ {
-       case itemString:
-               return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
-       case itemMultilineString:
-               trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
-               return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
-       case itemRawString:
-               return it.val, p.typeOfPrimitive(it)
-       case itemRawMultilineString:
-               return stripFirstNewline(it.val), p.typeOfPrimitive(it)
-       case itemBool:
-               switch it.val {
-               case "true":
-                       return true, p.typeOfPrimitive(it)
-               case "false":
-                       return false, p.typeOfPrimitive(it)
-               }
-               p.bug("Expected boolean value, but got '%s'.", it.val)
-       case itemInteger:
-               num, err := strconv.ParseInt(it.val, 10, 64)
-               if err != nil {
-                       // See comment below for floats describing why we make a
-                       // distinction between a bug and a user error.
-                       if e, ok := err.(*strconv.NumError); ok &&
-                               e.Err == strconv.ErrRange {
-
-                               p.panicf("Integer '%s' is out of the range of 64-bit "+
-                                       "signed integers.", it.val)
-                       } else {
-                               p.bug("Expected integer value, but got '%s'.", it.val)
-                       }
-               }
-               return num, p.typeOfPrimitive(it)
-       case itemFloat:
-               num, err := strconv.ParseFloat(it.val, 64)
-               if err != nil {
-                       // Distinguish float values. Normally, it'd be a bug if the lexer
-                       // provides an invalid float, but it's possible that the float is
-                       // out of range of valid values (which the lexer cannot determine).
-                       // So mark the former as a bug but the latter as a legitimate user
-                       // error.
-                       //
-                       // This is also true for integers.
-                       if e, ok := err.(*strconv.NumError); ok &&
-                               e.Err == strconv.ErrRange {
-
-                               p.panicf("Float '%s' is out of the range of 64-bit "+
-                                       "IEEE-754 floating-point numbers.", it.val)
-                       } else {
-                               p.bug("Expected float value, but got '%s'.", it.val)
-                       }
-               }
-               return num, p.typeOfPrimitive(it)
-       case itemDatetime:
-               t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
-               if err != nil {
-                       p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
-               }
-               return t, p.typeOfPrimitive(it)
-       case itemArray:
-               array := make([]interface{}, 0)
-               types := make([]tomlType, 0)
-
-               for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
-                       if it.typ == itemCommentStart {
-                               p.expect(itemText)
-                               continue
-                       }
-
-                       val, typ := p.value(it)
-                       array = append(array, val)
-                       types = append(types, typ)
-               }
-               return array, p.typeOfArray(types)
-       }
-       p.bug("Unexpected value type: %s", it.typ)
-       panic("unreachable")
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
-       var ok bool
-
-       // Always start at the top level and drill down for our context.
-       hashContext := p.mapping
-       keyContext := make(Key, 0)
-
-       // We only need implicit hashes for key[0:-1]
-       for _, k := range key[0 : len(key)-1] {
-               _, ok = hashContext[k]
-               keyContext = append(keyContext, k)
-
-               // No key? Make an implicit hash and move on.
-               if !ok {
-                       p.addImplicit(keyContext)
-                       hashContext[k] = make(map[string]interface{})
-               }
-
-               // If the hash context is actually an array of tables, then set
-               // the hash context to the last element in that array.
-               //
-               // Otherwise, it better be a table, since this MUST be a key group (by
-               // virtue of it not being the last element in a key).
-               switch t := hashContext[k].(type) {
-               case []map[string]interface{}:
-                       hashContext = t[len(t)-1]
-               case map[string]interface{}:
-                       hashContext = t
-               default:
-                       p.panicf("Key '%s' was already created as a hash.", keyContext)
-               }
-       }
-
-       p.context = keyContext
-       if array {
-               // If this is the first element for this array, then allocate a new
-               // list of tables for it.
-               k := key[len(key)-1]
-               if _, ok := hashContext[k]; !ok {
-                       hashContext[k] = make([]map[string]interface{}, 0, 5)
-               }
-
-               // Add a new table. But make sure the key hasn't already been used
-               // for something else.
-               if hash, ok := hashContext[k].([]map[string]interface{}); ok {
-                       hashContext[k] = append(hash, make(map[string]interface{}))
-               } else {
-                       p.panicf("Key '%s' was already created and cannot be used as "+
-                               "an array.", keyContext)
-               }
-       } else {
-               p.setValue(key[len(key)-1], make(map[string]interface{}))
-       }
-       p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, accounting for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
-       var tmpHash interface{}
-       var ok bool
-
-       hash := p.mapping
-       keyContext := make(Key, 0)
-       for _, k := range p.context {
-               keyContext = append(keyContext, k)
-               if tmpHash, ok = hash[k]; !ok {
-                       p.bug("Context for key '%s' has not been established.", keyContext)
-               }
-               switch t := tmpHash.(type) {
-               case []map[string]interface{}:
-                       // The context is a table of hashes. Pick the most recent table
-                       // defined as the current hash.
-                       hash = t[len(t)-1]
-               case map[string]interface{}:
-                       hash = t
-               default:
-                       p.bug("Expected hash to have type 'map[string]interface{}', but "+
-                               "it has '%T' instead.", tmpHash)
-               }
-       }
-       keyContext = append(keyContext, key)
-
-       if _, ok := hash[key]; ok {
-               // Typically, if the given key has already been set, then we have
-               // to raise an error since duplicate keys are disallowed. However,
-               // it's possible that a key was previously defined implicitly. In this
-               // case, it is allowed to be redefined concretely. (See the
-               // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
-               //
-               // But we have to make sure to stop marking it as an implicit. (So that
-               // another redefinition provokes an error.)
-               //
-               // Note that since it has already been defined (as a hash), we don't
-               // want to overwrite it. So our business is done.
-               if p.isImplicit(keyContext) {
-                       p.removeImplicit(keyContext)
-                       return
-               }
-
-               // Otherwise, we have a concrete key trying to override a previous
-               // key, which is *always* wrong.
-               p.panicf("Key '%s' has already been defined.", keyContext)
-       }
-       hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
-       keyContext := make(Key, 0, len(p.context)+1)
-       for _, k := range p.context {
-               keyContext = append(keyContext, k)
-       }
-       if len(key) > 0 { // allow type setting for hashes
-               keyContext = append(keyContext, key)
-       }
-       p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
-       p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
-       p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
-       return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
-       if len(p.currentKey) == 0 {
-               return p.context.String()
-       }
-       if len(p.context) == 0 {
-               return p.currentKey
-       }
-       return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
-       if len(s) == 0 || s[0] != '\n' {
-               return s
-       }
-       return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
-       esc := strings.Split(s, "\\\n")
-       if len(esc) > 1 {
-               for i := 1; i < len(esc); i++ {
-                       esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
-               }
-       }
-       return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
-       var replaced []rune
-       s := []byte(str)
-       r := 0
-       for r < len(s) {
-               if s[r] != '\\' {
-                       c, size := utf8.DecodeRune(s[r:])
-                       r += size
-                       replaced = append(replaced, c)
-                       continue
-               }
-               r += 1
-               if r >= len(s) {
-                       p.bug("Escape sequence at end of string.")
-                       return ""
-               }
-               switch s[r] {
-               default:
-                       p.bug("Expected valid escape code after \\, but got %q.", s[r])
-                       return ""
-               case 'b':
-                       replaced = append(replaced, rune(0x0008))
-                       r += 1
-               case 't':
-                       replaced = append(replaced, rune(0x0009))
-                       r += 1
-               case 'n':
-                       replaced = append(replaced, rune(0x000A))
-                       r += 1
-               case 'f':
-                       replaced = append(replaced, rune(0x000C))
-                       r += 1
-               case 'r':
-                       replaced = append(replaced, rune(0x000D))
-                       r += 1
-               case '"':
-                       replaced = append(replaced, rune(0x0022))
-                       r += 1
-               case '\\':
-                       replaced = append(replaced, rune(0x005C))
-                       r += 1
-               case 'u':
-                       // At this point, we know we have a Unicode escape of the form
-                       // `uXXXX` at [r, r+5). (Because the lexer guarantees this
-                       // for us.)
-                       escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
-                       replaced = append(replaced, escaped)
-                       r += 5
-               case 'U':
-                       // At this point, we know we have a Unicode escape of the form
-                       // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
-                       // for us.)
-                       escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
-                       replaced = append(replaced, escaped)
-                       r += 9
-               }
-       }
-       return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
-       s := string(bs)
-       hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
-       if err != nil {
-               p.bug("Could not parse '%s' as a hexadecimal number, but the "+
-                       "lexer claims it's OK: %s", s, err)
-       }
-       if !utf8.ValidRune(rune(hex)) {
-               p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
-       }
-       return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
-       return ty == itemString || ty == itemMultilineString ||
-               ty == itemRawString || ty == itemRawMultilineString
-}

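The deleted parse.go above reports failures from deep inside the recursive value handling by panicking with a parseError, and parse() converts that panic back into an ordinary error via defer/recover while re-raising anything else. A small self-contained sketch of that pattern follows; the names demoParse and demoError are illustrative and not taken from the package.

package main

import "fmt"

type demoError string

func (e demoError) Error() string { return string(e) }

// demoParse signals failures by panicking with a demoError; the deferred
// recover converts that panic back into an error, while unrelated panics
// are re-raised untouched.
func demoParse(input string) (result string, err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(demoError); ok {
				err = e
				return
			}
			panic(r)
		}
	}()

	if input == "" {
		panic(demoError("empty input"))
	}
	return "parsed: " + input, nil
}

func main() {
	if _, err := demoParse(""); err != nil {
		fmt.Println("error:", err) // error: empty input
	}
}

This keeps the happy path free of error plumbing at the cost of requiring that every internal failure go through the panicf/bug helpers, which is exactly how the parser above is structured.
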
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim 
b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164b..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go
----------------------------------------------------------------------
diff --git 
a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go 
b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8af..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be moving
-// toward adding real composite types.
-type tomlType interface {
-       typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
-       if t1 == nil || t2 == nil {
-               return false
-       }
-       return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
-       return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
-       return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
-       return btype.typeString()
-}
-
-var (
-       tomlInteger   tomlBaseType = "Integer"
-       tomlFloat     tomlBaseType = "Float"
-       tomlDatetime  tomlBaseType = "Datetime"
-       tomlString    tomlBaseType = "String"
-       tomlBool      tomlBaseType = "Bool"
-       tomlArray     tomlBaseType = "Array"
-       tomlHash      tomlBaseType = "Hash"
-       tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
-       switch lexItem.typ {
-       case itemInteger:
-               return tomlInteger
-       case itemFloat:
-               return tomlFloat
-       case itemDatetime:
-               return tomlDatetime
-       case itemString:
-               return tomlString
-       case itemMultilineString:
-               return tomlString
-       case itemRawString:
-               return tomlString
-       case itemRawMultilineString:
-               return tomlString
-       case itemBool:
-               return tomlBool
-       }
-       p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
-       panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
-       // Empty arrays are cool.
-       if len(types) == 0 {
-               return tomlArray
-       }
-
-       theType := types[0]
-       for _, t := range types[1:] {
-               if !typeEqual(theType, t) {
-                       p.panicf("Array contains values of type '%s' and '%s', but "+
-                               "arrays must be homogeneous.", theType, t)
-               }
-       }
-       return tomlArray
-}

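type_check.go above enforces that TOML arrays are homogeneous by comparing every element type against the first, with empty arrays accepted. A minimal sketch of the same check, using plain strings in place of tomlType; the homogeneous helper is illustrative and not part of the package.

package main

import (
	"errors"
	"fmt"
)

// homogeneous returns an error if the element types differ; an empty slice
// is accepted, mirroring "Empty arrays are cool."
func homogeneous(types []string) error {
	if len(types) == 0 {
		return nil
	}
	first := types[0]
	for _, t := range types[1:] {
		if t != first {
			return errors.New("array contains values of type '" + first +
				"' and '" + t + "', but arrays must be homogeneous")
		}
	}
	return nil
}

func main() {
	fmt.Println(homogeneous([]string{"Integer", "Integer"})) // <nil>
	fmt.Println(homogeneous([]string{"Integer", "String"}))  // error
}
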
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go
----------------------------------------------------------------------
diff --git 
a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go 
b/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 6da608a..0000000
--- a/newt/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
-       "reflect"
-       "sort"
-       "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
-       name  string       // the name of the field (`toml` tag included)
-       tag   bool         // whether field has a `toml` tag
-       index []int        // represents the depth of an anonymous field
-       typ   reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
-       if x[i].name != x[j].name {
-               return x[i].name < x[j].name
-       }
-       if len(x[i].index) != len(x[j].index) {
-               return len(x[i].index) < len(x[j].index)
-       }
-       if x[i].tag != x[j].tag {
-               return x[i].tag
-       }
-       return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
-       for k, xik := range x[i].index {
-               if k >= len(x[j].index) {
-                       return false
-               }
-               if xik != x[j].index[k] {
-                       return xik < x[j].index[k]
-               }
-       }
-       return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
-       // Anonymous fields to explore at the current level and the next.
-       current := []field{}
-       next := []field{{typ: t}}
-
-       // Count of queued names for current level and the next.
-       count := map[reflect.Type]int{}
-       nextCount := map[reflect.Type]int{}
-
-       // Types already visited at an earlier level.
-       visited := map[reflect.Type]bool{}
-
-       // Fields found.
-       var fields []field
-
-       for len(next) > 0 {
-               current, next = next, current[:0]
-               count, nextCount = nextCount, map[reflect.Type]int{}
-
-               for _, f := range current {
-                       if visited[f.typ] {
-                               continue
-                       }
-                       visited[f.typ] = true
-
-                       // Scan f.typ for fields to include.
-                       for i := 0; i < f.typ.NumField(); i++ {
-                               sf := f.typ.Field(i)
-                               if sf.PkgPath != "" && !sf.Anonymous { // unexported
-                                       continue
-                               }
-                               name, _ := getOptions(sf.Tag.Get("toml"))
-                               if name == "-" {
-                                       continue
-                               }
-                               index := make([]int, len(f.index)+1)
-                               copy(index, f.index)
-                               index[len(f.index)] = i
-
-                               ft := sf.Type
-                               if ft.Name() == "" && ft.Kind() == reflect.Ptr {
-                                       // Follow pointer.
-                                       ft = ft.Elem()
-                               }
-
-                               // Record found field and index sequence.
-                               if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
-                                       tagged := name != ""
-                                       if name == "" {
-                                               name = sf.Name
-                                       }
-                                       fields = append(fields, field{name, tagged, index, ft})
-                                       if count[f.typ] > 1 {
-                                               // If there were multiple instances, add a second,
-                                               // so that the annihilation code will see a duplicate.
-                                               // It only cares about the distinction between 1 or 2,
-                                               // so don't bother generating any more copies.
-                                               fields = append(fields, fields[len(fields)-1])
-                                       }
-                                       continue
-                               }
-
-                               // Record new anonymous struct to explore in next round.
-                               nextCount[ft]++
-                               if nextCount[ft] == 1 {
-                                       f := field{name: ft.Name(), index: index, typ: ft}
-                                       next = append(next, f)
-                               }
-                       }
-               }
-       }
-
-       sort.Sort(byName(fields))
-
-       // Delete all fields that are hidden by the Go rules for embedded fields,
-       // except that fields with TOML tags are promoted.
-
-       // The fields are sorted in primary order of name, secondary order
-       // of field index length. Loop over names; for each name, delete
-       // hidden fields by choosing the one dominant field that survives.
-       out := fields[:0]
-       for advance, i := 0, 0; i < len(fields); i += advance {
-               // One iteration per name.
-               // Find the sequence of fields with the name of this first field.
-               fi := fields[i]
-               name := fi.name
-               for advance = 1; i+advance < len(fields); advance++ {
-                       fj := fields[i+advance]
-                       if fj.name != name {
-                               break
-                       }
-               }
-               if advance == 1 { // Only one field with this name
-                       out = append(out, fi)
-                       continue
-               }
-               dominant, ok := dominantField(fields[i : i+advance])
-               if ok {
-                       out = append(out, dominant)
-               }
-       }
-
-       fields = out
-       sort.Sort(byIndex(fields))
-
-       return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
-       // The fields are sorted in increasing index-length order. The winner
-       // must therefore be one with the shortest index length. Drop all
-       // longer entries, which is easy: just truncate the slice.
-       length := len(fields[0].index)
-       tagged := -1 // Index of first tagged field.
-       for i, f := range fields {
-               if len(f.index) > length {
-                       fields = fields[:i]
-                       break
-               }
-               if f.tag {
-                       if tagged >= 0 {
-                               // Multiple tagged fields at the same level: conflict.
-                               // Return no field.
-                               return field{}, false
-                       }
-                       tagged = i
-               }
-       }
-       if tagged >= 0 {
-               return fields[tagged], true
-       }
-       // All remaining fields have the same length. If there's more than one,
-       // we have a conflict (two fields named "X" at the same level) and we
-       // return no field.
-       if len(fields) > 1 {
-               return field{}, false
-       }
-       return fields[0], true
-}
-
-var fieldCache struct {
-       sync.RWMutex
-       m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
-       fieldCache.RLock()
-       f := fieldCache.m[t]
-       fieldCache.RUnlock()
-       if f != nil {
-               return f
-       }
-
-       // Compute fields without lock.
-       // Might duplicate effort but won't hold other computations back.
-       f = typeFields(t)
-       if f == nil {
-               f = []field{}
-       }
-
-       fieldCache.Lock()
-       if fieldCache.m == nil {
-               fieldCache.m = map[reflect.Type][]field{}
-       }
-       fieldCache.m[t] = f
-       fieldCache.Unlock()
-       return f
-}

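type_fields.go above caches the per-type field list behind a sync.RWMutex: the fast path takes only the read lock, the field computation happens outside any lock, and the result is published under the write lock. A sketch of that read-mostly cache pattern follows; the names fieldNameCache, cachedFieldNames, and computeFieldNames are illustrative only.

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var fieldNameCache struct {
	sync.RWMutex
	m map[reflect.Type][]string
}

// computeFieldNames stands in for the (possibly expensive) work being cached.
func computeFieldNames(t reflect.Type) []string {
	names := []string{}
	for i := 0; i < t.NumField(); i++ {
		names = append(names, t.Field(i).Name)
	}
	return names
}

func cachedFieldNames(t reflect.Type) []string {
	fieldNameCache.RLock()
	f := fieldNameCache.m[t]
	fieldNameCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute without holding the lock; duplicate work is possible but
	// harmless, and readers are never blocked behind it.
	f = computeFieldNames(t)

	fieldNameCache.Lock()
	if fieldNameCache.m == nil {
		fieldNameCache.m = map[reflect.Type][]string{}
	}
	fieldNameCache.m[t] = f
	fieldNameCache.Unlock()
	return f
}

type example struct{ A, B int }

func main() {
	fmt.Println(cachedFieldNames(reflect.TypeOf(example{}))) // [A B]
	fmt.Println(cachedFieldNames(reflect.TypeOf(example{}))) // served from cache
}
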
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore 
b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..66be63a
--- /dev/null
+++ b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
@@ -0,0 +1 @@
+logrus

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml 
b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..ff23150
--- /dev/null
+++ b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+install:
+  - go get -t ./...
+script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md 
b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..9e9e600
--- /dev/null
+++ b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,59 @@
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/4ae9684f/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
----------------------------------------------------------------------
diff --git a/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE 
b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/newt/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
