
go*: update dependencies

Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
Vincent Batts 2022-04-27 21:12:07 -04:00
parent c0efaff923
commit 1e645e4616
Signed by: vbatts
GPG key ID: 10937E57733F1362
27 changed files with 1988 additions and 1171 deletions

6
go.mod

@@ -3,9 +3,9 @@ module github.com/vbatts/sl-feeds
 go 1.13
 
 require (
-	github.com/BurntSushi/toml v1.0.0
-	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+	github.com/BurntSushi/toml v1.1.0
+	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/gorilla/feeds v1.1.1
 	github.com/kr/pretty v0.2.1 // indirect
-	github.com/urfave/cli v1.22.5
+	github.com/urfave/cli v1.22.7
 )

13
go.sum

@@ -1,10 +1,9 @@
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
-github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/gorilla/feeds v1.1.1 h1:HwKXxqzcRNg9to+BbvJog4+f3s/xzvtZXICcQGutYfY=
 github.com/gorilla/feeds v1.1.1/go.mod h1:Nk0jZrvPFZX1OBe5NPiddPw7CfwF6Q9eqzaBbaightA=
 github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
@@ -17,7 +16,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
-github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.7 h1:aXiFAgRugfJ27UFDsGJ9DB2FvTC73hlVXFSqq5bo9eU=
+github.com/urfave/cli v1.22.7/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

vendor/github.com/BurntSushi/toml/.gitignore

@@ -1,5 +1,2 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
 toml.test
+/toml-test

vendor/github.com/BurntSushi/toml/.travis.yml

@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -1,3 +1 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

vendor/github.com/BurntSushi/toml/Makefile

@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

vendor/github.com/BurntSushi/toml/README.md

@ -1,46 +1,36 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and packages.
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Compatible with TOML version Documentation: https://godocs.io/github.com/BurntSushi/toml
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
Installation: This library requires Go 1.13 or newer; install it with:
```bash % go get github.com/BurntSushi/toml@latest
go get github.com/BurntSushi/toml
```
Try the toml validator: It also comes with a TOML validator CLI tool:
```bash % go install github.com/BurntSushi/toml/cmd/tomlv@latest
go get github.com/BurntSushi/toml/cmd/tomlv % tomlv some-toml-file.toml
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing ### Testing
This package passes all tests in [toml-test] for both the decoder and the
encoder.
This package passes all tests in [toml-test]: https://github.com/BurntSushi/toml-test
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples ### Examples
This package works similar to how the Go standard library handles XML and JSON.
Namely, data is loaded into Go values via reflection.
This package works similarly to how the Go standard library handles `XML` For the simplest example, consider some TOML file as just a list of keys and
and `JSON`. Namely, data is loaded into Go values via reflection. values:
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml ```toml
Age = 25 Age = 25
@ -66,9 +56,8 @@ And then decoded with:
```go ```go
var conf Config var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil { _, err := toml.Decode(tomlData, &conf)
// handle error // handle error
}
``` ```
You can also use struct tags if your struct field name doesn't map to a TOML You can also use struct tags if your struct field name doesn't map to a TOML
@ -84,8 +73,10 @@ type TOML struct {
} }
``` ```
### Using the `encoding.TextUnmarshaler` interface Beware that, like most other decoders, **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
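
A minimal sketch of the exported-fields rule described above; the `person` type and values are invented for illustration and are not part of this change:

```go
package example

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type person struct {
	Name string // exported: filled from the TOML key "Name"
	age  int    // unexported: never set by the decoder, stays at its zero value
}

func decodePerson() {
	var p person
	if _, err := toml.Decode(`Name = "Ann"`, &p); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(p.Name, p.age) // prints: Ann 0
}
```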
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
Here's an example that automatically parses duration strings into Here's an example that automatically parses duration strings into
`time.Duration` values: `time.Duration` values:
@ -134,8 +125,10 @@ func (d *duration) UnmarshalText(text []byte) error {
} }
``` ```
### More complex usage To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
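
For illustration, a TOML-specific unmarshaler could look like the sketch below; the `port` type, its validation, and the assumption that a TOML integer is delivered as `int64` are not taken from this diff:

```go
package example

import "fmt"

// port is a hypothetical type; it is not part of this repository.
type port int

// UnmarshalTOML lets the decoder hand this type the raw parsed TOML value
// instead of filling it in via reflection.
func (p *port) UnmarshalTOML(v interface{}) error {
	n, ok := v.(int64) // assumption: a TOML integer arrives as int64
	if !ok {
		return fmt.Errorf("port: expected an integer, got %T", v)
	}
	if n < 1 || n > 65535 {
		return fmt.Errorf("port: %d is out of range", n)
	}
	*p = port(n)
	return nil
}
```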
### More complex usage
Here's an example of how to load the example from the official spec page: Here's an example of how to load the example from the official spec page:
```toml ```toml
@ -215,4 +208,4 @@ type clients struct {
Note that a case insensitive match will be tried if an exact match can't be Note that a case insensitive match will be tried if an exact match can't be
found. found.
A working example of the above can be found in `_examples/example.{go,toml}`. A working example of the above can be found in `_example/example.{go,toml}`.

vendor/github.com/BurntSushi/toml/decode.go

@ -1,19 +1,17 @@
package toml package toml
import ( import (
"bytes"
"encoding"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os"
"reflect" "reflect"
"strings" "strings"
"time"
) )
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a // Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. // TOML description of themselves.
type Unmarshaler interface { type Unmarshaler interface {
@ -21,34 +19,145 @@ type Unmarshaler interface {
} }
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. // Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error { func Unmarshal(data []byte, v interface{}) error {
_, err := Decode(string(p), v) _, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err return err
} }
// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}
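
As an aside, the three entry points above differ only in where the TOML comes from; a brief usage sketch based on the signatures in this diff (the `Config` type and file name are placeholders):

```go
package example

import (
	"strings"

	"github.com/BurntSushi/toml"
)

// Config is a placeholder for whatever the caller decodes into.
type Config struct {
	Title string
}

func decodeThreeWays() error {
	var cfg Config

	// From an in-memory string.
	if _, err := toml.Decode(`Title = "a"`, &cfg); err != nil {
		return err
	}
	// Straight from a file on disk.
	if _, err := toml.DecodeFile("config.toml", &cfg); err != nil {
		return err
	}
	// From any io.Reader, via the new Decoder type.
	if _, err := toml.NewDecoder(strings.NewReader(`Title = "b"`)).Decode(&cfg); err != nil {
		return err
	}
	return nil
}
```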
// Primitive is a TOML value that hasn't been decoded into a Go value. // Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
// //
// A `Primitive` value can be decoded using the `PrimitiveDecode` function. // This type can be used for any value, which will cause decoding to be delayed.
// You can use the PrimitiveDecode() function to "manually" decode these values.
// //
// The underlying representation of a `Primitive` value is subject to change. // NOTE: The underlying representation of a `Primitive` value is subject to
// Do not rely on it. // change. Do not rely on it.
// //
// N.B. Primitive values are still parsed, so using them will only avoid // NOTE: Primitive values are still parsed, so using them will only avoid the
// the overhead of reflection. They can be useful when you don't know the // overhead of reflection. They can be useful when you don't know the exact type
// exact type of TOML data until run time. // of TOML data until runtime.
type Primitive struct { type Primitive struct {
undecoded interface{} undecoded interface{}
context Key context Key
} }
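
A short sketch of the delayed-decoding pattern this comment describes, using `MetaData.PrimitiveDecode` as declared later in this file; the `serverConfig` type and the "server" key are illustrative only:

```go
package example

import "github.com/BurntSushi/toml"

type serverConfig struct {
	Host string
	Port int
}

func decodeLater(doc string) (serverConfig, error) {
	// Leave everything undecoded at first.
	var raw map[string]toml.Primitive
	md, err := toml.Decode(doc, &raw)
	if err != nil {
		return serverConfig{}, err
	}

	// Now reflect only the "server" table into a concrete Go type.
	var srv serverConfig
	if err := md.PrimitiveDecode(raw["server"], &srv); err != nil {
		return serverConfig{}, err
	}
	return srv, nil
}
```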
// DEPRECATED! // The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
)
// Decoder decodes TOML data.
// //
// Use MetaData.PrimitiveDecode instead. // TOML tables correspond to Go structs or maps (dealer's choice they can be
func PrimitiveDecode(primValue Primitive, v interface{}) error { // used interchangeably).
md := MetaData{decoded: make(map[string]bool)} //
return md.unify(primValue.undecoded, rvalue(v)) // TOML table arrays correspond to either a slice of structs or a slice of maps.
//
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
//
// An exception to the above rules is if a type implements the TextUnmarshaler
// interface, in which case any primitive TOML value (floats, strings, integers,
// booleans, datetimes) will be converted to a []byte and given to the value's
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
// that don't match the key name exactly (see the example). A case insensitive
// match to struct names will be tried if an exact match can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there may
// exist TOML values that cannot be placed into your representation, and there
// may be parts of your representation that do not correspond to TOML values.
// This loose mapping can be made stricter by using the IsDefined and/or
// Undecoded methods on the MetaData returned.
//
// This decoder does not handle cyclic types. Decode will not terminate if a
// cyclic type is passed.
type Decoder struct {
r io.Reader
}
// NewDecoder creates a new Decoder.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
var (
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
s := "%q"
if reflect.TypeOf(v) == nil {
s = "%v"
}
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
}
// Check if this is a supported type: struct, map, interface{}, or something
// that implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
return MetaData{}, e("cannot decode to type %s", rt)
}
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
p, err := parse(string(data))
if err != nil {
return MetaData{}, err
}
md := MetaData{
mapping: p.mapping,
types: p.types,
keys: p.ordered,
decoded: make(map[string]struct{}, len(p.ordered)),
context: nil,
}
return md, md.unify(p.mapping, rv)
} }
// PrimitiveDecode is just like the other `Decode*` functions, except it // PrimitiveDecode is just like the other `Decode*` functions, except it
@ -68,89 +177,14 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v)) return md.unify(primValue.undecoded, rvalue(v))
} }
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`, // unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation. // which is the client representation.
// //
// Any type mismatch produces an error. Finding a type that we don't know // Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error. // how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error { func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value. // Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive // Save the undecoded data and the key context into the primitive
// value. // value.
@ -170,25 +204,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
} }
} }
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface. // Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok { if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v) return md.unifyText(data, v)
} }
// BUG(burntsushi) // TODO:
// The behavior here is incorrect whenever a Go type satisfies the // The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
// hash or array. In particular, the unmarshaler should only be applied // array. In particular, the unmarshaler should only be applied to primitive
// to primitive TOML values. But at this point, it will be applied to // TOML values. But at this point, it will be applied to all kinds of values
// all kinds of values and produce an incorrect error whenever those values // and produce an incorrect error whenever those values are hashes or arrays
// are hashes or arrays (including arrays of tables). // (including arrays of tables).
k := rv.Kind() k := rv.Kind()
@ -223,9 +249,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return e("unsupported type %s", rv.Type()) return e("unsupported type %s", rv.Type())
} }
return md.unifyAnything(data, rv) return md.unifyAnything(data, rv)
case reflect.Float32: case reflect.Float32, reflect.Float64:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv) return md.unifyFloat64(data, rv)
} }
return e("unsupported type %s", rv.Kind()) return e("unsupported type %s", rv.Kind())
@ -259,17 +283,17 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index { for _, i := range f.index {
subv = indirect(subv.Field(i)) subv = indirect(subv.Field(i))
} }
if isUnifiable(subv) { if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key) md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil { err := md.unify(datum, subv)
if err != nil {
return err return err
} }
md.context = md.context[0 : len(md.context)-1] md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" { } else if f.name != "" {
// Bad user! No soup for you! return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
} }
} }
} }
@ -277,27 +301,33 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if k := rv.Type().Key().Kind(); k != reflect.String {
return fmt.Errorf(
"toml: cannot decode to a map with non-string key type (%s in %q)",
k, rv.Type())
}
tmap, ok := mapping.(map[string]interface{}) tmap, ok := mapping.(map[string]interface{})
if !ok { if !ok {
if tmap == nil { if tmap == nil {
return nil return nil
} }
return badtype("map", mapping) return md.badtype("map", mapping)
} }
if rv.IsNil() { if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type())) rv.Set(reflect.MakeMap(rv.Type()))
} }
for k, v := range tmap { for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k) md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil { if err := md.unify(v, rvval); err != nil {
return err return err
} }
md.context = md.context[0 : len(md.context)-1] md.context = md.context[0 : len(md.context)-1]
rvkey := indirect(reflect.New(rv.Type().Key()))
rvkey.SetString(k) rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval) rv.SetMapIndex(rvkey, rvval)
} }
@ -310,12 +340,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
if !datav.IsValid() { if !datav.IsValid() {
return nil return nil
} }
return badtype("slice", data) return md.badtype("slice", data)
} }
sliceLen := datav.Len() if l := datav.Len(); l != rv.Len() {
if sliceLen != rv.Len() { return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
} }
return md.unifySliceArray(datav, rv) return md.unifySliceArray(datav, rv)
} }
@ -326,7 +354,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
if !datav.IsValid() { if !datav.IsValid() {
return nil return nil
} }
return badtype("slice", data) return md.badtype("slice", data)
} }
n := datav.Len() n := datav.Len()
if rv.IsNil() || rv.Cap() < n { if rv.IsNil() || rv.Cap() < n {
@ -337,37 +365,31 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len() l := data.Len()
for i := 0; i < sliceLen; i++ { for i := 0; i < l; i++ {
v := data.Index(i).Interface() err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
sliceval := indirect(rv.Index(i)) if err != nil {
if err := md.unify(v, sliceval); err != nil {
return err return err
} }
} }
return nil return nil
} }
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok { if s, ok := data.(string); ok {
rv.SetString(s) rv.SetString(s)
return nil return nil
} }
return badtype("string", data) return md.badtype("string", data)
} }
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok { if num, ok := data.(float64); ok {
switch rv.Kind() { switch rv.Kind() {
case reflect.Float32: case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return e("value %f is out of range for float32", num)
}
fallthrough fallthrough
case reflect.Float64: case reflect.Float64:
rv.SetFloat(num) rv.SetFloat(num)
@ -376,7 +398,26 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
} }
return nil return nil
} }
return badtype("float", data)
if num, ok := data.(int64); ok {
switch rv.Kind() {
case reflect.Float32:
if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
return e("value %d is out of range for float32", num)
}
fallthrough
case reflect.Float64:
if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
return e("value %d is out of range for float64", num)
}
rv.SetFloat(float64(num))
default:
panic("bug")
}
return nil
}
return md.badtype("float", data)
} }
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
@ -423,7 +464,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
} }
return nil return nil
} }
return badtype("integer", data) return md.badtype("integer", data)
} }
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@ -431,7 +472,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
rv.SetBool(b) rv.SetBool(b)
return nil return nil
} }
return badtype("boolean", data) return md.badtype("boolean", data)
} }
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
@ -439,9 +480,15 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil return nil
} }
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string var s string
switch sdata := data.(type) { switch sdata := data.(type) {
case Marshaler:
text, err := sdata.MarshalTOML()
if err != nil {
return err
}
s = string(text)
case TextMarshaler: case TextMarshaler:
text, err := sdata.MarshalText() text, err := sdata.MarshalText()
if err != nil { if err != nil {
@ -459,7 +506,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
case float64: case float64:
s = fmt.Sprintf("%f", sdata) s = fmt.Sprintf("%f", sdata)
default: default:
return badtype("primitive (string-like)", data) return md.badtype("primitive (string-like)", data)
} }
if err := v.UnmarshalText([]byte(s)); err != nil { if err := v.UnmarshalText([]byte(s)); err != nil {
return err return err
@ -467,22 +514,27 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
return nil return nil
} }
func (md *MetaData) badtype(dst string, data interface{}) error {
return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved. // rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value { func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v)) return indirect(reflect.ValueOf(v))
} }
// indirect returns the value pointed to by a pointer. // indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
// //
// An exception to this rule is if the value satisfies an interface of // Pointers are followed until the value is not a pointer. New values are
// interest to us (like encoding.TextUnmarshaler). // allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of interest
// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value { func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr { if v.Kind() != reflect.Ptr {
if v.CanSet() { if v.CanSet() {
pv := v.Addr() pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok { if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv return pv
} }
} }
@ -498,12 +550,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() { if rv.CanSet() {
return true return true
} }
if _, ok := rv.Interface().(TextUnmarshaler); ok { if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true return true
} }
return false return false
} }
func badtype(expected string, data interface{}) error { func e(format string, args ...interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected) return fmt.Errorf("toml: "+format, args...)
} }

19
vendor/github.com/BurntSushi/toml/decode_go116.go generated vendored Normal file

@ -0,0 +1,19 @@
//go:build go1.16
// +build go1.16
package toml
import (
"io/fs"
)
// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}
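
A possible usage sketch for `DecodeFS`, pairing it with an `embed.FS` (which satisfies `fs.FS`); the embedded file name and `Config` type are placeholders, not part of this change:

```go
package example

import (
	"embed"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS

type Config struct {
	Title string
}

func loadEmbedded() (Config, error) {
	var cfg Config
	// embed.FS satisfies fs.FS, so the embedded file can be decoded directly.
	_, err := toml.DecodeFS(configFS, "config.toml", &cfg)
	return cfg, err
}
```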

21
vendor/github.com/BurntSushi/toml/deprecated.go generated vendored Normal file

@ -0,0 +1,21 @@
package toml
import (
"encoding"
"io"
)
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }

vendor/github.com/BurntSushi/toml/doc.go

@ -1,27 +1,13 @@
/* /*
Package toml provides facilities for decoding and encoding TOML configuration Package toml implements decoding and encoding of TOML files.
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml This package supports TOML v1.0.0, as listed on https://toml.io
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify There is also support for delaying decoding with the Primitive type, and
whether a file is a valid TOML document. It can also be used to print the querying the set of keys in a TOML document with the MetaData type.
type of each key in a TOML document.
Testing The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
There are two important types of tests used for this package. The first is print the type of each key.
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/ */
package toml package toml

vendor/github.com/BurntSushi/toml/encode.go

@ -2,57 +2,106 @@ package toml
import ( import (
"bufio" "bufio"
"encoding"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/BurntSushi/toml/internal"
) )
type tomlEncodeError struct{ error } type tomlEncodeError struct{ error }
var ( var (
errArrayMixedElementTypes = errors.New( errArrayNilElement = errors.New("toml: cannot encode array with nil element")
"toml: cannot encode array with mixed element types") errNonString = errors.New("toml: cannot encode a map with non-string key type")
errArrayNilElement = errors.New( errNoKey = errors.New("toml: top-level values must be Go maps or structs")
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing errAnything = errors.New("") // used in testing
) )
var quotedReplacer = strings.NewReplacer( var dblQuotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"", "\"", "\\\"",
"\\", "\\\\", "\\", "\\\\",
"\x00", `\u0000`,
"\x01", `\u0001`,
"\x02", `\u0002`,
"\x03", `\u0003`,
"\x04", `\u0004`,
"\x05", `\u0005`,
"\x06", `\u0006`,
"\x07", `\u0007`,
"\b", `\b`,
"\t", `\t`,
"\n", `\n`,
"\x0b", `\u000b`,
"\f", `\f`,
"\r", `\r`,
"\x0e", `\u000e`,
"\x0f", `\u000f`,
"\x10", `\u0010`,
"\x11", `\u0011`,
"\x12", `\u0012`,
"\x13", `\u0013`,
"\x14", `\u0014`,
"\x15", `\u0015`,
"\x16", `\u0016`,
"\x17", `\u0017`,
"\x18", `\u0018`,
"\x19", `\u0019`,
"\x1a", `\u001a`,
"\x1b", `\u001b`,
"\x1c", `\u001c`,
"\x1d", `\u001d`,
"\x1e", `\u001e`,
"\x1f", `\u001f`,
"\x7f", `\u007f`,
) )
// Encoder controls the encoding of Go values to a TOML document to some // Marshaler is the interface implemented by types that can marshal themselves
// io.Writer. // into valid TOML.
// type Marshaler interface {
// The indentation level can be controlled with the Indent field. MarshalTOML() ([]byte, error)
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
} }
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer // Encoder encodes a Go to a TOML document.
// given. By default, a single indentation level is 2 spaces. //
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions.
//
// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to
// encode the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
// something like base64 since TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
// String to use for a single indentation level; default is two spaces.
Indent string
w *bufio.Writer
hasWritten bool // written any output to w yet?
}
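
A minimal sketch of driving this Encoder, assuming only the fields and methods shown in this diff; the `site` type and values are illustrative:

```go
package example

import (
	"bytes"

	"github.com/BurntSushi/toml"
)

type site struct {
	Title string
	Tags  []string
}

func encodeExample() (string, error) {
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	enc.Indent = "    " // override the default two-space indent
	if err := enc.Encode(site{Title: "example", Tags: []string{"go", "toml"}}); err != nil {
		return "", err
	}
	return buf.String(), nil
}
```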
// NewEncoder creates a new Encoder.
func NewEncoder(w io.Writer) *Encoder { func NewEncoder(w io.Writer) *Encoder {
return &Encoder{ return &Encoder{
w: bufio.NewWriter(w), w: bufio.NewWriter(w),
@ -60,29 +109,10 @@ func NewEncoder(w io.Writer) *Encoder {
} }
} }
// Encode writes a TOML representation of the Go value to the underlying // Encode writes a TOML representation of the Go value to the Encoder's writer.
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
// //
// The mapping between Go values and TOML values should be precisely the same // An error is returned if the value given cannot be encoded to a valid TOML
// as for the Decode* functions. Similarly, the TextMarshaler interface is // document.
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error { func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v)) rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil { if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@ -106,13 +136,18 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
} }
func (enc *Encoder) encode(key Key, rv reflect.Value) { func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format. // Special case: time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we used that. //
// Basically, this prevents the encoder for handling these types as // Special case: if we can marshal the type to text, then we used that. This
// generic structs (or whatever the underlying type of a TextMarshaler is). // prevents the encoder for handling these types as generic structs (or
switch rv.Interface().(type) { // whatever the underlying type of a TextMarshaler is).
case time.Time, TextMarshaler: switch t := rv.Interface().(type) {
enc.keyEqElement(key, rv) case time.Time, encoding.TextMarshaler, Marshaler:
enc.writeKeyValue(key, rv, false)
return
// TODO: #76 would make this superfluous after implemented.
case Primitive:
enc.encode(key, reflect.ValueOf(t.undecoded))
return return
} }
@ -123,12 +158,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64, reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv) enc.eArrayOfTables(key, rv)
} else { } else {
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
} }
case reflect.Interface: case reflect.Interface:
if rv.IsNil() { if rv.IsNil() {
@ -148,55 +183,88 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct: case reflect.Struct:
enc.eTable(key, rv) enc.eTable(key, rv)
default: default:
panic(e("unsupported type for key '%s': %s", key, k)) encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
} }
} }
// eElement encodes any value that can be an array element (primitives and // eElement encodes any value that can be an array element.
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) { func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) { switch v := rv.Interface().(type) {
case time.Time: case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
// Special case time.Time as a primitive. Has to come before format := time.RFC3339Nano
// TextMarshaler below because time.Time implements switch v.Location() {
// encoding.TextMarshaler, but we need to always use UTC. case internal.LocalDatetime:
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) format = "2006-01-02T15:04:05.999999999"
case internal.LocalDate:
format = "2006-01-02"
case internal.LocalTime:
format = "15:04:05.999999999"
}
switch v.Location() {
default:
enc.wf(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
enc.wf(v.In(time.UTC).Format(format))
}
return return
case TextMarshaler: case Marshaler:
// Special case. Use text marshaler if it's available for this value. s, err := v.MarshalTOML()
if s, err := v.MarshalText(); err != nil { if err != nil {
encPanic(err) encPanic(err)
} else {
enc.writeQuoted(string(s))
} }
enc.w.Write(s)
return
case encoding.TextMarshaler:
s, err := v.MarshalText()
if err != nil {
encPanic(err)
}
enc.writeQuoted(string(s))
return return
} }
switch rv.Kind() { switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String: case reflect.String:
enc.writeQuoted(rv.String()) enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Struct:
enc.eStruct(nil, rv, true)
case reflect.Map:
enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
default: default:
panic(e("unexpected primitive type: %s", rv.Kind())) encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
} }
} }
// By the TOML spec, all floats must have a decimal with at least one // By the TOML spec, all floats must have a decimal with at least one number on
// number on either side. // either side.
func floatAddDecimal(fstr string) string { func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") { if !strings.Contains(fstr, ".") {
return fstr + ".0" return fstr + ".0"
@ -205,7 +273,7 @@ func floatAddDecimal(fstr string) string {
} }
func (enc *Encoder) writeQuoted(s string) { func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s)) enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
} }
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
@ -230,40 +298,39 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if isNil(trv) { if isNil(trv) {
continue continue
} }
panicIfInvalidKey(key)
enc.newline() enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.newline() enc.newline()
enc.eMapOrStruct(key, trv) enc.eMapOrStruct(key, trv, false)
} }
} }
func (enc *Encoder) eTable(key Key, rv reflect.Value) { func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 { if len(key) == 1 {
// Output an extra newline between top-level tables. // Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.) // (The newline isn't written if nothing else has been written though.)
enc.newline() enc.newline()
} }
if len(key) > 0 { if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[%s]", enc.indentStr(key), key)
enc.newline() enc.newline()
} }
enc.eMapOrStruct(key, rv) enc.eMapOrStruct(key, rv, false)
} }
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv := eindirect(rv); rv.Kind() { switch rv := eindirect(rv); rv.Kind() {
case reflect.Map: case reflect.Map:
enc.eMap(key, rv) enc.eMap(key, rv, inline)
case reflect.Struct: case reflect.Struct:
enc.eStruct(key, rv) enc.eStruct(key, rv, inline)
default: default:
// Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
} }
} }
func (enc *Encoder) eMap(key Key, rv reflect.Value) { func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type() rt := rv.Type()
if rt.Key().Kind() != reflect.String { if rt.Key().Kind() != reflect.String {
encPanic(errNonString) encPanic(errNonString)
@ -274,114 +341,163 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
var mapKeysDirect, mapKeysSub []string var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() { for _, mapKey := range rv.MapKeys() {
k := mapKey.String() k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k) mapKeysSub = append(mapKeysSub, k)
} else { } else {
mapKeysDirect = append(mapKeysDirect, k) mapKeysDirect = append(mapKeysDirect, k)
} }
} }
var writeMapKeys = func(mapKeys []string) { var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys) sort.Strings(mapKeys)
for _, mapKey := range mapKeys { for i, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey)) val := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) { if isNil(val) {
// Don't write anything for nil fields.
continue continue
} }
enc.encode(key.add(mapKey), mrv)
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
} }
} }
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
} }
func (enc *Encoder) eStruct(key Key, rv reflect.Value) { if inline {
enc.wf("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
enc.wf("}")
}
}
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write // Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here). // table (not the one we're writing here).
rt := rv.Type() //
var fieldsDirect, fieldsSub [][]int // Fields is a [][]int: for fieldsDirect this always has one entry (the
var addFields func(rt reflect.Type, rv reflect.Value, start []int) // struct index). For fieldsSub it contains two entries: the parent field
// index from tv, and the field indexes for the fields of the sub.
var (
rt = rv.Type()
fieldsDirect, fieldsSub [][]int
addFields func(rt reflect.Type, rv reflect.Value, start []int)
)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) { addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ { for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i) f := rt.Field(i)
// skip unexported fields if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
if f.PkgPath != "" && !f.Anonymous {
continue continue
} }
opts := getOptions(f.Tag)
if opts.skip {
continue
}
frv := rv.Field(i) frv := rv.Field(i)
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
if f.Anonymous { if f.Anonymous {
t := f.Type t := f.Type
switch t.Kind() { switch t.Kind() {
case reflect.Struct: case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" { if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index) addFields(t, frv, append(start, f.Index...))
continue continue
} }
case reflect.Ptr: case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct && if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
getOptions(f.Tag).name == "" {
if !frv.IsNil() { if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index) addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
} }
continue continue
} }
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
} }
} }
if typeIsHash(tomlTypeOfGo(frv)) { if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...)) fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
// Copy so it works correct on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
if is32Bit {
copyStart := make([]int, len(start))
copy(copyStart, start)
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
} else { } else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...)) fieldsDirect = append(fieldsDirect, append(start, f.Index...))
} }
} }
} }
}
addFields(rt, rv, nil) addFields(rt, rv, nil)
var writeFields = func(fields [][]int) { writeFields := func(fields [][]int) {
for _, fieldIndex := range fields { for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex) fieldType := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields. if isNil(fieldVal) { /// Don't write anything for nil fields.
continue continue
} }
opts := getOptions(sft.Tag) opts := getOptions(fieldType.Tag)
if opts.skip { if opts.skip {
continue continue
} }
keyName := sft.Name keyName := fieldType.Name
if opts.name != "" { if opts.name != "" {
keyName = opts.name keyName = opts.name
} }
if opts.omitempty && isEmpty(sf) { if opts.omitempty && isEmpty(fieldVal) {
continue continue
} }
if opts.omitzero && isZero(sf) { if opts.omitzero && isZero(fieldVal) {
continue continue
} }
enc.encode(key.add(keyName), sf) if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
enc.wf(", ")
} }
} else {
enc.encode(key.add(keyName), fieldVal)
}
}
}
if inline {
enc.wf("{")
} }
writeFields(fieldsDirect) writeFields(fieldsDirect)
writeFields(fieldsSub) writeFields(fieldsSub)
if inline {
enc.wf("}")
}
} }
// tomlTypeName returns the TOML type name of the Go value's type. It is // tomlTypeOfGo returns the TOML type name of the Go value's type.
// used to determine whether the types of array elements are mixed (which is //
// forbidden). If the Go value is nil, then it is illegal for it to be an array // It is used to determine whether the types of array elements are mixed (which
// element, and valueIsNil is returned as true. // is forbidden). If the Go value is nil, then it is illegal for it to be an
// array element, and valueIsNil is returned as true.
// Returns the TOML type of a Go value. The type may be `nil`, which means //
// no concrete TOML type could be found. // The type may be `nil`, which means no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType { func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() { if isNil(rv) || !rv.IsValid() {
return nil return nil
@ -408,17 +524,41 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Map: case reflect.Map:
return tomlHash return tomlHash
case reflect.Struct: case reflect.Struct:
switch rv.Interface().(type) { if _, ok := rv.Interface().(time.Time); ok {
case time.Time:
return tomlDatetime return tomlDatetime
case TextMarshaler: }
if isMarshaler(rv) {
return tomlString return tomlString
default: }
return tomlHash return tomlHash
}
default: default:
panic("unexpected reflect.Kind: " + rv.Kind().String()) if isMarshaler(rv) {
return tomlString
} }
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("unreachable")
}
}
func isMarshaler(rv reflect.Value) bool {
switch rv.Interface().(type) {
case encoding.TextMarshaler:
return true
case Marshaler:
return true
}
// Someone used a pointer receiver: we can make it work for pointer values.
if rv.CanAddr() {
if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
return true
}
if _, ok := rv.Addr().Interface().(Marshaler); ok {
return true
}
}
return false
} }
// tomlArrayType returns the element type of a TOML array. The type returned // tomlArrayType returns the element type of a TOML array. The type returned
@ -430,30 +570,19 @@ func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil return nil
} }
/// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
}
}
firstType := tomlTypeOfGo(rv.Index(0)) firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil { if firstType == nil {
encPanic(errArrayNilElement) encPanic(errArrayNilElement)
} }
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType return firstType
} }
@ -511,18 +640,32 @@ func (enc *Encoder) newline() {
} }
} }
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { // Write a key/value pair:
//
// key = <any value>
//
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
// ┌────────────────────┐
// │ ┌───┐ ┌─────┐│
// v v v v vv
// key = {k = v, k2 = v2}
//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 { if len(key) == 0 {
encPanic(errNoKey) encPanic(errNoKey)
} }
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val) enc.eElement(val)
if !inline {
enc.newline() enc.newline()
} }
}
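For reference, a short sketch of the public path that ends in writeKeyValue: toml.NewEncoder(...).Encode writes one "key = value" line per exported field. The struct contents and the commented output are illustrative.

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	conf := struct {
		Title string
		Ports []int
	}{Title: "sl-feeds", Ports: []int{8080, 8443}}

	// Each exported field becomes a single "key = value" line.
	if err := toml.NewEncoder(os.Stdout).Encode(conf); err != nil {
		panic(err)
	}
	// Expected output (roughly):
	// Title = "sl-feeds"
	// Ports = [8080, 8443]
}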
func (enc *Encoder) wf(format string, v ...interface{}) { func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { _, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err) encPanic(err)
} }
enc.hasWritten = true enc.hasWritten = true
@ -553,16 +696,3 @@ func isNil(rv reflect.Value) bool {
return false return false
} }
} }
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}


@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler


@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

vendor/github.com/BurntSushi/toml/error.go generated vendored Normal file

@ -0,0 +1,229 @@
package toml
import (
"fmt"
"strings"
)
// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
// At line 4, column 2-7:
//
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
//
// Furthermore, ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
//
// toml: error: newlines not allowed within inline tables
//
// At line 1, column 18:
//
// 1 | x = [{ key = 42 #
// ^
//
// Error help:
//
// Inline tables must always be on a single line:
//
// table = {key = 42, second = 43}
//
// It is invalid to split them over multiple lines like so:
//
// # INVALID
// table = {
// key = 42,
// second = 43
// }
//
// Use regular tables for this:
//
// [table]
// key = 42
// second = 43
type ParseError struct {
Message string // Short technical message.
Usage string // Longer message with usage guidance; may be blank.
Position Position // Position of the error
LastKey string // Last parsed key, may be blank.
Line int // Line the error occurred. Deprecated: use Position.
err error
input string
}
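For reference, a hedged sketch of how a caller can reach the detailed output described above; the TOML snippet mirrors the doc comment, and the type assertion assumes Decode surfaces a plain ParseError value.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	doc := "fruit = []\n\n[[fruit]] # Not allowed\n"

	var v map[string]interface{}
	if _, err := toml.Decode(doc, &v); err != nil {
		// Print the error with line/column context when possible.
		if pErr, ok := err.(toml.ParseError); ok {
			fmt.Println(pErr.ErrorWithPosition())
			return
		}
		fmt.Println(err)
	}
}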
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Length in bytes.
}
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
}
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithPosition() string {
if pe.input == "" { // Should never happen, but just in case.
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
}
if pe.Position.Line > 1 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
}
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
return b.String()
}
// ErrorWithUsage() returns the error with detailed location context and usage
// guidance.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
return m + "Error help:\n\n " +
strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
"\n"
}
return m
}
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
errLexInvalidNum struct{ v string }
errLexInvalidDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
)
func (e errLexControl) Error() string {
return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
}
func (e errLexControl) Usage() string { return "" }
func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
func (e errLexInvalidNum) Usage() string { return "" }
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
func (e errLexInvalidDate) Usage() string { return "" }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
The following escape sequences are supported:
\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
To prevent a '\' from being recognized as an escape character, use either:
- a ' or '''-delimited string; escape characters aren't processed in them; or
- write two backslashes to get a single backslash: '\\'.
If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
instead of '\' will usually also work: "C:/Users/martin".
`
const usageInlineNewline = `
Inline tables must always be on a single line:
table = {key = 42, second = 43}
It is invalid to split them over multiple lines like so:
# INVALID
table = {
key = 42,
second = 43
}
Use regular tables for this:
[table]
key = 42
second = 43
`
const usageStringNewline = `
Strings must always be on a single line, and cannot span more than one line:
# INVALID
string = "Hello,
world!"
Instead use """ or ''' to split strings over multiple lines:
string = """Hello,
world!"""
`

vendor/github.com/BurntSushi/toml/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/BurntSushi/toml
go 1.16

vendor/github.com/BurntSushi/toml/internal/tz.go generated vendored Normal file

@ -0,0 +1,36 @@
package internal
import "time"
// Timezones used for local datetime, date, and time TOML types.
//
// The exact way times and dates without a timezone should be interpreted is not
// well-defined in the TOML specification and left to the implementation. These
// default to the current local timezone offset of the computer, but this can be
// changed by setting these variables before decoding.
//
// TODO:
// Ideally we'd like to offer people the ability to configure the used timezone
// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
// tricky: the reason we use three different variables for this is to support
// round-tripping: without these specific TZ names we wouldn't know which
// format to use.
//
// There isn't a good way to encode this right now though, and passing this sort
// of information also ties in to various related issues such as string format
// encoding, encoding of comments, etc.
//
// So, for the time being, just put this in internal until we can write a good
// comprehensive API for doing all of this.
//
// The reason they're exported is that they're referred to from e.g.
// internal/tag.
//
// Note that this behaviour is valid according to the TOML spec as the exact
// behaviour is left up to implementations.
var (
localOffset = func() int { _, o := time.Now().Zone(); return o }()
LocalDatetime = time.FixedZone("datetime-local", localOffset)
LocalDate = time.FixedZone("date-local", localOffset)
LocalTime = time.FixedZone("time-local", localOffset)
)
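For reference, a sketch of what the comment above implies for callers: a date without a timezone decodes into a time.Time attached to one of these fixed zones. The zone name check is an assumption based on the variable definitions above.

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ D time.Time }

	// A local (offset-less) TOML date.
	if _, err := toml.Decode(`D = 2022-04-27`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.D.Location()) // expected (assumption): date-local
}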

File diff suppressed because it is too large


@ -1,33 +1,39 @@
package toml package toml
import "strings" import (
"strings"
)
// MetaData allows access to meta information about TOML data that may not // MetaData allows access to meta information about TOML data that's not
// be inferrable via reflection. In particular, whether a key has been defined // accessible otherwise.
// and the TOML type of a key. //
// It allows checking if a key is defined in the TOML data, whether any keys
// were undecoded, and the TOML type of a key.
type MetaData struct { type MetaData struct {
context Key // Used only during decoding.
mapping map[string]interface{} mapping map[string]interface{}
types map[string]tomlType types map[string]tomlType
keys []Key keys []Key
decoded map[string]bool decoded map[string]struct{}
context Key // Used only during decoding.
} }
// IsDefined returns true if the key given exists in the TOML data. The key // IsDefined reports if the key exists in the TOML data.
// should be specified hierarchially. e.g.,
// //
// // access the TOML key 'a.b.c' // The key should be specified hierarchically, for example to access the TOML
// IsDefined("a", "b", "c") // key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
// //
// IsDefined will return false if an empty key given. Keys are case sensitive. // Returns false for an empty key.
func (md *MetaData) IsDefined(key ...string) bool { func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 { if len(key) == 0 {
return false return false
} }
var hash map[string]interface{} var (
var ok bool hash map[string]interface{}
var hashOrVal interface{} = md.mapping ok bool
hashOrVal interface{} = md.mapping
)
for _, k := range key { for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok { if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false return false
@ -41,58 +47,20 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified. // Type returns a string representation of the type of the key specified.
// //
// Type will return the empty string if given an empty key or a key that // Type will return the empty string if given an empty key or a key that does
// does not exist. Keys are case sensitive. // not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string { func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".") if typ, ok := md.types[Key(key).String()]; ok {
if typ, ok := md.types[fullkey]; ok {
return typ.typeString() return typ.typeString()
} }
return "" return ""
} }
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups. // Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
// //
// The list will have the same order as the keys appeared in the TOML data. // Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
// //
// All keys returned are non-empty. // All keys returned are non-empty.
func (md *MetaData) Keys() []Key { func (md *MetaData) Keys() []Key {
@ -113,9 +81,40 @@ func (md *MetaData) Keys() []Key {
func (md *MetaData) Undecoded() []Key { func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys)) undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys { for _, key := range md.keys {
if !md.decoded[key.String()] { if _, ok := md.decoded[key.String()]; !ok {
undecoded = append(undecoded, key) undecoded = append(undecoded, key)
} }
} }
return undecoded return undecoded
} }
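For reference, a short sketch of the MetaData accessors documented above; the TOML document and the expected values in the comments are illustrative.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Owner struct{ Name string }
	}
	doc := "[owner]\nname = \"Vincent\"\ndob = 1979-05-27\n"

	md, err := toml.Decode(doc, &conf)
	if err != nil {
		panic(err)
	}

	fmt.Println(md.IsDefined("owner", "name")) // true
	fmt.Println(md.Type("owner", "dob"))       // expected (assumption): Datetime
	fmt.Println(md.Undecoded())                // [owner.dob]: no struct field consumed it
}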
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string
func (k Key) String() string {
ss := make([]string, len(k))
for i := range k {
ss[i] = k.maybeQuoted(i)
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
for _, c := range k[i] {
if !isBareKeyChar(c) {
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}


@ -5,54 +5,63 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
"unicode/utf8" "unicode/utf8"
"github.com/BurntSushi/toml/internal"
) )
type parser struct { type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer lx *lexer
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
// A list of keys in the order that they appear in the TOML data. ordered []Key // List of keys in the order that they appear in the TOML data.
ordered []Key mapping map[string]interface{} // Map keyname → key value.
types map[string]tomlType // Map keyname → TOML type.
// the full key for the current hash in scope implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
} }
func parse(data string) (p *parser, err error) { func parse(data string) (p *parser, err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
var ok bool if pErr, ok := r.(ParseError); ok {
if err, ok = r.(parseError); ok { pErr.input = data
err = pErr
return return
} }
panic(r) panic(r)
} }
}() }()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
// which mangles stuff.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
data = data[2:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
// file (second byte in surrogate pair being NULL). Again, do this here to
// avoid having to deal with UTF-8/16 stuff in the lexer.
ex := 6
if len(data) < 6 {
ex = len(data)
}
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
}
p = &parser{ p = &parser{
mapping: make(map[string]interface{}), mapping: make(map[string]interface{}),
types: make(map[string]tomlType), types: make(map[string]tomlType),
lx: lex(data), lx: lex(data),
ordered: make([]Key, 0), ordered: make([]Key, 0),
implicits: make(map[string]bool), implicits: make(map[string]struct{}),
} }
for { for {
item := p.next() item := p.next()
@ -65,17 +74,45 @@ func parse(data string) (p *parser, err error) {
return p, nil return p, nil
} }
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Line: it.pos.Len,
LastKey: p.current(),
})
}
func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", panic(ParseError{
p.approxLine, p.current(), fmt.Sprintf(format, v...)) Message: fmt.Sprintf(format, v...),
panic(parseError(msg)) Position: p.pos,
Line: p.pos.Line,
LastKey: p.current(),
})
} }
func (p *parser) next() item { func (p *parser) next() item {
it := p.lx.nextItem() it := p.lx.nextItem()
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError { if it.typ == itemError {
p.panicf("%s", it.val) if it.err != nil {
panic(ParseError{
Position: it.pos,
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
} }
p.panicItemf(it, "%s", it.val)
}
return it
}
func (p *parser) nextPos() item {
it := p.next()
p.pos = it.pos
return it return it
} }
@ -97,44 +134,59 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) { func (p *parser) topLevel(item item) {
switch item.typ { switch item.typ {
case itemCommentStart: case itemCommentStart: // # ..
p.approxLine = item.line
p.expect(itemText) p.expect(itemText)
case itemTableStart: case itemTableStart: // [ .. ]
kg := p.next() name := p.nextPos()
p.approxLine = kg.line
var key Key var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemTableEnd, kg.typ) p.assertEqual(itemTableEnd, name.typ)
p.establishContext(key, false) p.addContext(key, false)
p.setType("", tomlHash) p.setType("", tomlHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemArrayTableStart: case itemArrayTableStart: // [[ .. ]]
kg := p.next() name := p.nextPos()
p.approxLine = kg.line
var key Key var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemArrayTableEnd, kg.typ) p.assertEqual(itemArrayTableEnd, name.typ)
p.establishContext(key, true) p.addContext(key, true)
p.setType("", tomlArrayHash) p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemKeyStart: case itemKeyStart: // key = ..
kname := p.next() outerContext := p.context
p.approxLine = kname.line /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
p.currentKey = p.keyString(kname) k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
val, typ := p.value(p.next()) /// The current key is the last part.
p.setValue(p.currentKey, val) p.currentKey = key[len(key)-1]
p.setType(p.currentKey, typ)
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey)) p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
p.currentKey = "" p.currentKey = ""
default: default:
p.bug("Unexpected type at top level: %s", item.typ) p.bug("Unexpected type at top level: %s", item.typ)
@ -148,59 +200,81 @@ func (p *parser) keyString(it item) string {
return it.val return it.val
case itemString, itemMultilineString, case itemString, itemMultilineString,
itemRawString, itemRawMultilineString: itemRawString, itemRawMultilineString:
s, _ := p.value(it) s, _ := p.value(it, false)
return s.(string) return s.(string)
default: default:
p.bug("Unexpected key type: %s", it.typ) p.bug("Unexpected key type: %s", it.typ)
}
panic("unreachable") panic("unreachable")
} }
}
var datetimeRepl = strings.NewReplacer(
"z", "Z",
"t", "T",
" ", "T")
// value translates an expected value from the lexer into a Go value wrapped // value translates an expected value from the lexer into a Go value wrapped
// as an empty interface. // as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) { func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ { switch it.typ {
case itemString: case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it) return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString: case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString: case itemRawString:
return it.val, p.typeOfPrimitive(it) return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString: case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it) return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemInteger:
return p.valueInteger(it)
case itemFloat:
return p.valueFloat(it)
case itemBool: case itemBool:
switch it.val { switch it.val {
case "true": case "true":
return true, p.typeOfPrimitive(it) return true, p.typeOfPrimitive(it)
case "false": case "false":
return false, p.typeOfPrimitive(it) return false, p.typeOfPrimitive(it)
} default:
p.bug("Expected boolean value, but got '%s'.", it.val) p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
} }
val := strings.Replace(it.val, "_", "", -1) case itemDatetime:
num, err := strconv.ParseInt(val, 10, 64) return p.valueDatetime(it)
case itemArray:
return p.valueArray(it)
case itemInlineTableStart:
return p.valueInlineTable(it, parentIsArray)
default:
p.bug("Unexpected value type: %s", it.typ)
}
panic("unreachable")
}
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
if numHasLeadingZero(it.val) {
p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
}
num, err := strconv.ParseInt(it.val, 0, 64)
if err != nil { if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer // Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is // provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine). // out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user // So mark the former as a bug but the latter as a legitimate user
// error. // error.
if e, ok := err.(*strconv.NumError); ok && if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
e.Err == strconv.ErrRange { p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else { } else {
p.bug("Expected integer value, but got '%s'.", it.val) p.bug("Expected integer value, but got '%s'.", it.val)
} }
} }
return num, p.typeOfPrimitive(it) return num, p.typeOfPrimitive(it)
case itemFloat: }
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool { parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r { switch r {
case '.', 'e', 'E': case '.', 'e', 'E':
@ -210,66 +284,96 @@ func (p *parser) value(it item) (interface{}, tomlType) {
}) })
for _, part := range parts { for _, part := range parts {
if !numUnderscoresOK(part) { if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+ p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
"surrounded by digits", it.val)
} }
} }
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
}
if !numPeriodsOK(it.val) { if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2', // As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned, // which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional // must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits. // part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+ p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
"by one or more digits", it.val)
} }
val := strings.Replace(it.val, "_", "", -1) val := strings.Replace(it.val, "_", "", -1)
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
val = "nan"
}
num, err := strconv.ParseFloat(val, 64) num, err := strconv.ParseFloat(val, 64)
if err != nil { if err != nil {
if e, ok := err.(*strconv.NumError); ok && if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
e.Err == strconv.ErrRange { p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else { } else {
p.panicf("Invalid float value: %q", it.val) p.panicItemf(it, "Invalid float value: %q", it.val)
} }
} }
return num, p.typeOfPrimitive(it) return num, p.typeOfPrimitive(it)
case itemDatetime: }
var t time.Time
var ok bool var dtTypes = []struct {
var err error fmt string
for _, format := range []string{ zone *time.Location
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
}{ }{
t, err = time.ParseInLocation(format, it.val, time.Local) {time.RFC3339Nano, time.Local},
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
{"2006-01-02", internal.LocalDate},
{"15:04:05.999999999", internal.LocalTime},
}
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
ok bool
err error
)
for _, dt := range dtTypes {
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil { if err == nil {
ok = true ok = true
break break
} }
} }
if !ok { if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val) p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
} }
return t, p.typeOfPrimitive(it) return t, p.typeOfPrimitive(it)
case itemArray: }
array := make([]interface{}, 0)
types := make([]tomlType, 0)
func (p *parser) valueArray(it item) (interface{}, tomlType) {
p.setType(p.currentKey, tomlArray)
// p.setType(p.currentKey, typ)
var (
types []tomlType
// Initialize to a non-nil empty slice. This makes it consistent with
// how S = [] decodes into a non-nil slice inside something like struct
// { S []string }. See #338
array = []interface{}{}
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() { for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart { if it.typ == itemCommentStart {
p.expect(itemText) p.expect(itemText)
continue continue
} }
val, typ := p.value(it) val, typ := p.value(it, true)
array = append(array, val) array = append(array, val)
types = append(types, typ) types = append(types, typ)
// XXX: types isn't used here, we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
} }
return array, p.typeOfArray(types) return array, tomlArray
case itemInlineTableStart: }
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
var ( var (
hash = make(map[string]interface{}) hash = make(map[string]interface{})
outerContext = p.context outerContext = p.context
@ -277,51 +381,81 @@ func (p *parser) value(it item) (interface{}, tomlType) {
) )
p.context = append(p.context, p.currentKey) p.context = append(p.context, p.currentKey)
prevContext := p.context
p.currentKey = "" p.currentKey = ""
p.addImplicit(p.context)
p.addContext(p.context, parentIsArray)
/// Loop over all table key/value pairs.
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart { if it.typ == itemCommentStart {
p.expect(itemText) p.expect(itemText)
continue continue
} }
// retrieve key /// Read all key parts.
k := p.next() k := p.nextPos()
p.approxLine = k.line var key Key
kname := p.keyString(k) for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
// retrieve value /// The current key is the last part.
p.currentKey = kname p.currentKey = key[len(key)-1]
val, typ := p.value(p.next())
// make sure we keep metadata up to date /// All the other parts (if any) are the context; need to set each part
p.setType(kname, typ) /// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set the value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey)) p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val hash[p.currentKey] = val
/// Restore context.
p.context = prevContext
} }
p.context = outerContext p.context = outerContext
p.currentKey = outerKey p.currentKey = outerKey
return hash, tomlHash return hash, tomlHash
} }
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable") // numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
return true
}
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
return true
}
return false
} }
// numUnderscoresOK checks whether each underscore in s is surrounded by // numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores. // characters that are not underscores.
func numUnderscoresOK(s string) bool { func numUnderscoresOK(s string) bool {
switch s {
case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
return true
}
accept := false accept := false
for _, r := range s { for _, r := range s {
if r == '_' { if r == '_' {
if !accept { if !accept {
return false return false
} }
accept = false
continue
} }
accept = true
// isHexadecimal is a superset of all the permissible characters
// surrounding an underscore.
accept = isHexadecimal(r)
} }
return accept return accept
} }
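For reference, a sketch of the number rules enforced by numUnderscoresOK and numHasLeadingZero, driven through the public Decode API; which inputs error out is an assumption based on the checks above.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	cases := []string{
		`a = 1_000`,  // ok: underscores surrounded by digits
		`a = 1__000`, // error: doubled underscore
		`a = 0755`,   // error: leading zero
		`a = 0x1_F`,  // ok: base prefix plus separated hex digits
	}
	for _, doc := range cases {
		var v map[string]interface{}
		_, err := toml.Decode(doc, &v)
		fmt.Printf("%-14q error=%v\n", doc, err != nil)
	}
}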
@ -338,13 +472,12 @@ func numPeriodsOK(s string) bool {
return !period return !period
} }
// establishContext sets the current context of the parser, // Set the current context of the parser, where the context is either a hash or
// where the context is either a hash or an array of hashes. Which one is // an array of hashes, depending on the value of the `array` parameter.
// set depends on the value of the `array` parameter.
// //
// Establishing the context also makes sure that the key isn't a duplicate, and // Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically. // will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) { func (p *parser) addContext(key Key, array bool) {
var ok bool var ok bool
// Always start at the top level and drill down for our context. // Always start at the top level and drill down for our context.
@ -383,7 +516,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it. // list of tables for it.
k := key[len(key)-1] k := key[len(key)-1]
if _, ok := hashContext[k]; !ok { if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5) hashContext[k] = make([]map[string]interface{}, 0, 4)
} }
// Add a new table. But make sure the key hasn't already been used // Add a new table. But make sure the key hasn't already been used
@ -391,8 +524,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok { if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{})) hashContext[k] = append(hash, make(map[string]interface{}))
} else { } else {
p.panicf("Key '%s' was already created and cannot be used as "+ p.panicf("Key '%s' was already created and cannot be used as an array.", key)
"an array.", keyContext)
} }
} else { } else {
p.setValue(key[len(key)-1], make(map[string]interface{})) p.setValue(key[len(key)-1], make(map[string]interface{}))
@ -400,15 +532,22 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1]) p.context = append(p.context, key[len(key)-1])
} }
// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
p.setValue(key, val)
p.setType(key, typ)
}
// setValue sets the given key to the given value in the current context. // setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for // It will make sure that the key hasn't already been defined, account for
// implicit key groups. // implicit key groups.
func (p *parser) setValue(key string, value interface{}) { func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{} var (
var ok bool tmpHash interface{}
ok bool
hash := p.mapping hash = p.mapping
keyContext := make(Key, 0) keyContext Key
)
for _, k := range p.context { for _, k := range p.context {
keyContext = append(keyContext, k) keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok { if tmpHash, ok = hash[k]; !ok {
@ -422,24 +561,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}: case map[string]interface{}:
hash = t hash = t
default: default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+ p.panicf("Key '%s' has already been defined.", keyContext)
"it has '%T' instead.", tmpHash)
} }
} }
keyContext = append(keyContext, key) keyContext = append(keyContext, key)
if _, ok := hash[key]; ok { if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have // Normally redefining keys isn't allowed, but the key could have been
// to raise an error since duplicate keys are disallowed. However, // defined implicitly and it's allowed to be redefined concretely. (See
// it's possible that a key was previously defined implicitly. In this // the `valid/implicit-and-explicit-after.toml` in toml-test)
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
// //
// But we have to make sure to stop marking it as an implicit. (So that // But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.) // another redefinition provokes an error.)
// //
// Note that since it has already been defined (as a hash), we don't // Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done. // want to overwrite it. So our business is done.
if p.isArray(keyContext) {
p.removeImplicit(keyContext)
hash[key] = value
return
}
if p.isImplicit(keyContext) { if p.isImplicit(keyContext) {
p.removeImplicit(keyContext) p.removeImplicit(keyContext)
return return
@ -449,40 +590,39 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong. // key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext) p.panicf("Key '%s' has already been defined.", keyContext)
} }
hash[key] = value hash[key] = value
} }
// setType sets the type of a particular value at a given key. // setType sets the type of a particular value at a given key. It should be
// It should be called immediately AFTER setValue. // called immediately AFTER setValue.
// //
// Note that if `key` is empty, then the type given will be applied to the // Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables). // current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) { func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1) keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context { keyContext = append(keyContext, p.context...)
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key) keyContext = append(keyContext, key)
} }
// Special case to make empty keys ("" = 1) work.
// Without it, it would set "" rather than `""`.
// TODO: why is this needed? And why is this only needed here?
if len(keyContext) == 0 {
keyContext = Key{""}
}
p.types[keyContext.String()] = typ p.types[keyContext.String()] = typ
} }
// addImplicit sets the given Key as having been created implicitly. // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
func (p *parser) addImplicit(key Key) { // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
p.implicits[key.String()] = true func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
} func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
// removeImplicit stops tagging the given key as having been implicitly func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
// created. func (p *parser) addImplicitContext(key Key) {
func (p *parser) removeImplicit(key Key) { p.addImplicit(key)
p.implicits[key.String()] = false p.addContext(key, false)
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
} }
// current returns the full key name of the current context. // current returns the full key name of the current context.
@ -497,24 +637,62 @@ func (p *parser) current() string {
} }
func stripFirstNewline(s string) string { func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' { if len(s) > 0 && s[0] == '\n' {
return s
}
return s[1:] return s[1:]
} }
if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
func stripEscapedWhitespace(s string) string { return s[2:]
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
} }
} return s
return strings.Join(esc, "")
} }
func (p *parser) replaceEscapes(str string) string { // Remove newlines inside triple-quoted strings if a line ends with "\".
var replaced []rune func (p *parser) stripEscapedNewlines(s string) string {
split := strings.Split(s, "\n")
if len(split) < 1 {
return s
}
escNL := false // Keep track of whether the last non-blank line was escaped.
for i, line := range split {
line = strings.TrimRight(line, " \t\r")
if len(line) == 0 || line[len(line)-1] != '\\' {
split[i] = strings.TrimRight(split[i], "\r")
if !escNL && i != len(split)-1 {
split[i] += "\n"
}
continue
}
escBS := true
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
escBS = !escBS
}
if escNL {
line = strings.TrimLeft(line, " \t\r")
}
escNL = !escBS
if escBS {
split[i] += "\n"
continue
}
if i == len(split)-1 {
p.panicf("invalid escape: '\\ '")
}
split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
}
}
return strings.Join(split, "")
}
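For reference, a sketch of the observable effect of stripEscapedNewlines: a backslash at the end of a line inside a """ string removes the newline and the following line's leading whitespace. The input is the usual TOML example for this rule.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	doc := "s = \"\"\"\nThe quick brown \\\n   fox jumps over \\\n   the lazy dog.\"\"\"\n"

	var v struct{ S string }
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // expected: "The quick brown fox jumps over the lazy dog."
}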
func (p *parser) replaceEscapes(it item, str string) string {
replaced := make([]rune, 0, len(str))
s := []byte(str) s := []byte(str)
r := 0 r := 0
for r < len(s) { for r < len(s) {
@ -533,6 +711,9 @@ func (p *parser) replaceEscapes(str string) string {
default: default:
p.bug("Expected valid escape code after \\, but got %q.", s[r]) p.bug("Expected valid escape code after \\, but got %q.", s[r])
return "" return ""
case ' ', '\t':
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
return ""
case 'b': case 'b':
replaced = append(replaced, rune(0x0008)) replaced = append(replaced, rune(0x0008))
r += 1 r += 1
@ -558,14 +739,14 @@ func (p *parser) replaceEscapes(str string) string {
// At this point, we know we have a Unicode escape of the form // At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this // `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.) // for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
replaced = append(replaced, escaped) replaced = append(replaced, escaped)
r += 5 r += 5
case 'U': case 'U':
// At this point, we know we have a Unicode escape of the form // At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+9). (Because the lexer guarantees this // `uXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.) // for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
replaced = append(replaced, escaped) replaced = append(replaced, escaped)
r += 9 r += 9
} }
@ -573,20 +754,14 @@ func (p *parser) replaceEscapes(str string) string {
return string(replaced) return string(replaced)
} }
func (p *parser) asciiEscapeToUnicode(bs []byte) rune { func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
s := string(bs) s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil { if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+ p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
"lexer claims it's OK: %s", s, err)
} }
if !utf8.ValidRune(rune(hex)) { if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
} }
return rune(hex) return rune(hex)
} }
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}


@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
next := []field{{typ: t}} next := []field{{typ: t}}
// Count of queued names for current level and the next. // Count of queued names for current level and the next.
count := map[reflect.Type]int{} var count map[reflect.Type]int
nextCount := map[reflect.Type]int{} var nextCount map[reflect.Type]int
// Types already visited at an earlier level. // Types already visited at an earlier level.
visited := map[reflect.Type]bool{} visited := map[reflect.Type]bool{}


@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
return t1.typeString() == t2.typeString() return t1.typeString() == t2.typeString()
} }
func typeIsHash(t tomlType) bool { func typeIsTable(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
} }
@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable") panic("unreachable")
} }
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}


@ -15,7 +15,7 @@ type roffRenderer struct {
extensions blackfriday.Extensions extensions blackfriday.Extensions
listCounters []int listCounters []int
firstHeader bool firstHeader bool
defineTerm bool firstDD bool
listDepth int listDepth int
} }
@ -42,7 +42,8 @@ const (
quoteCloseTag = "\n.RE\n" quoteCloseTag = "\n.RE\n"
listTag = "\n.RS\n" listTag = "\n.RS\n"
listCloseTag = "\n.RE\n" listCloseTag = "\n.RE\n"
arglistTag = "\n.TP\n" dtTag = "\n.TP\n"
dd2Tag = "\n"
tableStart = "\n.TS\nallbox;\n" tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n" tableEnd = ".TE\n"
tableCellStart = "T{\n" tableCellStart = "T{\n"
@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
switch node.Type { switch node.Type {
case blackfriday.Text: case blackfriday.Text:
r.handleText(w, node, entering) escapeSpecialChars(w, node.Literal)
case blackfriday.Softbreak: case blackfriday.Softbreak:
out(w, crTag) out(w, crTag)
case blackfriday.Hardbreak: case blackfriday.Hardbreak:
@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
out(w, codeCloseTag) out(w, codeCloseTag)
case blackfriday.Table: case blackfriday.Table:
r.handleTable(w, node, entering) r.handleTable(w, node, entering)
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.TableHead: case blackfriday.TableHead:
case blackfriday.TableBody: case blackfriday.TableBody:
case blackfriday.TableRow: case blackfriday.TableRow:
// no action as cell entries do all the nroff formatting // no action as cell entries do all the nroff formatting
return blackfriday.GoToNext return blackfriday.GoToNext
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.HTMLSpan:
// ignore other HTML tags
default: default:
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
} }
return walkAction return walkAction
} }
func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
var (
start, end string
)
// handle special roff table cell text encapsulation
if node.Parent.Type == blackfriday.TableCell {
if len(node.Literal) > 30 {
start = tableCellStart
end = tableCellEnd
} else {
// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
if node.Parent.Next == nil && !node.Parent.IsHeader {
end = crTag
}
}
}
out(w, start)
escapeSpecialChars(w, node.Literal)
out(w, end)
}
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
if entering { if entering {
switch node.Level { switch node.Level {
@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
if node.ListFlags&blackfriday.ListTypeOrdered != 0 { if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
r.listCounters[len(r.listCounters)-1]++ r.listCounters[len(r.listCounters)-1]++
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
// DT (definition term): line just before DD (see below).
out(w, dtTag)
r.firstDD = true
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// state machine for handling terms and following definitions // DD (definition description): line that starts with ": ".
// since blackfriday does not distinguish them properly, nor //
// does it seperate them into separate lists as it should // We have to distinguish between the first DD and the
if !r.defineTerm { // subsequent ones, as there should be no vertical
out(w, arglistTag) // whitespace between the DT and the first DD.
r.defineTerm = true if r.firstDD {
r.firstDD = false
} else { } else {
r.defineTerm = false out(w, dd2Tag)
} }
} else { } else {
out(w, ".IP \\(bu 2\n") out(w, ".IP \\(bu 2\n")
@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering
} }
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
var (
start, end string
)
if node.IsHeader {
start = codespanTag
end = codespanCloseTag
}
if entering { if entering {
var start string
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
out(w, "\t"+start) start = "\t"
} else {
out(w, start)
} }
if node.IsHeader {
start += codespanTag
} else if nodeLiteralSize(node) > 30 {
start += tableCellStart
}
out(w, start)
} else { } else {
// need to carriage return if we are at the end of the header row var end string
if node.IsHeader && node.Next == nil { if node.IsHeader {
end = end + crTag end = codespanCloseTag
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
end += crTag
} }
out(w, end) out(w, end)
} }
} }
func nodeLiteralSize(node *blackfriday.Node) int {
total := 0
for n := node.FirstChild; n != nil; n = n.FirstChild {
total += len(n.Literal)
}
return total
}
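For reference, a sketch of the package entry point that exercises the definition-list handling above; the markdown input is illustrative and assumes blackfriday's definition-list syntax (a term line followed by a ": description" line) is enabled, as the .TP handling above suggests.

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	md := []byte("# sl-feeds 1\n\n" +
		"--config\n: Path to the configuration file.\n\n" +
		"--debug\n: Enable debug output.\n")

	// Render converts markdown to roff; definition terms should come out
	// as .TP macros per the handleItem logic above.
	fmt.Println(string(md2man.Render(md)))
}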
// because roff format requires knowing the column count before outputting any table // because roff format requires knowing the column count before outputting any table
// data, we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int { func countColumns(node *blackfriday.Node) int {
@ -309,15 +309,6 @@ func out(w io.Writer, output string) {
io.WriteString(w, output) // nolint: errcheck io.WriteString(w, output) // nolint: errcheck
} }
func needsBackslash(c byte) bool {
for _, r := range []byte("-_&\\~") {
if c == r {
return true
}
}
return false
}
func escapeSpecialChars(w io.Writer, text []byte) { func escapeSpecialChars(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ { for i := 0; i < len(text); i++ {
// escape initial apostrophe or period // escape initial apostrophe or period
@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) {
// directly copy normal characters // directly copy normal characters
org := i org := i
for i < len(text) && !needsBackslash(text[i]) { for i < len(text) && text[i] != '\\' {
i++ i++
} }
if i > org { if i > org {


@ -1,4 +1,5 @@
*.coverprofile *.coverprofile
coverage.txt
node_modules/ node_modules/
vendor vendor
.idea .idea

View file

@ -226,18 +226,23 @@ func reorderArgs(commandFlags []Flag, args []string) []string {
nextIndexMayContainValue := false nextIndexMayContainValue := false
for i, arg := range args { for i, arg := range args {
// dont reorder any args after a -- // if we're expecting an option-value, check if this arg is a value, in
// read about -- here: // which case it should be re-ordered next to its associated flag
// https://unix.stackexchange.com/questions/11376/what-does-double-dash-mean-also-known-as-bare-double-dash if nextIndexMayContainValue && !argIsFlag(commandFlags, arg) {
if arg == "--" {
remainingArgs = append(remainingArgs, args[i:]...)
break
// checks if this arg is a value that should be re-ordered next to its associated flag
} else if nextIndexMayContainValue && !strings.HasPrefix(arg, "-") {
nextIndexMayContainValue = false nextIndexMayContainValue = false
reorderedArgs = append(reorderedArgs, arg) reorderedArgs = append(reorderedArgs, arg)
} else if arg == "--" {
// don't reorder any args after the -- delimiter
// As described in the POSIX spec:
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap12.html#tag_12_02
// > Guideline 10:
// > The first -- argument that is not an option-argument should be accepted
// > as a delimiter indicating the end of options. Any following arguments
// > should be treated as operands, even if they begin with the '-' character.
// make sure the "--" delimiter itself is at the start
remainingArgs = append([]string{"--"}, remainingArgs...)
remainingArgs = append(remainingArgs, args[i+1:]...)
break
// checks if this is an arg that should be re-ordered // checks if this is an arg that should be re-ordered
} else if argIsFlag(commandFlags, arg) { } else if argIsFlag(commandFlags, arg) {
// we have determined that this is a flag that we should re-order // we have determined that this is a flag that we should re-order
@ -256,8 +261,9 @@ func reorderArgs(commandFlags []Flag, args []string) []string {
// argIsFlag checks if an arg is one of our command flags // argIsFlag checks if an arg is one of our command flags
func argIsFlag(commandFlags []Flag, arg string) bool { func argIsFlag(commandFlags []Flag, arg string) bool {
// checks if this is just a `-`, and so definitely not a flag if arg == "-" || arg == "--"{
if arg == "-" { // `-` is never a flag
// `--` is an option-value when following a flag, and a delimiter indicating the end of options in other cases.
return false return false
} }
// flags always start with a - // flags always start with a -
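For reference, a sketch of the user-facing behaviour of the "--" delimiter in urfave/cli v1; the app, flag, and argument names are made up.

package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "verbose"},
	}
	app.Action = func(c *cli.Context) error {
		// "-not-a-flag" follows the "--" delimiter, so it is treated as an
		// operand rather than parsed as a flag.
		fmt.Println("verbose:", c.Bool("verbose"))
		fmt.Println("args:", c.Args())
		return nil
	}

	if err := app.Run([]string{"demo", "--verbose", "--", "-not-a-flag"}); err != nil {
		log.Fatal(err)
	}
}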


@ -338,9 +338,11 @@ func flagFromFileEnv(filePath, envName string) (val string, ok bool) {
} }
} }
for _, fileVar := range strings.Split(filePath, ",") { for _, fileVar := range strings.Split(filePath, ",") {
if fileVar != "" {
if data, err := ioutil.ReadFile(fileVar); err == nil { if data, err := ioutil.ReadFile(fileVar); err == nil {
return string(data), true return string(data), true
} }
} }
}
return "", false return "", false
} }
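For reference, a sketch of the user-facing feature behind flagFromFileEnv in urfave/cli v1: a flag default can come from an environment variable or from the first readable file in a comma-separated list, and empty list entries are now skipped. The flag name, variable, and paths are made up.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:     "token",
			EnvVar:   "SLFEEDS_TOKEN",             // environment variable, checked first (assumption)
			FilePath: "/etc/sl-feeds/token,token", // comma-separated fallback files
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("token:", c.String("token"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}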

vendor/modules.txt vendored

@ -1,10 +1,11 @@
# github.com/BurntSushi/toml v0.3.1 # github.com/BurntSushi/toml v1.1.0
github.com/BurntSushi/toml github.com/BurntSushi/toml
# github.com/cpuguy83/go-md2man/v2 v2.0.0 github.com/BurntSushi/toml/internal
# github.com/cpuguy83/go-md2man/v2 v2.0.2
github.com/cpuguy83/go-md2man/v2/md2man github.com/cpuguy83/go-md2man/v2/md2man
# github.com/gorilla/feeds v1.1.1 # github.com/gorilla/feeds v1.1.1
github.com/gorilla/feeds github.com/gorilla/feeds
# github.com/russross/blackfriday/v2 v2.1.0 # github.com/russross/blackfriday/v2 v2.1.0
github.com/russross/blackfriday/v2 github.com/russross/blackfriday/v2
# github.com/urfave/cli v1.22.5 # github.com/urfave/cli v1.22.7
github.com/urfave/cli github.com/urfave/cli