Merge pull request #1013 from rhatdan/vendor

Vendor in latest containers/storage
Commit 24f131584b by Daniel J Walsh, 2017-10-16 06:12:34 -04:00, committed by GitHub
36 changed files with 11686 additions and 56 deletions


@ -7,7 +7,7 @@ github.com/sirupsen/logrus v1.0.0
github.com/containers/image 57b257d128d6075ea3287991ee408d24c7bd2758
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/ostreedev/ostree-go master
github.com/containers/storage 64bf27465d0d1edd89e7a4ce49866fea01145782
github.com/containers/storage d7921c6facc516358070a1306689eda18adaa20a
github.com/containernetworking/cni v0.4.0
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
github.com/opencontainers/selinux v1.0.0-rc1
@ -101,3 +101,4 @@ github.com/go-zoo/bone 031b4005dfe248ccba241a0c9de0f9e112fd6b7c
github.com/soheilhy/cmux v0.1.3
github.com/hashicorp/go-multierror 83588e72410abfbe4df460eeb6f30841ae47d4c4
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac


@ -10,6 +10,8 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// A Container is a reference to a read-write layer with metadata.
@ -44,6 +46,10 @@ type Container struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this container was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
@ -133,6 +139,7 @@ func (r *containerStore) Load() error {
ids := make(map[string]*Container)
names := make(map[string]*Container)
if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(containers))
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
@ -223,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
if !ok {
return ErrContainerUnknown
}
if container.Flags == nil {
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
return r.Save()
}
@ -254,6 +264,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
}
@ -362,6 +373,9 @@ func (r *containerStore) Exists(id string) bool {
}
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
}
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
@ -370,6 +384,21 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
}
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
}
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
@ -377,9 +406,39 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
}
}
return -1, ErrSizeUnknown
}
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
}
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown
}
func (r *containerStore) BigDataNames(id string) ([]string, error) {
c, ok := r.lookup(id)
if !ok {
@ -389,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
}
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
}
c, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
@ -399,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
if err == nil {
save := false
oldSize, ok := c.BigDataSizes[key]
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := c.BigDataSizes[key]
c.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != c.BigDataSizes[key] {
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := c.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
c.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
add := true
addName := true
for _, name := range c.BigDataNames {
if name == key {
add = false
addName = false
break
}
}
if add {
if addName {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
@ -423,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
}
func (r *containerStore) Wipe() error {
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
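The guards above (and their twins in images.go further down) exist because records serialized by older versions of the library deserialize with nil `BigDataSizes`, `BigDataDigests`, and `Flags` maps, so every write path has to create them on demand. A minimal sketch of that lazy-initialization plus digest computation, using a hypothetical `record` type in place of `Container`:

```Go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// record is a hypothetical stand-in for a Container (or Image) loaded
// from JSON written by an older library version, where the new maps
// may be nil.
type record struct {
	BigDataSizes   map[string]int64
	BigDataDigests map[string]digest.Digest
}

func (r *record) setBigData(key string, data []byte) {
	// Lazily create the maps so old records can be updated in place.
	if r.BigDataSizes == nil {
		r.BigDataSizes = make(map[string]int64)
	}
	if r.BigDataDigests == nil {
		r.BigDataDigests = make(map[string]digest.Digest)
	}
	r.BigDataSizes[key] = int64(len(data))
	// digest.Canonical is SHA-256; FromBytes hashes the payload.
	r.BigDataDigests[key] = digest.Canonical.FromBytes(data)
}

func main() {
	r := &record{} // nil maps, as loaded from an old record
	r.setBigData("config", []byte(`{"cmd":["sh"]}`))
	fmt.Println(r.BigDataSizes["config"], r.BigDataDigests["config"])
}
```

Since `digest.Canonical` is SHA-256, the digests recorded this way match the content-addressing scheme used elsewhere in containers/storage.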

vendor/github.com/containers/storage/containers_ffjson.go (generated, vendored, new file, 1194 lines): diff suppressed because it is too large.


@ -94,7 +94,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
// are extracted from tar's with full second precision on modified time.
// We need this hack here to make sure calls within same second receive
// correct result.
time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
return err
}), nil
}
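Both spellings of the sleep in this hunk compute the time remaining until the next whole-second boundary after `startTime`; `time.Until(t)` is simply shorthand for `t.Sub(time.Now())` (available since Go 1.8). A small sketch showing the equivalence, with no claim about which form the vendored copy keeps:

```Go
package main

import (
	"fmt"
	"time"
)

func main() {
	startTime := time.Now()
	// Next whole-second boundary after startTime; sleeping until then
	// guarantees later modification times differ at tar's one-second precision.
	next := startTime.Truncate(time.Second).Add(time.Second)

	fmt.Println(next.Sub(time.Now())) // explicit form
	fmt.Println(time.Until(next))     // equivalent shorthand, Go 1.8+
}
```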


@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@ -15,10 +16,11 @@ import (
"golang.org/x/sys/unix"
)
// hasOpaqueCopyUpBug checks whether the filesystem has a bug
// doesSupportNativeDiff checks whether the filesystem has a bug
// which copies up the opaque flag when copying up an opaque
// directory. When this bug exists naive diff should be used.
func hasOpaqueCopyUpBug(d string) error {
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d string) error {
td, err := ioutil.TempDir(d, "opaque-bug-check")
if err != nil {
return err
@ -29,10 +31,13 @@ func hasOpaqueCopyUpBug(d string) error {
}
}()
// Make directories l1/d, l2/d, l3, work, merged
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
return err
}
@ -75,5 +80,23 @@ func hasOpaqueCopyUpBug(d string) error {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
}
// rename "d1" to "d2"
if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
// if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
return errors.Wrap(err, "failed to rename dir in merged directory")
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
if err != nil {
return errors.Wrap(err, "failed to read redirect flag on upper layer")
}
if string(xattrRedirect) == "d1" {
return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
}
return nil
}


@ -49,7 +49,6 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)


@ -228,7 +228,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
key = strings.ToLower(key)
switch key {
case "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: overide_kernelcheck=%s", val)
logrus.Debugf("overlay: override_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
@ -287,8 +287,8 @@ func supportsOverlay() error {
func useNaiveDiff(home string) bool {
useNaiveDiffLock.Do(func() {
if err := hasOpaqueCopyUpBug(home); err != nil {
logrus.Warnf("Not using native diff for overlay: %v", err)
if err := doesSupportNativeDiff(home); err != nil {
logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
useNaiveDiffOnly = true
}
})
@ -654,8 +654,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := unix.Unmount(mountpoint, unix.MNT_DETACH)
if err != nil {
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
}
return nil


@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
@ -25,13 +26,18 @@ func init() {
// This sets the home directory for the driver and returns NaiveDiffDriver.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
d := &Driver{
home: home,
homes: []string{home},
idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
}
rootIDs := d.idMappings.RootPair()
if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
return nil, err
}
for _, option := range options {
if strings.HasPrefix(option, "vfs.imagestore=") {
d.homes = append(d.homes, strings.Split(option[15:], ",")...)
}
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
@ -40,7 +46,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
home string
homes []string
idMappings *idtools.IDMappings
}
@ -98,7 +104,17 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
}
func (d *Driver) dir(id string) string {
return filepath.Join(d.home, "dir", filepath.Base(id))
for i, home := range d.homes {
if i > 0 {
home = filepath.Join(home, d.String())
}
candidate := filepath.Join(home, "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
}
}
return filepath.Join(d.homes[0], "dir", filepath.Base(id))
}
// Remove deletes the content from the directory for a given id.
@ -132,5 +148,8 @@ func (d *Driver) Exists(id string) bool {
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {
return d.homes[1:]
}
return nil
}
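The `vfs.imagestore=` option above lets the vfs driver search additional read-only homes in `dir()` before falling back to its primary home. A sketch of how a caller might pass it when opening a store; the `StoreOptions` field names follow the containers/storage API of this era, and the paths are purely illustrative:

```Go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	opts := storage.StoreOptions{
		RunRoot:         "/var/run/containers/storage",
		GraphRoot:       "/var/lib/containers/storage",
		GraphDriverName: "vfs",
		GraphDriverOptions: []string{
			// Comma-separated extra image store(s); the driver appends
			// them to d.homes and consults them when resolving layer dirs.
			"vfs.imagestore=/usr/share/containers/storage",
		},
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("using driver:", store.GraphDriverName())
}
```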


@ -49,4 +49,8 @@ var (
ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
// ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty.
ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
// ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
ErrDigestUnknown = errors.New("could not compute digest of item")
)
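The two new sentinel errors are returned wrapped via github.com/pkg/errors, so callers are expected to unwrap with `errors.Cause` before comparing. A hypothetical caller, sketched under that assumption:

```Go
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/pkg/errors"
)

// handleDigest is a hypothetical helper showing how the new sentinel
// errors are meant to be checked: returned values are wrapped with
// pkg/errors, so unwrap with errors.Cause before comparing.
func handleDigest(d string, err error) {
	switch errors.Cause(err) {
	case nil:
		fmt.Println("digest:", d)
	case storage.ErrDigestUnknown:
		fmt.Println("digest not recorded yet for this item")
	case storage.ErrInvalidBigDataName:
		fmt.Println("big data items must have a non-empty name")
	default:
		fmt.Println("unexpected error:", err)
	}
}

func main() {
	handleDigest("", storage.ErrDigestUnknown)
}
```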


@ -10,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -42,6 +43,10 @@ type Image struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this image was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
@ -136,6 +141,7 @@ func (r *imageStore) Load() error {
ids := make(map[string]*Image)
names := make(map[string]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(images))
for n, image := range images {
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
@ -252,6 +258,9 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !ok {
return ErrImageUnknown
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
}
image.Flags[flag] = value
return r.Save()
}
@ -288,6 +297,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
}
@ -402,6 +412,9 @@ func (r *imageStore) Exists(id string) bool {
}
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
}
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
@ -410,6 +423,21 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
}
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
@ -417,9 +445,39 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
}
}
return -1, ErrSizeUnknown
}
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
}
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown
}
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
@ -429,6 +487,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
}
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
}
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
}
@ -441,20 +502,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
add := true
save := false
oldSize, ok := image.BigDataSizes[key]
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := image.BigDataSizes[key]
image.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != image.BigDataSizes[key] {
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range image.BigDataNames {
if name == key {
add = false
addName = false
break
}
}
if add {
if addName {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
@ -469,7 +539,7 @@ func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}

vendor/github.com/containers/storage/images_ffjson.go (generated, vendored, new file, 1148 lines): diff suppressed because it is too large.


@ -254,6 +254,7 @@ func (r *layerStore) Load() error {
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(layers))
for n, layer := range layers {
ids[layer.ID] = layers[n]
idlist = append(idlist, layer.ID)
@ -305,6 +306,9 @@ func (r *layerStore) Load() error {
// actually delete.
if r.IsReadWrite() {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b {
err = r.Delete(layer.ID)
@ -338,7 +342,7 @@ func (r *layerStore) Save() error {
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err
}
mounts := []layerMountPoint{}
mounts := make([]layerMountPoint, 0, len(r.layers))
for _, layer := range r.layers {
if layer.MountPoint != "" && layer.MountCount > 0 {
mounts = append(mounts, layerMountPoint{
@ -455,6 +459,9 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !ok {
return ErrLayerUnknown
}
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
layer.Flags[flag] = value
return r.Save()
}
@ -733,7 +740,7 @@ func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}

vendor/github.com/containers/storage/layers_ffjson.go (generated, vendored, new file, 1713 lines): diff suppressed because it is too large.


@ -0,0 +1,97 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/storage/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "storage-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}


@ -20,7 +20,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -87,6 +87,10 @@ type ROBigDataStore interface {
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
@ -327,6 +331,10 @@ type Store interface {
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data associated
// with an image.
SetImageBigData(id, key string, data []byte) error
@ -343,6 +351,10 @@ type Store interface {
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
@ -728,11 +740,14 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
if err != nil {
return nil, -1, err
}
rlstores, err := s.ROLayerStores()
if err != nil {
return nil, -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
@ -747,9 +762,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
id = stringid.GenerateRandomID()
}
if parent != "" {
if l, err := rlstore.Get(parent); err == nil && l != nil {
parent = l.ID
} else {
var ilayer *Layer
for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
parent = ilayer.ID
break
}
}
if ilayer == nil {
return nil, -1, ErrLayerUnknown
}
containers, err := rcstore.Containers()
@ -1026,6 +1047,30 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
ristore, err := s.ImageStore()
if err != nil {
return "", err
}
stores, err := s.ROImageStores()
if err != nil {
return "", err
}
stores = append([]ROImageStore{ristore}, stores...)
for _, ristore := range stores {
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
ristore.Load()
}
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
return d, nil
}
}
return "", ErrDigestUnknown
}
func (s *store) ImageBigData(id, key string) ([]byte, error) {
istore, err := s.ImageStore()
if err != nil {
@ -1089,10 +1134,22 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigDataSize(id, key)
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigDataDigest(id, key)
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
rcstore, err := s.ContainerStore()
if err != nil {
@ -1103,7 +1160,6 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigData(id, key)
}
@ -1117,7 +1173,6 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.SetBigData(id, key, data)
}
@ -1841,10 +1896,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
@ -2238,7 +2299,7 @@ func makeBigDataBaseName(key string) string {
}
func stringSliceWithoutValue(slice []string, value string) []string {
modified := []string{}
modified := make([]string, 0, len(slice))
for _, v := range slice {
if v == value {
continue

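Putting the store.go additions together: `ImageBigDataDigest` and `ContainerBigDataDigest` let a consumer (cri-o, for instance) obtain a content digest for a named big-data item without re-reading and re-hashing the blob, and the store only returns values that pass `Validate()`. A sketch of such a caller; the "manifest" key is illustrative, not an API constant:

```Go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

// printManifestDigest shows how a consumer might use the new accessor.
// Callers pick their own big-data key names when calling SetImageBigData;
// "manifest" here is only an example.
func printManifestDigest(store storage.Store, imageID string) error {
	d, err := store.ImageBigDataDigest(imageID, "manifest")
	if err != nil {
		return err
	}
	// d has already been validated by the store, so it can be used
	// directly, e.g. as a cache key or for content addressing.
	fmt.Printf("image %s manifest digest: %s\n", imageID, d)
	return nil
}

func main() {
	// Opening a real store needs writable storage roots; see the
	// vfs.imagestore sketch earlier for GetStore options.
	_ = printManifestDigest
}
```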

@ -3,7 +3,6 @@ github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
@ -19,3 +18,4 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac

vendor/github.com/pquerna/ffjson/LICENSE (generated, vendored, new file, 202 lines)

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/pquerna/ffjson/NOTICE (generated, vendored, new file, 8 lines)

@ -0,0 +1,8 @@
ffjson
Copyright (c) 2014, Paul Querna
This product includes software developed by
Paul Querna (http://paul.querna.org/).
Portions of this software were developed as
part of Go, Copyright (c) 2012 The Go Authors.

vendor/github.com/pquerna/ffjson/README.md (generated, vendored, new file, 232 lines)

@ -0,0 +1,232 @@
# ffjson: faster JSON for Go
[![Build Status](https://travis-ci.org/pquerna/ffjson.svg?branch=master)](https://travis-ci.org/pquerna/ffjson)
`ffjson` generates static `MarshalJSON` and `UnmarshalJSON` functions for structures in Go. The generated functions reduce the reliance upon runtime reflection to do serialization and are generally 2 to 3 times faster. In cases where `ffjson` doesn't understand a Type involved, it falls back to `encoding/json`, meaning it is a safe drop in replacement. By using `ffjson` your JSON serialization just gets faster with no additional code changes.
When you change your `struct`, you will need to run `ffjson` again (or make it part of your build tools).
## Blog Posts
* 2014-03-31: [First Release and Background](https://journal.paul.querna.org/articles/2014/03/31/ffjson-faster-json-in-go/)
## Getting Started
If `myfile.go` contains the `struct` types you would like to be faster, and assuming `GOPATH` is set to a reasonable value for an existing project (meaning that in this particular example if `myfile.go` is in the `myproject` directory, the project should be under `$GOPATH/src/myproject`), you can just run:
go get -u github.com/pquerna/ffjson
ffjson myfile.go
git add myfile_ffjson.go
## Performance Status:
* `MarshalJSON` is **2x to 3x** faster than `encoding/json`.
* `UnmarshalJSON` is **2x to 3x** faster than `encoding/json`.
## Features
* **Unmarshal Support:** Since v0.9, `ffjson` supports Unmarshaling of structures.
* **Drop in Replacement:** Because `ffjson` implements the interfaces already defined by `encoding/json` the performance enhancements are transparent to users of your structures.
* **Supports all types:** `ffjson` has native support for most of Go's types -- for any type it doesn't support with fast paths, it falls back to using `encoding/json`. This means all structures should work out of the box. If they don't, [open an issue!](https://github.com/pquerna/ffjson/issues)
* **ffjson: skip**: If you have a structure you want `ffjson` to ignore, add `ffjson: skip` to the doc string for this structure.
* **Extensive Tests:** `ffjson` contains an extensive test suite including fuzz'ing against the JSON parser.
# Using ffjson
`ffjson` generates code based upon existing `struct` types. For example, `ffjson foo.go` will by default create a new file `foo_ffjson.go` that contains serialization functions for all structs found in `foo.go`.
```
Usage of ffjson:
ffjson [options] [input_file]
ffjson generates Go code for optimized JSON serialization.
-go-cmd="": Path to go command; Useful for `goapp` support.
-import-name="": Override import name in case it cannot be detected.
-nodecoder: Do not generate decoder functions
-noencoder: Do not generate encoder functions
-w="": Write generate code to this path instead of ${input}_ffjson.go.
```
Your code must be in a compilable state for `ffjson` to work. If your code doesn't compile, ffjson will most likely exit with an error.
## Disabling code generation for structs
You might not want all your structs to have JSON code generated. To completely disable generation for a struct, add `ffjson: skip` to the struct comment. For example:
```Go
// ffjson: skip
type Foo struct {
Bar string
}
```
You can also choose not to have either the decoder or encoder generated by including `ffjson: nodecoder` or `ffjson: noencoder` in your comment. For instance, this will only generate the encoder (marshal) part for this struct:
```Go
// ffjson: nodecoder
type Foo struct {
Bar string
}
```
You can also disable encoders/decoders entirely for a file by using the `-noencoder`/`-nodecoder` commandline flags.
## Using ffjson with `go generate`
`ffjson` is a great fit with `go generate`. It allows you to specify the ffjson command inside your individual go files and run them all at once. This way you don't have to maintain a separate build file with the files you need to generate.
Add this comment anywhere inside your go files:
```Go
//go:generate ffjson $GOFILE
```
To re-generate ffjson for all files with the tag in a folder, simply execute:
```sh
go generate
```
To generate for the current package and all sub-packages, use:
```sh
go generate ./...
```
This is most of what you need to know about go generate, but you can read more about [go generate on the golang blog](http://blog.golang.org/generate).
## Should I include ffjson files in VCS?
That question is really up to you. If you don't, you will have a more complex build process. If you do, you have to keep the generated files updated if you change the content of your structs.
That said, ffjson operates deterministically, so it will generate the same code every time it runs; unless your code changes, the generated content should not change. Note, however, that this is only true if you are using the same ffjson version, so if several people are working on a project, you might need to synchronize your ffjson versions.
## Performance pitfalls
`ffjson` has a few cases where it will fall back to using the runtime encoder/decoder. Notable cases are:
* Interface struct members. Since it isn't possible to know the concrete type of these members before runtime, ffjson has to use the reflection-based coder.
* Structs with custom marshal/unmarshal.
* Map with a complex value. Simple types like `map[string]int` is fine though.
* Inline struct definitions `type A struct{B struct{ X int} }` are handled by the encoder, but currently has fallback in the decoder.
* Slices of slices / slices of maps currently fall back when generating the decoder.
## Reducing Garbage Collection
`ffjson` already does a lot to help garbage generation. However whenever you go through the json.Marshal you get a new byte slice back. On very high throughput servers this can lead to increased GC pressure.
### Tip 1: Use ffjson.Marshal() / ffjson.Unmarshal()
This is probably the easiest optimization for you. Instead of going through encoding/json, you can call ffjson. This will disable the checks that encoding/json does to the json when it receives it from struct functions.
```Go
import "github.com/pquerna/ffjson/ffjson"
// BEFORE:
buf, err := json.Marshal(&item)
// AFTER:
buf, err := ffjson.Marshal(&item)
```
This simple change is likely to double the speed of your encoding/decoding.
[![GoDoc][1]][2]
[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Marshal
### Tip 2: Pooling the buffer
On servers where you have a lot of concurrent encoding going on, you can hand back the byte buffer you get from json.Marshal once you are done using it. An example could look like this:
```Go
import "github.com/pquerna/ffjson/ffjson"
func Encode(item interface{}, out io.Writer) {
// Encode
buf, err := ffjson.Marshal(&item)
// Write the buffer
_,_ = out.Write(buf)
// We no longer need the buffer, so we pool it.
ffjson.Pool(buf)
}
```
Note that the buffers you put back in the pool can still be reclaimed by the garbage collector, so you won't risk your program building up a large memory footprint by pooling the buffers.
[![GoDoc][1]][2]
[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Pool
### Tip 3: Creating an Encoder
There might be cases where you need to encode many objects at once. This could be a server backing up, writing a lot of entries to files, etc.
To do this, there is an interface similar to `encoding/json`, that allow you to create a re-usable encoder. Here is an example where we want to encode an array of the `Item` type, with a comma between entries:
```Go
import "github.com/pquerna/ffjson/ffjson"
func EncodeItems(items []Item, out io.Writer) {
// We create an encoder.
enc := ffjson.NewEncoder(out)
for i, item := range items {
// Encode into the buffer
err := enc.Encode(&item)
// If err is nil, the content is written to out, so we can write to it as well.
if i != len(items) -1 {
_,_ = out.Write([]byte{','})
}
}
}
```
Documentation: [![GoDoc][1]][2]
[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Encoder
### Tip 4: Avoid interfaces
We don't want to dictate how you structure your data, but having interfaces in your code will make ffjson use the golang encoder for these. When ffjson has to do this, it may even become slower than using `json.Marshal` directly.
To see where that happens, search the generated `_ffjson.go` file for the text `Falling back`, which will indicate where ffjson is unable to generate code for your data structure.
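To make Tip 4 concrete, here is a pair of hypothetical structs: `ffjson` can generate fast paths for `Fast`, while `Slow`'s `interface{}` member forces the generated code to fall back to the reflection-based `encoding/json` path (search the generated file for `Falling back` to spot it):

```Go
// Hypothetical types illustrating the interface fallback described above.
type Fast struct {
	Name  string
	Count int
}

type Slow struct {
	Name    string
	Payload interface{} // concrete type unknown at generation time, so ffjson falls back here
}
```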
### Tip 5: `ffjson` all the things!
You should not only create ffjson code for your main struct, but also for any structs that are included or used in your JSON code.
So if your struct looks like this:
```Go
type Foo struct {
V Bar
}
```
You should also make sure that code is generated for `Bar` if it is placed in another file. Also note that currently it requires you to do this in order, since generating code for `Foo` will check if code for `Bar` exists. This is only an issue if `Foo` and `Bar` are placed in different files. We are currently working on allowing simultaneous generation of an entire package.
## Improvements, bugs, adding features, and taking ffjson new directions!
Please [open issues in Github](https://github.com/pquerna/ffjson/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :)
## Similar projects
* [go-codec](https://github.com/ugorji/go/tree/master/codec#readme). A very good project that also allows streaming en/decoding, but it requires explicit calls to the library to use.
* [megajson](https://github.com/benbjohnson/megajson). This has limited support, and development seems to have almost stopped at the time of writing.
# Credits
`ffjson` has received significant contributions from:
* [Klaus Post](https://github.com/klauspost)
* [Paul Querna](https://github.com/pquerna)
* [Erik Dubbelboer](https://github.com/erikdubbelboer)
## License
`ffjson` is licensed under the [Apache License, Version 2.0](./LICENSE)

vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go (generated, vendored, new file, 421 lines)

@ -0,0 +1,421 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
// Simple byte buffer for marshaling data.
import (
"bytes"
"encoding/json"
"errors"
"io"
"unicode/utf8"
)
type grower interface {
Grow(n int)
}
type truncater interface {
Truncate(n int)
Reset()
}
type bytesReader interface {
Bytes() []byte
String() string
}
type runeWriter interface {
WriteRune(r rune) (n int, err error)
}
type stringWriter interface {
WriteString(s string) (n int, err error)
}
type lener interface {
Len() int
}
type rewinder interface {
Rewind(n int) (err error)
}
type encoder interface {
Encode(interface{}) error
}
// TODO(pquerna): continue to reduce these interfaces
type EncodingBuffer interface {
io.Writer
io.WriterTo
io.ByteWriter
stringWriter
truncater
grower
rewinder
encoder
}
type DecodingBuffer interface {
io.ReadWriter
io.ByteWriter
stringWriter
runeWriter
truncater
grower
bytesReader
lener
}
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
encoder *json.Encoder
skipTrailingByte bool
}
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("fflib.v1.Buffer: too large")
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
return "<nil>"
}
return string(b.buf[b.off:])
}
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
if n == 0 {
b.off = 0
b.buf = b.buf[0:0]
} else {
b.buf = b.buf[0 : b.off+n]
}
}
// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
// If we have no buffer, get one from the pool
m := b.Len()
if m == 0 {
if b.buf == nil {
b.buf = makeSlice(2 * n)
b.off = 0
} else if b.off != 0 {
// If buffer is empty, reset to recover space.
b.Truncate(0)
}
}
if len(b.buf)+n > cap(b.buf) {
var buf []byte
if m+n <= cap(b.buf)/2 {
// We can slide things down instead of allocating a new
// slice. We only need m+n <= cap(b.buf) to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf[:], b.buf[b.off:])
buf = b.buf[:m]
} else {
// not enough space anywhere
buf = makeSlice(2*cap(b.buf) + n)
copy(buf, b.buf[b.off:])
Pool(b.buf)
b.buf = buf
}
b.off = 0
}
b.buf = b.buf[0 : b.off+m+n]
return b.off + m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("bytes.Buffer.Grow: negative count")
}
m := b.grow(n)
b.buf = b.buf[0:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
if b.skipTrailingByte {
p = p[:len(p)-1]
}
m := b.grow(len(p))
return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
m := b.grow(len(s))
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const minRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
// If buffer is empty, reset to recover space.
if b.off >= len(b.buf) {
b.Truncate(0)
}
for {
if free := cap(b.buf) - len(b.buf); free < minRead {
// not enough space at end
newBuf := b.buf
if b.off+free < minRead {
// not enough space using beginning of buffer;
// double buffer capacity
newBuf = makeSlice(2*cap(b.buf) + minRead)
}
copy(newBuf, b.buf[b.off:])
Pool(b.buf)
b.buf = newBuf[:len(b.buf)-b.off]
b.off = 0
}
m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
b.buf = b.buf[0 : len(b.buf)+m]
n += int64(m)
if e == io.EOF {
break
}
if e != nil {
return n, e
}
}
return n, nil // err is EOF, so return nil explicitly
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
if b.off < len(b.buf) {
nBytes := b.Len()
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("bytes.Buffer.WriteTo: invalid Write count")
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all bytes should have been written, by definition of
// Write method in io.Writer
if m != nBytes {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Truncate(0)
return
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
m := b.grow(1)
b.buf[m] = c
return nil
}
func (b *Buffer) Rewind(n int) error {
b.buf = b.buf[:len(b.buf)-n]
return nil
}
func (b *Buffer) Encode(v interface{}) error {
if b.encoder == nil {
b.encoder = json.NewEncoder(b)
}
b.skipTrailingByte = true
err := b.encoder.Encode(v)
b.skipTrailingByte = false
return err
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
if r < utf8.RuneSelf {
b.WriteByte(byte(r))
return 1, nil
}
n = utf8.EncodeRune(b.runeBytes[0:], r)
b.Write(b.runeBytes[0:n])
return n, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
if len(p) == 0 {
return
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
return
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, io.EOF
}
c = b.buf[b.off]
b.off++
return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, 0, io.EOF
}
c := b.buf[b.off]
if c < utf8.RuneSelf {
b.off++
return rune(c), 1, nil
}
r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n
return r, n, nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
i := bytes.IndexByte(b.buf[b.off:], delim)
end := b.off + i + 1
if i < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
slice, err := b.readSlice(delim)
return string(slice), err
}
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}
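// Illustrative sketch, not part of the vendored ffjson source: the read side
// mirrors bytes.Buffer, so a delimited field can be consumed with ReadString.
func exampleFirstField(record string) (string, error) {
	b := NewBufferString(record)
	return b.ReadString(',') // includes the ',' when found, io.EOF otherwise
}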

View file

@ -0,0 +1,11 @@
// +build !go1.3
package v1
// Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool.
func Pool(b []byte) {}
func makeSlice(n int) []byte {
return make([]byte, n)
}

View file

@ -0,0 +1,105 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.3
package v1
// Allocation pools for Buffers.
import "sync"
var pools [14]sync.Pool
var pool64 *sync.Pool
func init() {
var i uint
// TODO(pquerna): add science here around actual pool sizes.
for i = 6; i < 20; i++ {
n := 1 << i
pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) }
}
pool64 = &pools[0]
}
// poolNum returns the index of the pool whose buffers hold
// at least 'i' bytes, or -1 if 'i' is too large to pool.
func poolNum(i int) int {
// TODO(pquerna): convert to log2 w/ bsr asm instruction:
// <https://groups.google.com/forum/#!topic/golang-nuts/uAb5J1_y7ns>
if i <= 64 {
return 0
} else if i <= 128 {
return 1
} else if i <= 256 {
return 2
} else if i <= 512 {
return 3
} else if i <= 1024 {
return 4
} else if i <= 2048 {
return 5
} else if i <= 4096 {
return 6
} else if i <= 8192 {
return 7
} else if i <= 16384 {
return 8
} else if i <= 32768 {
return 9
} else if i <= 65536 {
return 10
} else if i <= 131072 {
return 11
} else if i <= 262144 {
return 12
} else if i <= 524288 {
return 13
} else {
return -1
}
}
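// Illustrative sketch, not part of the vendored ffjson source: poolNum maps a
// requested size to the smallest bucket that can satisfy it, e.g. poolNum(64) == 0,
// poolNum(65) == 1, poolNum(1024) == 4, and poolNum(600000) == -1 (too large to pool).
func examplePoolBucket(size int) (bucket int, pooled bool) {
	bucket = poolNum(size)
	return bucket, bucket != -1
}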
// Pool returns a buffer to the pool so it can be reused by other callers.
// The contents of the buffer must not be used after this call, since the
// slice may be handed out to other goroutines.
func Pool(b []byte) {
if b == nil {
return
}
c := cap(b)
// Our smallest buffer is 64 bytes, so we discard smaller buffers.
if c < 64 {
return
}
// A bucket guarantees AT LEAST its upper-bound size, so the incoming buffer
// must go into a bucket whose guarantee is no larger than its capacity.
// Halving the capacity before calling poolNum drops it into that next-lower
// bucket, and adding 2 first avoids rounding down when the capacity is
// exactly a power of 2, so such buffers still land in their own bucket.
pn := poolNum((c + 2) >> 1)
if pn != -1 {
pools[pn].Put(b[0:0])
}
// if we didn't have a slot for this []byte, we just drop it and let the GC
// take care of it.
}
// makeSlice allocates a slice of size n -- it will attempt to use a pool'ed
// instance whenever possible.
func makeSlice(n int) []byte {
if n <= 64 {
return pool64.Get().([]byte)[0:n]
}
pn := poolNum(n)
if pn != -1 {
return pools[pn].Get().([]byte)[0:n]
} else {
return make([]byte, n)
}
}
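// Illustrative sketch, not part of the vendored ffjson source: a slice obtained
// from makeSlice can be handed back to Pool once its contents are no longer
// needed, letting a later makeSlice call of similar size reuse the allocation.
func exampleReuse(n int) []byte {
	b := makeSlice(n) // may come out of a sync.Pool bucket
	// ... fill and consume b ...
	Pool(b)             // recycle; b must not be touched after this point
	return makeSlice(n) // likely reuses a pooled backing array
}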

88
vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's strconv/atoi.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
import (
"github.com/pquerna/ffjson/fflib/v1/internal"
)
func ParseFloat(s []byte, bitSize int) (f float64, err error) {
return internal.ParseFloat(s, bitSize)
}
// ParseUint is like ParseInt but for unsigned numbers, and operates on []byte
func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
if len(s) == 1 {
switch s[0] {
case '0':
return 0, nil
case '1':
return 1, nil
case '2':
return 2, nil
case '3':
return 3, nil
case '4':
return 4, nil
case '5':
return 5, nil
case '6':
return 6, nil
case '7':
return 7, nil
case '8':
return 8, nil
case '9':
return 9, nil
}
}
return internal.ParseUint(s, base, bitSize)
}
func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
if len(s) == 1 {
switch s[0] {
case '0':
return 0, nil
case '1':
return 1, nil
case '2':
return 2, nil
case '3':
return 3, nil
case '4':
return 4, nil
case '5':
return 5, nil
case '6':
return 6, nil
case '7':
return 7, nil
case '8':
return 8, nil
case '9':
return 9, nil
}
}
return internal.ParseInt(s, base, bitSize)
}
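// Illustrative sketch, not part of the vendored ffjson source: the single-byte
// switches above let the very common case of one-digit JSON numbers bypass the
// full strconv-style parse. Hypothetical usage:
func exampleParse() (uint64, int64, error) {
	u, err := ParseUint([]byte("7"), 10, 64) // single-digit fast path
	if err != nil {
		return 0, 0, err
	}
	i, err := ParseInt([]byte("-42"), 10, 64) // falls through to the full parser
	return u, i, err
}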

378
vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go generated vendored Normal file
View file

@ -0,0 +1,378 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Multiprecision decimal numbers.
// For floating-point formatting only; not general purpose.
// Only operations are assign and (binary) left/right shift.
// Can do binary floating point in multiprecision decimal precisely
// because 2 divides 10; cannot do decimal floating point
// in multiprecision binary precisely.
package v1
type decimal struct {
d [800]byte // digits
nd int // number of digits used
dp int // decimal point
neg bool
trunc bool // discarded nonzero digits beyond d[:nd]
}
func (a *decimal) String() string {
n := 10 + a.nd
if a.dp > 0 {
n += a.dp
}
if a.dp < 0 {
n += -a.dp
}
buf := make([]byte, n)
w := 0
switch {
case a.nd == 0:
return "0"
case a.dp <= 0:
// zeros fill space between decimal point and digits
buf[w] = '0'
w++
buf[w] = '.'
w++
w += digitZero(buf[w : w+-a.dp])
w += copy(buf[w:], a.d[0:a.nd])
case a.dp < a.nd:
// decimal point in middle of digits
w += copy(buf[w:], a.d[0:a.dp])
buf[w] = '.'
w++
w += copy(buf[w:], a.d[a.dp:a.nd])
default:
// zeros fill space between digits and decimal point
w += copy(buf[w:], a.d[0:a.nd])
w += digitZero(buf[w : w+a.dp-a.nd])
}
return string(buf[0:w])
}
func digitZero(dst []byte) int {
for i := range dst {
dst[i] = '0'
}
return len(dst)
}
// trim trailing zeros from number.
// (They are meaningless; the decimal point is tracked
// independent of the number of digits.)
func trim(a *decimal) {
for a.nd > 0 && a.d[a.nd-1] == '0' {
a.nd--
}
if a.nd == 0 {
a.dp = 0
}
}
// Assign v to a.
func (a *decimal) Assign(v uint64) {
var buf [24]byte
// Write reversed decimal in buf.
n := 0
for v > 0 {
v1 := v / 10
v -= 10 * v1
buf[n] = byte(v + '0')
n++
v = v1
}
// Reverse again to produce forward decimal in a.d.
a.nd = 0
for n--; n >= 0; n-- {
a.d[a.nd] = buf[n]
a.nd++
}
a.dp = a.nd
trim(a)
}
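// Illustrative sketch, not part of the vendored source: after Assign(12300) the
// stored digits are trimmed to d = "123" with nd = 3 and dp = 5, i.e. the value
// 0.123 * 10^5; String renders it back as "12300".
func exampleAssign(v uint64) string {
	var d decimal
	d.Assign(v)
	return d.String()
}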
// Maximum shift that we can do in one pass without overflow.
// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
const maxShift = 27
// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
func rightShift(a *decimal, k uint) {
r := 0 // read pointer
w := 0 // write pointer
// Pick up enough leading digits to cover first shift.
n := 0
for ; n>>k == 0; r++ {
if r >= a.nd {
if n == 0 {
// a == 0; shouldn't get here, but handle anyway.
a.nd = 0
return
}
for n>>k == 0 {
n = n * 10
r++
}
break
}
c := int(a.d[r])
n = n*10 + c - '0'
}
a.dp -= r - 1
// Pick up a digit, put down a digit.
for ; r < a.nd; r++ {
c := int(a.d[r])
dig := n >> k
n -= dig << k
a.d[w] = byte(dig + '0')
w++
n = n*10 + c - '0'
}
// Put down extra digits.
for n > 0 {
dig := n >> k
n -= dig << k
if w < len(a.d) {
a.d[w] = byte(dig + '0')
w++
} else if dig > 0 {
a.trunc = true
}
n = n * 10
}
a.nd = w
trim(a)
}
// Cheat sheet for left shift: table indexed by shift count giving
// number of new digits that will be introduced by that shift.
//
// For example, leftcheats[4] = {2, "625"}. That means that
// if we are shifting by 4 (multiplying by 16), it will add 2 digits
// when the string prefix is "625" through "999", and one fewer digit
// if the string prefix is "000" through "624".
//
// Credit for this trick goes to Ken.
type leftCheat struct {
delta int // number of new digits
cutoff string // minus one digit if original < a.
}
var leftcheats = []leftCheat{
// Leading digits of 1/2^i = 5^i.
// 5^23 is not an exact 64-bit floating point number,
// so have to use bc for the math.
/*
seq 27 | sed 's/^/5^/' | bc |
awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
{
log2 = log(2)/log(10)
printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
int(log2*NR+1), $0, 2**NR)
}'
*/
{0, ""},
{1, "5"}, // * 2
{1, "25"}, // * 4
{1, "125"}, // * 8
{2, "625"}, // * 16
{2, "3125"}, // * 32
{2, "15625"}, // * 64
{3, "78125"}, // * 128
{3, "390625"}, // * 256
{3, "1953125"}, // * 512
{4, "9765625"}, // * 1024
{4, "48828125"}, // * 2048
{4, "244140625"}, // * 4096
{4, "1220703125"}, // * 8192
{5, "6103515625"}, // * 16384
{5, "30517578125"}, // * 32768
{5, "152587890625"}, // * 65536
{6, "762939453125"}, // * 131072
{6, "3814697265625"}, // * 262144
{6, "19073486328125"}, // * 524288
{7, "95367431640625"}, // * 1048576
{7, "476837158203125"}, // * 2097152
{7, "2384185791015625"}, // * 4194304
{7, "11920928955078125"}, // * 8388608
{8, "59604644775390625"}, // * 16777216
{8, "298023223876953125"}, // * 33554432
{8, "1490116119384765625"}, // * 67108864
{9, "7450580596923828125"}, // * 134217728
}
// Is the leading prefix of b lexicographically less than s?
func prefixIsLessThan(b []byte, s string) bool {
for i := 0; i < len(s); i++ {
if i >= len(b) {
return true
}
if b[i] != s[i] {
return b[i] < s[i]
}
}
return false
}
// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow.
func leftShift(a *decimal, k uint) {
delta := leftcheats[k].delta
if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
delta--
}
r := a.nd // read index
w := a.nd + delta // write index
n := 0
// Pick up a digit, put down a digit.
for r--; r >= 0; r-- {
n += (int(a.d[r]) - '0') << k
quo := n / 10
rem := n - 10*quo
w--
if w < len(a.d) {
a.d[w] = byte(rem + '0')
} else if rem != 0 {
a.trunc = true
}
n = quo
}
// Put down extra digits.
for n > 0 {
quo := n / 10
rem := n - 10*quo
w--
if w < len(a.d) {
a.d[w] = byte(rem + '0')
} else if rem != 0 {
a.trunc = true
}
n = quo
}
a.nd += delta
if a.nd >= len(a.d) {
a.nd = len(a.d)
}
a.dp += delta
trim(a)
}
// Binary shift left (k > 0) or right (k < 0).
func (a *decimal) Shift(k int) {
switch {
case a.nd == 0:
// nothing to do: a == 0
case k > 0:
for k > maxShift {
leftShift(a, maxShift)
k -= maxShift
}
leftShift(a, uint(k))
case k < 0:
for k < -maxShift {
rightShift(a, maxShift)
k += maxShift
}
rightShift(a, uint(-k))
}
}
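// Illustrative sketch, not part of the vendored source: Shift multiplies (k > 0)
// or divides (k < 0) the stored value by 2^|k| while keeping it in decimal form.
func exampleShift() string {
	var d decimal
	d.Assign(5)
	d.Shift(2)        // 5 * 2^2 = 20
	return d.String() // "20"
}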
// If we chop a at nd digits, should we round up?
func shouldRoundUp(a *decimal, nd int) bool {
if nd < 0 || nd >= a.nd {
return false
}
if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
// if we truncated, a little higher than what's recorded - always round up
if a.trunc {
return true
}
return nd > 0 && (a.d[nd-1]-'0')%2 != 0
}
// not halfway - digit tells all
return a.d[nd] >= '5'
}
// Round a to nd digits (or fewer).
// If nd is zero, it means we're rounding
// just to the left of the digits, as in
// 0.09 -> 0.1.
func (a *decimal) Round(nd int) {
if nd < 0 || nd >= a.nd {
return
}
if shouldRoundUp(a, nd) {
a.RoundUp(nd)
} else {
a.RoundDown(nd)
}
}
// Round a down to nd digits (or fewer).
func (a *decimal) RoundDown(nd int) {
if nd < 0 || nd >= a.nd {
return
}
a.nd = nd
trim(a)
}
// Round a up to nd digits (or fewer).
func (a *decimal) RoundUp(nd int) {
if nd < 0 || nd >= a.nd {
return
}
// round up
for i := nd - 1; i >= 0; i-- {
c := a.d[i]
if c < '9' { // can stop after this digit
a.d[i]++
a.nd = i + 1
return
}
}
// Number is all 9s.
// Change to single 1 with adjusted decimal point.
a.d[0] = '1'
a.nd = 1
a.dp++
}
// Extract integer part, rounded appropriately.
// No guarantees about overflow.
func (a *decimal) RoundedInteger() uint64 {
if a.dp > 20 {
return 0xFFFFFFFFFFFFFFFF
}
var i int
n := uint64(0)
for i = 0; i < a.dp && i < a.nd; i++ {
n = n*10 + uint64(a.d[i]-'0')
}
for ; i < a.dp; i++ {
n *= 10
}
if shouldRoundUp(a, a.dp) {
n++
}
return n
}
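// Illustrative sketch, not part of the vendored source: Round keeps nd significant
// digits using round-half-to-even, so 125 rounds to 120 while 135 rounds to 140.
func exampleRoundHalfEven() (string, string) {
	var a, b decimal
	a.Assign(125)
	a.Round(2) // halfway, preceding digit even: rounds down
	b.Assign(135)
	b.Round(2) // halfway, preceding digit odd: rounds up
	return a.String(), b.String() // "120", "140"
}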

668
vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go generated vendored Normal file
View file

@ -0,0 +1,668 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
// An extFloat represents an extended floating-point number, with more
// precision than a float64. It does not try to save bits: the
// number represented by the structure is mant*(2^exp), with a negative
// sign if neg is true.
type extFloat struct {
mant uint64
exp int
neg bool
}
// Powers of ten taken from double-conversion library.
// http://code.google.com/p/double-conversion/
const (
firstPowerOfTen = -348
stepPowerOfTen = 8
)
var smallPowersOfTen = [...]extFloat{
{1 << 63, -63, false}, // 1
{0xa << 60, -60, false}, // 1e1
{0x64 << 57, -57, false}, // 1e2
{0x3e8 << 54, -54, false}, // 1e3
{0x2710 << 50, -50, false}, // 1e4
{0x186a0 << 47, -47, false}, // 1e5
{0xf4240 << 44, -44, false}, // 1e6
{0x989680 << 40, -40, false}, // 1e7
}
var powersOfTen = [...]extFloat{
{0xfa8fd5a0081c0288, -1220, false}, // 10^-348
{0xbaaee17fa23ebf76, -1193, false}, // 10^-340
{0x8b16fb203055ac76, -1166, false}, // 10^-332
{0xcf42894a5dce35ea, -1140, false}, // 10^-324
{0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
{0xe61acf033d1a45df, -1087, false}, // 10^-308
{0xab70fe17c79ac6ca, -1060, false}, // 10^-300
{0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
{0xbe5691ef416bd60c, -1007, false}, // 10^-284
{0x8dd01fad907ffc3c, -980, false}, // 10^-276
{0xd3515c2831559a83, -954, false}, // 10^-268
{0x9d71ac8fada6c9b5, -927, false}, // 10^-260
{0xea9c227723ee8bcb, -901, false}, // 10^-252
{0xaecc49914078536d, -874, false}, // 10^-244
{0x823c12795db6ce57, -847, false}, // 10^-236
{0xc21094364dfb5637, -821, false}, // 10^-228
{0x9096ea6f3848984f, -794, false}, // 10^-220
{0xd77485cb25823ac7, -768, false}, // 10^-212
{0xa086cfcd97bf97f4, -741, false}, // 10^-204
{0xef340a98172aace5, -715, false}, // 10^-196
{0xb23867fb2a35b28e, -688, false}, // 10^-188
{0x84c8d4dfd2c63f3b, -661, false}, // 10^-180
{0xc5dd44271ad3cdba, -635, false}, // 10^-172
{0x936b9fcebb25c996, -608, false}, // 10^-164
{0xdbac6c247d62a584, -582, false}, // 10^-156
{0xa3ab66580d5fdaf6, -555, false}, // 10^-148
{0xf3e2f893dec3f126, -529, false}, // 10^-140
{0xb5b5ada8aaff80b8, -502, false}, // 10^-132
{0x87625f056c7c4a8b, -475, false}, // 10^-124
{0xc9bcff6034c13053, -449, false}, // 10^-116
{0x964e858c91ba2655, -422, false}, // 10^-108
{0xdff9772470297ebd, -396, false}, // 10^-100
{0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92
{0xf8a95fcf88747d94, -343, false}, // 10^-84
{0xb94470938fa89bcf, -316, false}, // 10^-76
{0x8a08f0f8bf0f156b, -289, false}, // 10^-68
{0xcdb02555653131b6, -263, false}, // 10^-60
{0x993fe2c6d07b7fac, -236, false}, // 10^-52
{0xe45c10c42a2b3b06, -210, false}, // 10^-44
{0xaa242499697392d3, -183, false}, // 10^-36
{0xfd87b5f28300ca0e, -157, false}, // 10^-28
{0xbce5086492111aeb, -130, false}, // 10^-20
{0x8cbccc096f5088cc, -103, false}, // 10^-12
{0xd1b71758e219652c, -77, false}, // 10^-4
{0x9c40000000000000, -50, false}, // 10^4
{0xe8d4a51000000000, -24, false}, // 10^12
{0xad78ebc5ac620000, 3, false}, // 10^20
{0x813f3978f8940984, 30, false}, // 10^28
{0xc097ce7bc90715b3, 56, false}, // 10^36
{0x8f7e32ce7bea5c70, 83, false}, // 10^44
{0xd5d238a4abe98068, 109, false}, // 10^52
{0x9f4f2726179a2245, 136, false}, // 10^60
{0xed63a231d4c4fb27, 162, false}, // 10^68
{0xb0de65388cc8ada8, 189, false}, // 10^76
{0x83c7088e1aab65db, 216, false}, // 10^84
{0xc45d1df942711d9a, 242, false}, // 10^92
{0x924d692ca61be758, 269, false}, // 10^100
{0xda01ee641a708dea, 295, false}, // 10^108
{0xa26da3999aef774a, 322, false}, // 10^116
{0xf209787bb47d6b85, 348, false}, // 10^124
{0xb454e4a179dd1877, 375, false}, // 10^132
{0x865b86925b9bc5c2, 402, false}, // 10^140
{0xc83553c5c8965d3d, 428, false}, // 10^148
{0x952ab45cfa97a0b3, 455, false}, // 10^156
{0xde469fbd99a05fe3, 481, false}, // 10^164
{0xa59bc234db398c25, 508, false}, // 10^172
{0xf6c69a72a3989f5c, 534, false}, // 10^180
{0xb7dcbf5354e9bece, 561, false}, // 10^188
{0x88fcf317f22241e2, 588, false}, // 10^196
{0xcc20ce9bd35c78a5, 614, false}, // 10^204
{0x98165af37b2153df, 641, false}, // 10^212
{0xe2a0b5dc971f303a, 667, false}, // 10^220
{0xa8d9d1535ce3b396, 694, false}, // 10^228
{0xfb9b7cd9a4a7443c, 720, false}, // 10^236
{0xbb764c4ca7a44410, 747, false}, // 10^244
{0x8bab8eefb6409c1a, 774, false}, // 10^252
{0xd01fef10a657842c, 800, false}, // 10^260
{0x9b10a4e5e9913129, 827, false}, // 10^268
{0xe7109bfba19c0c9d, 853, false}, // 10^276
{0xac2820d9623bf429, 880, false}, // 10^284
{0x80444b5e7aa7cf85, 907, false}, // 10^292
{0xbf21e44003acdd2d, 933, false}, // 10^300
{0x8e679c2f5e44ff8f, 960, false}, // 10^308
{0xd433179d9c8cb841, 986, false}, // 10^316
{0x9e19db92b4e31ba9, 1013, false}, // 10^324
{0xeb96bf6ebadf77d9, 1039, false}, // 10^332
{0xaf87023b9bf0ee6b, 1066, false}, // 10^340
}
// floatBits returns the bits of the float64 that best approximates
// the extFloat passed as receiver. Overflow is set to true if
// the resulting float64 is ±Inf.
func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
f.Normalize()
exp := f.exp + 63
// Exponent too small.
if exp < flt.bias+1 {
n := flt.bias + 1 - exp
f.mant >>= uint(n)
exp += n
}
// Extract 1+flt.mantbits bits from the 64-bit mantissa.
mant := f.mant >> (63 - flt.mantbits)
if f.mant&(1<<(62-flt.mantbits)) != 0 {
// Round up.
mant += 1
}
// Rounding might have added a bit; shift down.
if mant == 2<<flt.mantbits {
mant >>= 1
exp++
}
// Infinities.
if exp-flt.bias >= 1<<flt.expbits-1 {
// ±Inf
mant = 0
exp = 1<<flt.expbits - 1 + flt.bias
overflow = true
} else if mant&(1<<flt.mantbits) == 0 {
// Denormalized?
exp = flt.bias
}
// Assemble bits.
bits = mant & (uint64(1)<<flt.mantbits - 1)
bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
if f.neg {
bits |= 1 << (flt.mantbits + flt.expbits)
}
return
}
// AssignComputeBounds sets f to the floating point value
// defined by mant, exp and precision given by flt. It returns
// lower, upper such that any number in the closed interval
// [lower, upper] is converted back to the same floating point number.
func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
f.mant = mant
f.exp = exp - int(flt.mantbits)
f.neg = neg
if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
// An exact integer
f.mant >>= uint(-f.exp)
f.exp = 0
return *f, *f
}
expBiased := exp - flt.bias
upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
if mant != 1<<flt.mantbits || expBiased == 1 {
lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
} else {
lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
}
return
}
// Normalize normalizes f so that the highest bit of the mantissa is
// set, and returns the number by which the mantissa was left-shifted.
func (f *extFloat) Normalize() (shift uint) {
mant, exp := f.mant, f.exp
if mant == 0 {
return 0
}
if mant>>(64-32) == 0 {
mant <<= 32
exp -= 32
}
if mant>>(64-16) == 0 {
mant <<= 16
exp -= 16
}
if mant>>(64-8) == 0 {
mant <<= 8
exp -= 8
}
if mant>>(64-4) == 0 {
mant <<= 4
exp -= 4
}
if mant>>(64-2) == 0 {
mant <<= 2
exp -= 2
}
if mant>>(64-1) == 0 {
mant <<= 1
exp -= 1
}
shift = uint(f.exp - exp)
f.mant, f.exp = mant, exp
return
}
// Multiply sets f to the product f*g: the result is correctly rounded,
// but not normalized.
func (f *extFloat) Multiply(g extFloat) {
fhi, flo := f.mant>>32, uint64(uint32(f.mant))
ghi, glo := g.mant>>32, uint64(uint32(g.mant))
// Cross products.
cross1 := fhi * glo
cross2 := flo * ghi
// f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
// Round up.
rem += (1 << 31)
f.mant += (rem >> 32)
f.exp = f.exp + g.exp + 64
}
var uint64pow10 = [...]uint64{
1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}
// AssignDecimal sets f to an approximate value mantissa*10^exp. It
// returns true if the value represented by f is guaranteed to be the
// best approximation of d after being rounded to a float64 or
// float32 depending on flt.
func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
const uint64digits = 19
const errorscale = 8
errors := 0 // An upper bound for error, computed in errorscale*ulp.
if trunc {
// the decimal number was truncated.
errors += errorscale / 2
}
f.mant = mantissa
f.exp = 0
f.neg = neg
// Multiply by powers of ten.
i := (exp10 - firstPowerOfTen) / stepPowerOfTen
if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
return false
}
adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen
// We multiply by exp%step
if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
// We can multiply the mantissa exactly.
f.mant *= uint64pow10[adjExp]
f.Normalize()
} else {
f.Normalize()
f.Multiply(smallPowersOfTen[adjExp])
errors += errorscale / 2
}
// We multiply by 10 to the exp - exp%step.
f.Multiply(powersOfTen[i])
if errors > 0 {
errors += 1
}
errors += errorscale / 2
// Normalize
shift := f.Normalize()
errors <<= shift
// Now f is a good approximation of the decimal.
// Check whether the error is too large: that is, if the mantissa
// is perturbated by the error, the resulting float64 will change.
// The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits.
//
// In many cases the approximation will be good enough.
denormalExp := flt.bias - 63
var extrabits uint
if f.exp <= denormalExp {
// f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
} else {
extrabits = uint(63 - flt.mantbits)
}
halfway := uint64(1) << (extrabits - 1)
mant_extra := f.mant & (1<<extrabits - 1)
// Do a signed comparison here! If the error estimate could make
// the mantissa round differently for the conversion to double,
// then we can't give a definite answer.
if int64(halfway)-int64(errors) < int64(mant_extra) &&
int64(mant_extra) < int64(halfway)+int64(errors) {
return false
}
return true
}
// Frexp10 is an analogue of math.Frexp for decimal powers. It scales
// f by an approximate power of ten 10^-exp, and returns exp10, so
// that f*10^exp10 has the same value as the old f, up to an ulp,
// as well as the index of 10^-exp in the powersOfTen table.
func (f *extFloat) frexp10() (exp10, index int) {
// The constants expMin and expMax constrain the final value of the
// binary exponent of f. We want a small integral part in the result
// because finding digits of an integer requires divisions, whereas
// digits of the fractional part can be found by repeatedly multiplying
// by 10.
const expMin = -60
const expMax = -32
// Find power of ten such that x * 10^n has a binary exponent
// between expMin and expMax.
approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
Loop:
for {
exp := f.exp + powersOfTen[i].exp + 64
switch {
case exp < expMin:
i++
case exp > expMax:
i--
default:
break Loop
}
}
// Apply the desired decimal shift on f. It will have exponent
// in the desired range. This is multiplication by 10^-exp10.
f.Multiply(powersOfTen[i])
return -(firstPowerOfTen + i*stepPowerOfTen), i
}
// frexp10Many applies a common shift by a power of ten to a, b, c.
func frexp10Many(a, b, c *extFloat) (exp10 int) {
exp10, i := c.frexp10()
a.Multiply(powersOfTen[i])
b.Multiply(powersOfTen[i])
return
}
// FixedDecimal stores in d the first n significant digits
// of the decimal representation of f. It returns false
// if it cannot be sure of the answer.
func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
if f.mant == 0 {
d.nd = 0
d.dp = 0
d.neg = f.neg
return true
}
if n == 0 {
panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
}
// Multiply by an appropriate power of ten to have a reasonable
// number to process.
f.Normalize()
exp10, _ := f.frexp10()
shift := uint(-f.exp)
integer := uint32(f.mant >> shift)
fraction := f.mant - (uint64(integer) << shift)
ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.
// Write exactly n digits to d.
needed := n // how many digits are left to write.
integerDigits := 0 // the number of decimal digits of integer.
pow10 := uint64(1) // the power of ten by which f was scaled.
for i, pow := 0, uint64(1); i < 20; i++ {
if pow > uint64(integer) {
integerDigits = i
break
}
pow *= 10
}
rest := integer
if integerDigits > needed {
// the integral part is already large, trim the last digits.
pow10 = uint64pow10[integerDigits-needed]
integer /= uint32(pow10)
rest -= integer * uint32(pow10)
} else {
rest = 0
}
// Write the digits of integer: the digits of rest are omitted.
var buf [32]byte
pos := len(buf)
for v := integer; v > 0; {
v1 := v / 10
v -= 10 * v1
pos--
buf[pos] = byte(v + '0')
v = v1
}
for i := pos; i < len(buf); i++ {
d.d[i-pos] = buf[i]
}
nd := len(buf) - pos
d.nd = nd
d.dp = integerDigits + exp10
needed -= nd
if needed > 0 {
if rest != 0 || pow10 != 1 {
panic("strconv: internal error, rest != 0 but needed > 0")
}
// Emit digits for the fractional part. Each time, 10*fraction
// fits in a uint64 without overflow.
for needed > 0 {
fraction *= 10
ε *= 10 // the uncertainty scales as we multiply by ten.
if 2*ε > 1<<shift {
// the error is so large it could modify which digit to write, abort.
return false
}
digit := fraction >> shift
d.d[nd] = byte(digit + '0')
fraction -= digit << shift
nd++
needed--
}
d.nd = nd
}
// We have written a truncation of f (a numerator / 10^d.dp). The remaining part
// can be interpreted as a small number (< 1) to be added to the last digit of the
// numerator.
//
// If rest > 0, the amount is:
// (rest<<shift | fraction) / (pow10 << shift)
// fraction being known with a ±ε uncertainty.
// The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
//
// If rest = 0, pow10 == 1 and the amount is
// fraction / (1 << shift)
// fraction being known with a ±ε uncertainty.
//
// We pass this information to the rounding routine for adjustment.
ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
if !ok {
return false
}
// Trim trailing zeros.
for i := d.nd - 1; i >= 0; i-- {
if d.d[i] != '0' {
d.nd = i + 1
break
}
}
return true
}
// adjustLastDigitFixed assumes d contains the representation of the integral part
// of some number, whose fractional part is num / (den << shift). The numerator
// num is only known up to an uncertainty of size ε, assumed to be less than
// (den << shift)/2.
//
// It will increase the last digit by one to account for correct rounding, typically
// when the fractional part is greater than 1/2, and will return false if ε is such
// that no correct answer can be given.
func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
if num > den<<shift {
panic("strconv: num > den<<shift in adjustLastDigitFixed")
}
if 2*ε > den<<shift {
panic("strconv: ε > (den<<shift)/2")
}
if 2*(num+ε) < den<<shift {
return true
}
if 2*(num-ε) > den<<shift {
// increment d by 1.
i := d.nd - 1
for ; i >= 0; i-- {
if d.d[i] == '9' {
d.nd--
} else {
break
}
}
if i < 0 {
d.d[0] = '1'
d.nd = 1
d.dp++
} else {
d.d[i]++
}
return true
}
return false
}
// ShortestDecimal stores in d the shortest decimal representation of f
// which belongs to the open interval (lower, upper), where f is supposed
// to lie. It returns false whenever the result is unsure. The implementation
// uses the Grisu3 algorithm.
func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
if f.mant == 0 {
d.nd = 0
d.dp = 0
d.neg = f.neg
return true
}
if f.exp == 0 && *lower == *f && *lower == *upper {
// an exact integer.
var buf [24]byte
n := len(buf) - 1
for v := f.mant; v > 0; {
v1 := v / 10
v -= 10 * v1
buf[n] = byte(v + '0')
n--
v = v1
}
nd := len(buf) - n - 1
for i := 0; i < nd; i++ {
d.d[i] = buf[n+1+i]
}
d.nd, d.dp = nd, nd
for d.nd > 0 && d.d[d.nd-1] == '0' {
d.nd--
}
if d.nd == 0 {
d.dp = 0
}
d.neg = f.neg
return true
}
upper.Normalize()
// Uniformize exponents.
if f.exp > upper.exp {
f.mant <<= uint(f.exp - upper.exp)
f.exp = upper.exp
}
if lower.exp > upper.exp {
lower.mant <<= uint(lower.exp - upper.exp)
lower.exp = upper.exp
}
exp10 := frexp10Many(lower, f, upper)
// Take a safety margin due to rounding in frexp10Many, but we lose precision.
upper.mant++
lower.mant--
// The shortest representation of f is either rounded up or down, but
// in any case, it is a truncation of upper.
shift := uint(-upper.exp)
integer := uint32(upper.mant >> shift)
fraction := upper.mant - (uint64(integer) << shift)
// How far we can go down from upper until the result is wrong.
allowance := upper.mant - lower.mant
// How far we should go to get a very precise result.
targetDiff := upper.mant - f.mant
// Count integral digits: there are at most 10.
var integerDigits int
for i, pow := 0, uint64(1); i < 20; i++ {
if pow > uint64(integer) {
integerDigits = i
break
}
pow *= 10
}
for i := 0; i < integerDigits; i++ {
pow := uint64pow10[integerDigits-i-1]
digit := integer / uint32(pow)
d.d[i] = byte(digit + '0')
integer -= digit * uint32(pow)
// evaluate whether we should stop.
if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
d.nd = i + 1
d.dp = integerDigits + exp10
d.neg = f.neg
// Sometimes allowance is so large the last digit might need to be
// decremented to get closer to f.
return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
}
}
d.nd = integerDigits
d.dp = d.nd + exp10
d.neg = f.neg
// Compute digits of the fractional part. At each step fraction does not
// overflow. The choice of minExp implies that fraction is less than 2^60.
var digit int
multiplier := uint64(1)
for {
fraction *= 10
multiplier *= 10
digit = int(fraction >> shift)
d.d[d.nd] = byte(digit + '0')
d.nd++
fraction -= uint64(digit) << shift
if fraction < allowance*multiplier {
// We are in the admissible range. Note that if allowance is about to
// overflow, that is, allowance > 2^64/10, the condition is automatically
// true due to the limited range of fraction.
return adjustLastDigit(d,
fraction, targetDiff*multiplier, allowance*multiplier,
1<<shift, multiplier*2)
}
}
}
// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
// It assumes that a decimal digit is worth ulpDecimal*ε, and that
// all data is known with a error estimate of ulpBinary*ε.
func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
if ulpDecimal < 2*ulpBinary {
// Approximation is too wide.
return false
}
for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
d.d[d.nd-1]--
currentDiff += ulpDecimal
}
if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
// we have two choices, and don't know what to do.
return false
}
if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
// we went too far
return false
}
if d.nd == 1 && d.d[0] == '0' {
// the number has actually reached zero.
d.nd = 0
d.dp = 0
}
return true
}

121
vendor/github.com/pquerna/ffjson/fflib/v1/fold.go generated vendored Normal file
View file

@ -0,0 +1,121 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's encoding/json/fold.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
import (
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func EqualFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func AsciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func SimpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
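// Illustrative sketch, not part of the vendored ffjson source: EqualFoldRight is
// the variant used when the ASCII key contains 's' or 'k', which may fold to the
// non-ASCII long s or Kelvin sign on the input side.
func exampleFold() bool {
	return EqualFoldRight([]byte("kB"), []byte("\u212aB")) // true: Kelvin sign folds to 'k'
}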

542
vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go generated vendored Normal file
View file

@ -0,0 +1,542 @@
package v1
/**
* Copyright 2015 Paul Querna, Klaus Post
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Most of this file is based on Go stdlib's strconv/ftoa.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
import "math"
// TODO: move elsewhere?
type floatInfo struct {
mantbits uint
expbits uint
bias int
}
var optimize = true // can change for testing
var float32info = floatInfo{23, 8, -127}
var float64info = floatInfo{52, 11, -1023}
// AppendFloat appends the string form of the floating-point number f,
// as generated by FormatFloat
func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) {
var bits uint64
var flt *floatInfo
switch bitSize {
case 32:
bits = uint64(math.Float32bits(float32(val)))
flt = &float32info
case 64:
bits = math.Float64bits(val)
flt = &float64info
default:
panic("strconv: illegal AppendFloat/FormatFloat bitSize")
}
neg := bits>>(flt.expbits+flt.mantbits) != 0
exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
mant := bits & (uint64(1)<<flt.mantbits - 1)
switch exp {
case 1<<flt.expbits - 1:
// Inf, NaN
var s string
switch {
case mant != 0:
s = "NaN"
case neg:
s = "-Inf"
default:
s = "+Inf"
}
dst.WriteString(s)
return
case 0:
// denormalized
exp++
default:
// add implicit top bit
mant |= uint64(1) << flt.mantbits
}
exp += flt.bias
// Pick off easy binary format.
if fmt == 'b' {
fmtB(dst, neg, mant, exp, flt)
return
}
if !optimize {
bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
return
}
var digs decimalSlice
ok := false
// Negative precision means "only as much as needed to be exact."
shortest := prec < 0
if shortest {
// Try Grisu3 algorithm.
f := new(extFloat)
lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
var buf [32]byte
digs.d = buf[:]
ok = f.ShortestDecimal(&digs, &lower, &upper)
if !ok {
bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
return
}
// Precision for shortest representation mode.
switch fmt {
case 'e', 'E':
prec = max(digs.nd-1, 0)
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else if fmt != 'f' {
// Fixed number of digits.
digits := prec
switch fmt {
case 'e', 'E':
digits++
case 'g', 'G':
if prec == 0 {
prec = 1
}
digits = prec
}
if digits <= 15 {
// try fast algorithm when the number of digits is reasonable.
var buf [24]byte
digs.d = buf[:]
f := extFloat{mant, exp - int(flt.mantbits), neg}
ok = f.FixedDecimal(&digs, digits)
}
}
if !ok {
bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
return
}
formatDigits(dst, shortest, neg, digs, prec, fmt)
return
}
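// Illustrative sketch, not part of the vendored ffjson source: AppendFloat writes
// straight into an EncodingBuffer instead of returning a string, mirroring
// strconv.AppendFloat. It assumes, as elsewhere in fflib, that *Buffer satisfies
// the EncodingBuffer interface.
func exampleAppendFloat(dst *Buffer, v float64) {
	AppendFloat(dst, v, 'g', -1, 64) // shortest form that round-trips as a float64
}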
// bigFtoa uses multiprecision computations to format a float.
func bigFtoa(dst EncodingBuffer, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) {
d := new(decimal)
d.Assign(mant)
d.Shift(exp - int(flt.mantbits))
var digs decimalSlice
shortest := prec < 0
if shortest {
roundShortest(d, mant, exp, flt)
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
// Precision for shortest representation mode.
switch fmt {
case 'e', 'E':
prec = digs.nd - 1
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else {
// Round appropriately.
switch fmt {
case 'e', 'E':
d.Round(prec + 1)
case 'f':
d.Round(d.dp + prec)
case 'g', 'G':
if prec == 0 {
prec = 1
}
d.Round(prec)
}
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
}
formatDigits(dst, shortest, neg, digs, prec, fmt)
return
}
func formatDigits(dst EncodingBuffer, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) {
switch fmt {
case 'e', 'E':
fmtE(dst, neg, digs, prec, fmt)
return
case 'f':
fmtF(dst, neg, digs, prec)
return
case 'g', 'G':
// trailing fractional zeros in 'e' form will be trimmed.
eprec := prec
if eprec > digs.nd && digs.nd >= digs.dp {
eprec = digs.nd
}
// %e is used if the exponent from the conversion
// is less than -4 or greater than or equal to the precision.
// if precision was the shortest possible, use precision 6 for this decision.
if shortest {
eprec = 6
}
exp := digs.dp - 1
if exp < -4 || exp >= eprec {
if prec > digs.nd {
prec = digs.nd
}
fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
return
}
if prec > digs.dp {
prec = digs.nd
}
fmtF(dst, neg, digs, max(prec-digs.dp, 0))
return
}
// unknown format
dst.Write([]byte{'%', fmt})
return
}
// Round d (= mant * 2^exp) to the shortest number of digits
// that will let the original floating point value be precisely
// reconstructed. Size is original floating point size (64 or 32).
func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
// If mantissa is zero, the number is zero; stop now.
if mant == 0 {
d.nd = 0
return
}
// Compute upper and lower such that any decimal number
// between upper and lower (possibly inclusive)
// will round to the original floating point number.
// We may see at once that the number is already shortest.
//
// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
// The closest shorter number is at least 10^(dp-nd) away.
// The lower/upper bounds computed below are at distance
// at most 2^(exp-mantbits).
//
// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
// or equivalently log2(10)*(dp-nd) > exp-mantbits.
// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
minexp := flt.bias + 1 // minimum possible exponent
if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
// The number is already shortest.
return
}
// d = mant << (exp - mantbits)
// Next highest floating point number is mant+1 << exp-mantbits.
// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
upper := new(decimal)
upper.Assign(mant*2 + 1)
upper.Shift(exp - int(flt.mantbits) - 1)
// d = mant << (exp - mantbits)
// Next lowest floating point number is mant-1 << exp-mantbits,
// unless mant-1 drops the significant bit and exp is not the minimum exp,
// in which case the next lowest is mant*2-1 << exp-mantbits-1.
// Either way, call it mantlo << explo-mantbits.
// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
var mantlo uint64
var explo int
if mant > 1<<flt.mantbits || exp == minexp {
mantlo = mant - 1
explo = exp
} else {
mantlo = mant*2 - 1
explo = exp - 1
}
lower := new(decimal)
lower.Assign(mantlo*2 + 1)
lower.Shift(explo - int(flt.mantbits) - 1)
// The upper and lower bounds are possible outputs only if
// the original mantissa is even, so that IEEE round-to-even
// would round to the original mantissa and not the neighbors.
inclusive := mant%2 == 0
// Now we can figure out the minimum number of digits required.
// Walk along until d has distinguished itself from upper and lower.
for i := 0; i < d.nd; i++ {
var l, m, u byte // lower, middle, upper digits
if i < lower.nd {
l = lower.d[i]
} else {
l = '0'
}
m = d.d[i]
if i < upper.nd {
u = upper.d[i]
} else {
u = '0'
}
// Okay to round down (truncate) if lower has a different digit
// or if lower is inclusive and is exactly the result of rounding down.
okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
// Okay to round up if upper has a different digit and
// either upper is inclusive or upper is bigger than the result of rounding up.
okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
// If it's okay to do either, then round to the nearest one.
// If it's okay to do only one, do it.
switch {
case okdown && okup:
d.Round(i + 1)
return
case okdown:
d.RoundDown(i + 1)
return
case okup:
d.RoundUp(i + 1)
return
}
}
}
type decimalSlice struct {
d []byte
nd, dp int
neg bool
}
// %e: -d.ddddde±dd
func fmtE(dst EncodingBuffer, neg bool, d decimalSlice, prec int, fmt byte) {
// sign
if neg {
dst.WriteByte('-')
}
// first digit
ch := byte('0')
if d.nd != 0 {
ch = d.d[0]
}
dst.WriteByte(ch)
// .moredigits
if prec > 0 {
dst.WriteByte('.')
i := 1
m := min(d.nd, prec+1)
if i < m {
dst.Write(d.d[i:m])
i = m
}
for i <= prec {
dst.WriteByte('0')
i++
}
}
// e±
dst.WriteByte(fmt)
exp := d.dp - 1
if d.nd == 0 { // special case: 0 has exponent 0
exp = 0
}
if exp < 0 {
ch = '-'
exp = -exp
} else {
ch = '+'
}
dst.WriteByte(ch)
// dd or ddd
switch {
case exp < 10:
dst.WriteByte('0')
dst.WriteByte(byte(exp) + '0')
case exp < 100:
dst.WriteByte(byte(exp/10) + '0')
dst.WriteByte(byte(exp%10) + '0')
default:
dst.WriteByte(byte(exp/100) + '0')
dst.WriteByte(byte(exp/10)%10 + '0')
dst.WriteByte(byte(exp%10) + '0')
}
return
}
// %f: -ddddddd.ddddd
func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) {
// sign
if neg {
dst.WriteByte('-')
}
// integer, padded with zeros as needed.
if d.dp > 0 {
m := min(d.nd, d.dp)
dst.Write(d.d[:m])
for ; m < d.dp; m++ {
dst.WriteByte('0')
}
} else {
dst.WriteByte('0')
}
// fraction
if prec > 0 {
dst.WriteByte('.')
for i := 0; i < prec; i++ {
ch := byte('0')
if j := d.dp + i; 0 <= j && j < d.nd {
ch = d.d[j]
}
dst.WriteByte(ch)
}
}
return
}
// %b: -ddddddddp±ddd
func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) {
// sign
if neg {
dst.WriteByte('-')
}
// mantissa
formatBits(dst, mant, 10, false)
// p
dst.WriteByte('p')
// ±exponent
exp -= int(flt.mantbits)
if exp >= 0 {
dst.WriteByte('+')
}
formatBits(dst, uint64(exp), 10, exp < 0)
return
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
// formatBits computes the string representation of u in the given base.
// If neg is set, u is treated as negative int64 value.
func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) {
if base < 2 || base > len(digits) {
panic("strconv: illegal AppendInt/FormatInt base")
}
// 2 <= base && base <= len(digits)
var a [64 + 1]byte // +1 for sign of 64bit value in base 2
i := len(a)
if neg {
u = -u
}
// convert bits
if base == 10 {
// common case: use constants for / because
// the compiler can optimize it into a multiply+shift
if ^uintptr(0)>>32 == 0 {
for u > uint64(^uintptr(0)) {
q := u / 1e9
us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr
for j := 9; j > 0; j-- {
i--
qs := us / 10
a[i] = byte(us - qs*10 + '0')
us = qs
}
u = q
}
}
// u guaranteed to fit into a uintptr
us := uintptr(u)
for us >= 10 {
i--
q := us / 10
a[i] = byte(us - q*10 + '0')
us = q
}
// u < 10
i--
a[i] = byte(us + '0')
} else if s := shifts[base]; s > 0 {
// base is power of 2: use shifts and masks instead of / and %
b := uint64(base)
m := uintptr(b) - 1 // == 1<<s - 1
for u >= b {
i--
a[i] = digits[uintptr(u)&m]
u >>= s
}
// u < base
i--
a[i] = digits[uintptr(u)]
} else {
// general case
b := uint64(base)
for u >= b {
i--
q := u / b
a[i] = digits[uintptr(u-q*b)]
u = q
}
// u < base
i--
a[i] = digits[uintptr(u)]
}
// add sign, if any
if neg {
i--
a[i] = '-'
}
dst.Write(a[i:])
}

View file

@ -0,0 +1,936 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's strconv/atof.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
// decimal to binary floating point conversion.
// Algorithm:
// 1) Store input in multiprecision decimal.
// 2) Multiply/divide decimal by powers of two until in range [0.5, 1)
// 3) Multiply by 2^precision and round to get mantissa.
import "math"
var optimize = true // can change for testing
func equalIgnoreCase(s1 []byte, s2 []byte) bool {
if len(s1) != len(s2) {
return false
}
for i := 0; i < len(s1); i++ {
c1 := s1[i]
if 'A' <= c1 && c1 <= 'Z' {
c1 += 'a' - 'A'
}
c2 := s2[i]
if 'A' <= c2 && c2 <= 'Z' {
c2 += 'a' - 'A'
}
if c1 != c2 {
return false
}
}
return true
}
func special(s []byte) (f float64, ok bool) {
if len(s) == 0 {
return
}
switch s[0] {
default:
return
case '+':
if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) {
return math.Inf(1), true
}
case '-':
if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) {
return math.Inf(-1), true
}
case 'n', 'N':
if equalIgnoreCase(s, []byte("nan")) {
return math.NaN(), true
}
case 'i', 'I':
if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) {
return math.Inf(1), true
}
}
return
}
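// Illustrative sketch, not part of the vendored source: special recognizes the
// textual infinity/NaN spellings case-insensitively before any numeric parsing.
func exampleSpecial() (float64, bool) {
	return special([]byte("-Infinity")) // math.Inf(-1), true
}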
func (b *decimal) set(s []byte) (ok bool) {
i := 0
b.neg = false
b.trunc = false
// optional sign
if i >= len(s) {
return
}
switch {
case s[i] == '+':
i++
case s[i] == '-':
b.neg = true
i++
}
// digits
sawdot := false
sawdigits := false
for ; i < len(s); i++ {
switch {
case s[i] == '.':
if sawdot {
return
}
sawdot = true
b.dp = b.nd
continue
case '0' <= s[i] && s[i] <= '9':
sawdigits = true
if s[i] == '0' && b.nd == 0 { // ignore leading zeros
b.dp--
continue
}
if b.nd < len(b.d) {
b.d[b.nd] = s[i]
b.nd++
} else if s[i] != '0' {
b.trunc = true
}
continue
}
break
}
if !sawdigits {
return
}
if !sawdot {
b.dp = b.nd
}
// optional exponent moves decimal point.
// if we read a very large, very long number,
// just be sure to move the decimal point by
// a lot (say, 100000). it doesn't matter if it's
// not the exact number.
if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
i++
if i >= len(s) {
return
}
esign := 1
if s[i] == '+' {
i++
} else if s[i] == '-' {
i++
esign = -1
}
if i >= len(s) || s[i] < '0' || s[i] > '9' {
return
}
e := 0
for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
if e < 10000 {
e = e*10 + int(s[i]) - '0'
}
}
b.dp += e * esign
}
if i != len(s) {
return
}
ok = true
return
}
// readFloat reads a decimal mantissa and exponent from a float
// string representation. It sets ok to false if the number could
// not fit return types or is invalid.
func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) {
const uint64digits = 19
i := 0
// optional sign
if i >= len(s) {
return
}
switch {
case s[i] == '+':
i++
case s[i] == '-':
neg = true
i++
}
// digits
sawdot := false
sawdigits := false
nd := 0
ndMant := 0
dp := 0
for ; i < len(s); i++ {
switch c := s[i]; true {
case c == '.':
if sawdot {
return
}
sawdot = true
dp = nd
continue
case '0' <= c && c <= '9':
sawdigits = true
if c == '0' && nd == 0 { // ignore leading zeros
dp--
continue
}
nd++
if ndMant < uint64digits {
mantissa *= 10
mantissa += uint64(c - '0')
ndMant++
} else if s[i] != '0' {
trunc = true
}
continue
}
break
}
if !sawdigits {
return
}
if !sawdot {
dp = nd
}
// optional exponent moves decimal point.
// if we read a very large, very long number,
// just be sure to move the decimal point by
// a lot (say, 100000). it doesn't matter if it's
// not the exact number.
if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
i++
if i >= len(s) {
return
}
esign := 1
if s[i] == '+' {
i++
} else if s[i] == '-' {
i++
esign = -1
}
if i >= len(s) || s[i] < '0' || s[i] > '9' {
return
}
e := 0
for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
if e < 10000 {
e = e*10 + int(s[i]) - '0'
}
}
dp += e * esign
}
if i != len(s) {
return
}
exp = dp - ndMant
ok = true
return
}
// decimal power of ten to binary power of two.
var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26}
func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) {
var exp int
var mant uint64
// Zero is always a special case.
if d.nd == 0 {
mant = 0
exp = flt.bias
goto out
}
// Obvious overflow/underflow.
// These bounds are for 64-bit floats.
// Will have to change if we want to support 80-bit floats in the future.
if d.dp > 310 {
goto overflow
}
if d.dp < -330 {
// zero
mant = 0
exp = flt.bias
goto out
}
// Scale by powers of two until in range [0.5, 1.0)
exp = 0
for d.dp > 0 {
var n int
if d.dp >= len(powtab) {
n = 27
} else {
n = powtab[d.dp]
}
d.Shift(-n)
exp += n
}
for d.dp < 0 || d.dp == 0 && d.d[0] < '5' {
var n int
if -d.dp >= len(powtab) {
n = 27
} else {
n = powtab[-d.dp]
}
d.Shift(n)
exp -= n
}
// Our range is [0.5,1) but floating point range is [1,2).
exp--
// Minimum representable exponent is flt.bias+1.
// If the exponent is smaller, move it up and
// adjust d accordingly.
if exp < flt.bias+1 {
n := flt.bias + 1 - exp
d.Shift(-n)
exp += n
}
if exp-flt.bias >= 1<<flt.expbits-1 {
goto overflow
}
// Extract 1+flt.mantbits bits.
d.Shift(int(1 + flt.mantbits))
mant = d.RoundedInteger()
// Rounding might have added a bit; shift down.
if mant == 2<<flt.mantbits {
mant >>= 1
exp++
if exp-flt.bias >= 1<<flt.expbits-1 {
goto overflow
}
}
// Denormalized?
if mant&(1<<flt.mantbits) == 0 {
exp = flt.bias
}
goto out
overflow:
// ±Inf
mant = 0
exp = 1<<flt.expbits - 1 + flt.bias
overflow = true
out:
// Assemble bits.
bits := mant & (uint64(1)<<flt.mantbits - 1)
bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
if d.neg {
bits |= 1 << flt.mantbits << flt.expbits
}
return bits, overflow
}
// Exact powers of 10.
var float64pow10 = []float64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
}
var float32pow10 = []float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
// If possible to convert decimal representation to 64-bit float f exactly,
// entirely in floating-point math, do so, avoiding the expense of decimalToFloatBits.
// Three common cases:
// value is exact integer
// value is exact integer * exact power of ten
// value is exact integer / exact power of ten
// These all produce potentially inexact but correctly rounded answers.
func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
if mantissa>>float64info.mantbits != 0 {
return
}
f = float64(mantissa)
if neg {
f = -f
}
switch {
case exp == 0:
// an integer.
return f, true
// Exact integers are <= 10^15.
// Exact powers of ten are <= 10^22.
case exp > 0 && exp <= 15+22: // int * 10^k
// If exponent is big but number of digits is not,
// can move a few zeros into the integer part.
if exp > 22 {
f *= float64pow10[exp-22]
exp = 22
}
if f > 1e15 || f < -1e15 {
// the exponent was really too large.
return
}
return f * float64pow10[exp], true
case exp < 0 && exp >= -22: // int / 10^k
return f / float64pow10[-exp], true
}
return
}
// If possible to compute mantissa*10^exp to 32-bit float f exactly,
// entirely in floating-point math, do so, avoiding the machinery above.
func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) {
if mantissa>>float32info.mantbits != 0 {
return
}
f = float32(mantissa)
if neg {
f = -f
}
switch {
case exp == 0:
return f, true
// Exact integers are <= 10^7.
// Exact powers of ten are <= 10^10.
case exp > 0 && exp <= 7+10: // int * 10^k
// If exponent is big but number of digits is not,
// can move a few zeros into the integer part.
if exp > 10 {
f *= float32pow10[exp-10]
exp = 10
}
if f > 1e7 || f < -1e7 {
// the exponent was really too large.
return
}
return f * float32pow10[exp], true
case exp < 0 && exp >= -10: // int / 10^k
return f / float32pow10[-exp], true
}
return
}
const fnParseFloat = "ParseFloat"
func atof32(s []byte) (f float32, err error) {
if val, ok := special(s); ok {
return float32(val), nil
}
if optimize {
// Parse mantissa and exponent.
mantissa, exp, neg, trunc, ok := readFloat(s)
if ok {
// Try pure floating-point arithmetic conversion.
if !trunc {
if f, ok := atof32exact(mantissa, exp, neg); ok {
return f, nil
}
}
// Try another fast path.
ext := new(extFloat)
if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok {
b, ovf := ext.floatBits(&float32info)
f = math.Float32frombits(uint32(b))
if ovf {
err = rangeError(fnParseFloat, string(s))
}
return f, err
}
}
}
var d decimal
if !d.set(s) {
return 0, syntaxError(fnParseFloat, string(s))
}
b, ovf := d.floatBits(&float32info)
f = math.Float32frombits(uint32(b))
if ovf {
err = rangeError(fnParseFloat, string(s))
}
return f, err
}
func atof64(s []byte) (f float64, err error) {
if val, ok := special(s); ok {
return val, nil
}
if optimize {
// Parse mantissa and exponent.
mantissa, exp, neg, trunc, ok := readFloat(s)
if ok {
// Try pure floating-point arithmetic conversion.
if !trunc {
if f, ok := atof64exact(mantissa, exp, neg); ok {
return f, nil
}
}
// Try another fast path.
ext := new(extFloat)
if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok {
b, ovf := ext.floatBits(&float64info)
f = math.Float64frombits(b)
if ovf {
err = rangeError(fnParseFloat, string(s))
}
return f, err
}
}
}
var d decimal
if !d.set(s) {
return 0, syntaxError(fnParseFloat, string(s))
}
b, ovf := d.floatBits(&float64info)
f = math.Float64frombits(b)
if ovf {
err = rangeError(fnParseFloat, string(s))
}
return f, err
}
// ParseFloat converts the string s to a floating-point number
// with the precision specified by bitSize: 32 for float32, or 64 for float64.
// When bitSize=32, the result still has type float64, but it will be
// convertible to float32 without changing its value.
//
// If s is well-formed and near a valid floating point number,
// ParseFloat returns the nearest floating point number rounded
// using IEEE754 unbiased rounding.
//
// The errors that ParseFloat returns have concrete type *NumError
// and include err.Num = s.
//
// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
//
// If s is syntactically well-formed but is more than 1/2 ULP
// away from the largest floating point number of the given size,
// ParseFloat returns f = ±Inf, err.Err = ErrRange.
func ParseFloat(s []byte, bitSize int) (f float64, err error) {
if bitSize == 32 {
f1, err1 := atof32(s)
return float64(f1), err1
}
f1, err1 := atof64(s)
return f1, err1
}
// original: strconv/decimal.go, but not exported, and needed for ParseFloat.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Multiprecision decimal numbers.
// For floating-point formatting only; not general purpose.
// Only operations are assign and (binary) left/right shift.
// Can do binary floating point in multiprecision decimal precisely
// because 2 divides 10; cannot do decimal floating point
// in multiprecision binary precisely.
type decimal struct {
d [800]byte // digits
nd int // number of digits used
dp int // decimal point
neg bool
trunc bool // discarded nonzero digits beyond d[:nd]
}
func (a *decimal) String() string {
n := 10 + a.nd
if a.dp > 0 {
n += a.dp
}
if a.dp < 0 {
n += -a.dp
}
buf := make([]byte, n)
w := 0
switch {
case a.nd == 0:
return "0"
case a.dp <= 0:
// zeros fill space between decimal point and digits
buf[w] = '0'
w++
buf[w] = '.'
w++
w += digitZero(buf[w : w+-a.dp])
w += copy(buf[w:], a.d[0:a.nd])
case a.dp < a.nd:
// decimal point in middle of digits
w += copy(buf[w:], a.d[0:a.dp])
buf[w] = '.'
w++
w += copy(buf[w:], a.d[a.dp:a.nd])
default:
// zeros fill space between digits and decimal point
w += copy(buf[w:], a.d[0:a.nd])
w += digitZero(buf[w : w+a.dp-a.nd])
}
return string(buf[0:w])
}
func digitZero(dst []byte) int {
for i := range dst {
dst[i] = '0'
}
return len(dst)
}
// trim trailing zeros from number.
// (They are meaningless; the decimal point is tracked
// independent of the number of digits.)
func trim(a *decimal) {
for a.nd > 0 && a.d[a.nd-1] == '0' {
a.nd--
}
if a.nd == 0 {
a.dp = 0
}
}
// Assign v to a.
func (a *decimal) Assign(v uint64) {
var buf [24]byte
// Write reversed decimal in buf.
n := 0
for v > 0 {
v1 := v / 10
v -= 10 * v1
buf[n] = byte(v + '0')
n++
v = v1
}
// Reverse again to produce forward decimal in a.d.
a.nd = 0
for n--; n >= 0; n-- {
a.d[a.nd] = buf[n]
a.nd++
}
a.dp = a.nd
trim(a)
}
// Maximum shift that we can do in one pass without overflow.
// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
const maxShift = 27
// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
func rightShift(a *decimal, k uint) {
r := 0 // read pointer
w := 0 // write pointer
// Pick up enough leading digits to cover first shift.
n := 0
for ; n>>k == 0; r++ {
if r >= a.nd {
if n == 0 {
// a == 0; shouldn't get here, but handle anyway.
a.nd = 0
return
}
for n>>k == 0 {
n = n * 10
r++
}
break
}
c := int(a.d[r])
n = n*10 + c - '0'
}
a.dp -= r - 1
// Pick up a digit, put down a digit.
for ; r < a.nd; r++ {
c := int(a.d[r])
dig := n >> k
n -= dig << k
a.d[w] = byte(dig + '0')
w++
n = n*10 + c - '0'
}
// Put down extra digits.
for n > 0 {
dig := n >> k
n -= dig << k
if w < len(a.d) {
a.d[w] = byte(dig + '0')
w++
} else if dig > 0 {
a.trunc = true
}
n = n * 10
}
a.nd = w
trim(a)
}
// Cheat sheet for left shift: table indexed by shift count giving
// number of new digits that will be introduced by that shift.
//
// For example, leftcheats[4] = {2, "625"}. That means that
// if we are shifting by 4 (multiplying by 16), it will add 2 digits
// when the string prefix is "625" through "999", and one fewer digit
// if the string prefix is "000" through "624".
//
// Credit for this trick goes to Ken.
type leftCheat struct {
delta int // number of new digits
cutoff string // minus one digit if original < a.
}
var leftcheats = []leftCheat{
// Leading digits of 1/2^i = 5^i.
// 5^23 is not an exact 64-bit floating point number,
// so have to use bc for the math.
/*
seq 27 | sed 's/^/5^/' | bc |
awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
{
log2 = log(2)/log(10)
printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
int(log2*NR+1), $0, 2**NR)
}'
*/
{0, ""},
{1, "5"}, // * 2
{1, "25"}, // * 4
{1, "125"}, // * 8
{2, "625"}, // * 16
{2, "3125"}, // * 32
{2, "15625"}, // * 64
{3, "78125"}, // * 128
{3, "390625"}, // * 256
{3, "1953125"}, // * 512
{4, "9765625"}, // * 1024
{4, "48828125"}, // * 2048
{4, "244140625"}, // * 4096
{4, "1220703125"}, // * 8192
{5, "6103515625"}, // * 16384
{5, "30517578125"}, // * 32768
{5, "152587890625"}, // * 65536
{6, "762939453125"}, // * 131072
{6, "3814697265625"}, // * 262144
{6, "19073486328125"}, // * 524288
{7, "95367431640625"}, // * 1048576
{7, "476837158203125"}, // * 2097152
{7, "2384185791015625"}, // * 4194304
{7, "11920928955078125"}, // * 8388608
{8, "59604644775390625"}, // * 16777216
{8, "298023223876953125"}, // * 33554432
{8, "1490116119384765625"}, // * 67108864
{9, "7450580596923828125"}, // * 134217728
}
// Is the leading prefix of b lexicographically less than s?
func prefixIsLessThan(b []byte, s string) bool {
for i := 0; i < len(s); i++ {
if i >= len(b) {
return true
}
if b[i] != s[i] {
return b[i] < s[i]
}
}
return false
}
// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow.
func leftShift(a *decimal, k uint) {
delta := leftcheats[k].delta
if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
delta--
}
r := a.nd // read index
w := a.nd + delta // write index
n := 0
// Pick up a digit, put down a digit.
for r--; r >= 0; r-- {
n += (int(a.d[r]) - '0') << k
quo := n / 10
rem := n - 10*quo
w--
if w < len(a.d) {
a.d[w] = byte(rem + '0')
} else if rem != 0 {
a.trunc = true
}
n = quo
}
// Put down extra digits.
for n > 0 {
quo := n / 10
rem := n - 10*quo
w--
if w < len(a.d) {
a.d[w] = byte(rem + '0')
} else if rem != 0 {
a.trunc = true
}
n = quo
}
a.nd += delta
if a.nd >= len(a.d) {
a.nd = len(a.d)
}
a.dp += delta
trim(a)
}
// Binary shift left (k > 0) or right (k < 0).
func (a *decimal) Shift(k int) {
switch {
case a.nd == 0:
// nothing to do: a == 0
case k > 0:
for k > maxShift {
leftShift(a, maxShift)
k -= maxShift
}
leftShift(a, uint(k))
case k < 0:
for k < -maxShift {
rightShift(a, maxShift)
k += maxShift
}
rightShift(a, uint(-k))
}
}
// If we chop a at nd digits, should we round up?
func shouldRoundUp(a *decimal, nd int) bool {
if nd < 0 || nd >= a.nd {
return false
}
if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
// if we truncated, a little higher than what's recorded - always round up
if a.trunc {
return true
}
return nd > 0 && (a.d[nd-1]-'0')%2 != 0
}
// not halfway - digit tells all
return a.d[nd] >= '5'
}
// Round a to nd digits (or fewer).
// If nd is zero, it means we're rounding
// just to the left of the digits, as in
// 0.09 -> 0.1.
func (a *decimal) Round(nd int) {
if nd < 0 || nd >= a.nd {
return
}
if shouldRoundUp(a, nd) {
a.RoundUp(nd)
} else {
a.RoundDown(nd)
}
}
// Round a down to nd digits (or fewer).
func (a *decimal) RoundDown(nd int) {
if nd < 0 || nd >= a.nd {
return
}
a.nd = nd
trim(a)
}
// Round a up to nd digits (or fewer).
func (a *decimal) RoundUp(nd int) {
if nd < 0 || nd >= a.nd {
return
}
// round up
for i := nd - 1; i >= 0; i-- {
c := a.d[i]
if c < '9' { // can stop after this digit
a.d[i]++
a.nd = i + 1
return
}
}
// Number is all 9s.
// Change to single 1 with adjusted decimal point.
a.d[0] = '1'
a.nd = 1
a.dp++
}
// Extract integer part, rounded appropriately.
// No guarantees about overflow.
func (a *decimal) RoundedInteger() uint64 {
if a.dp > 20 {
return 0xFFFFFFFFFFFFFFFF
}
var i int
n := uint64(0)
for i = 0; i < a.dp && i < a.nd; i++ {
n = n*10 + uint64(a.d[i]-'0')
}
for ; i < a.dp; i++ {
n *= 10
}
if shouldRoundUp(a, a.dp) {
n++
}
return n
}
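An in-package sketch (illustrative only) of the multiprecision decimal workflow used by the float conversion code in this package: assign a binary mantissa, shift by the binary exponent, and read the digits back.
func decimalSketch() string {
	var d decimal
	d.Assign(7) // digits "7", decimal point after 1 digit
	d.Shift(-2) // divide by 2^2
	return d.String() // "1.75"
}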

View file

@@ -0,0 +1,213 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's strconv/atoi.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
import (
"errors"
"strconv"
)
// ErrRange indicates that a value is out of range for the target type.
var ErrRange = errors.New("value out of range")
// ErrSyntax indicates that a value does not have the right syntax for the target type.
var ErrSyntax = errors.New("invalid syntax")
// A NumError records a failed conversion.
type NumError struct {
Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat)
Num string // the input
Err error // the reason the conversion failed (ErrRange, ErrSyntax)
}
func (e *NumError) Error() string {
return "strconv." + e.Func + ": " + "parsing " + strconv.Quote(e.Num) + ": " + e.Err.Error()
}
func syntaxError(fn, str string) *NumError {
return &NumError{fn, str, ErrSyntax}
}
func rangeError(fn, str string) *NumError {
return &NumError{fn, str, ErrRange}
}
const intSize = 32 << uint(^uint(0)>>63)
// IntSize is the size in bits of an int or uint value.
const IntSize = intSize
// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
if base < 2 {
return 0
}
return (1<<64-1)/uint64(base) + 1
}
// ParseUint is like ParseInt but for unsigned numbers, and operates on []byte
func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
var cutoff, maxVal uint64
if bitSize == 0 {
bitSize = int(IntSize)
}
s0 := s
switch {
case len(s) < 1:
err = ErrSyntax
goto Error
case 2 <= base && base <= 36:
// valid base; nothing to do
case base == 0:
// Look for octal, hex prefix.
switch {
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
base = 16
s = s[2:]
if len(s) < 1 {
err = ErrSyntax
goto Error
}
case s[0] == '0':
base = 8
default:
base = 10
}
default:
err = errors.New("invalid base " + strconv.Itoa(base))
goto Error
}
n = 0
cutoff = cutoff64(base)
maxVal = 1<<uint(bitSize) - 1
for i := 0; i < len(s); i++ {
var v byte
d := s[i]
switch {
case '0' <= d && d <= '9':
v = d - '0'
case 'a' <= d && d <= 'z':
v = d - 'a' + 10
case 'A' <= d && d <= 'Z':
v = d - 'A' + 10
default:
n = 0
err = ErrSyntax
goto Error
}
if int(v) >= base {
n = 0
err = ErrSyntax
goto Error
}
if n >= cutoff {
// n*base overflows
n = 1<<64 - 1
err = ErrRange
goto Error
}
n *= uint64(base)
n1 := n + uint64(v)
if n1 < n || n1 > maxVal {
// n+v overflows
n = 1<<64 - 1
err = ErrRange
goto Error
}
n = n1
}
return n, nil
Error:
return n, &NumError{"ParseUint", string(s0), err}
}
// ParseInt interprets a string s in the given base (2 to 36) and
// returns the corresponding value i. If base == 0, the base is
// implied by the string's prefix: base 16 for "0x", base 8 for
// "0", and base 10 otherwise.
//
// The bitSize argument specifies the integer type
// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
// correspond to int, int8, int16, int32, and int64.
//
// The errors that ParseInt returns have concrete type *NumError
// and include err.Num = s. If s is empty or contains invalid
// digits, err.Err = ErrSyntax and the returned value is 0;
// if the value corresponding to s cannot be represented by a
// signed integer of the given size, err.Err = ErrRange and the
// returned value is the maximum magnitude integer of the
// appropriate bitSize and sign.
func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
const fnParseInt = "ParseInt"
if bitSize == 0 {
bitSize = int(IntSize)
}
// Empty string bad.
if len(s) == 0 {
return 0, syntaxError(fnParseInt, string(s))
}
// Pick off leading sign.
s0 := s
neg := false
if s[0] == '+' {
s = s[1:]
} else if s[0] == '-' {
neg = true
s = s[1:]
}
// Convert unsigned and check range.
var un uint64
un, err = ParseUint(s, base, bitSize)
if err != nil && err.(*NumError).Err != ErrRange {
err.(*NumError).Func = fnParseInt
err.(*NumError).Num = string(s0)
return 0, err
}
cutoff := uint64(1 << uint(bitSize-1))
if !neg && un >= cutoff {
return int64(cutoff - 1), rangeError(fnParseInt, string(s0))
}
if neg && un > cutoff {
return -int64(cutoff), rangeError(fnParseInt, string(s0))
}
n := int64(un)
if neg {
n = -n
}
return n, nil
}
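An in-package sketch (illustrative only) of the []byte-oriented integer parsers defined above, showing the base-0 prefix handling and the range clamping described in the ParseInt comment.
func parseIntSketch() {
	u, _ := ParseUint([]byte("0xff"), 0, 64) // base 0 + "0x" prefix -> base 16, u == 255
	i, _ := ParseInt([]byte("-42"), 10, 32)  // i == -42
	v, err := ParseInt([]byte("128"), 10, 8) // overflows int8: v == 127, err.Err == ErrRange
	_, _, _, _ = u, i, v, err
}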

View file

@@ -0,0 +1,668 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
// An extFloat represents an extended floating-point number, with more
// precision than a float64. It does not try to save bits: the
// number represented by the structure is mant*(2^exp), with a negative
// sign if neg is true.
type extFloat struct {
mant uint64
exp int
neg bool
}
// Powers of ten taken from double-conversion library.
// http://code.google.com/p/double-conversion/
const (
firstPowerOfTen = -348
stepPowerOfTen = 8
)
var smallPowersOfTen = [...]extFloat{
{1 << 63, -63, false}, // 1
{0xa << 60, -60, false}, // 1e1
{0x64 << 57, -57, false}, // 1e2
{0x3e8 << 54, -54, false}, // 1e3
{0x2710 << 50, -50, false}, // 1e4
{0x186a0 << 47, -47, false}, // 1e5
{0xf4240 << 44, -44, false}, // 1e6
{0x989680 << 40, -40, false}, // 1e7
}
var powersOfTen = [...]extFloat{
{0xfa8fd5a0081c0288, -1220, false}, // 10^-348
{0xbaaee17fa23ebf76, -1193, false}, // 10^-340
{0x8b16fb203055ac76, -1166, false}, // 10^-332
{0xcf42894a5dce35ea, -1140, false}, // 10^-324
{0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
{0xe61acf033d1a45df, -1087, false}, // 10^-308
{0xab70fe17c79ac6ca, -1060, false}, // 10^-300
{0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
{0xbe5691ef416bd60c, -1007, false}, // 10^-284
{0x8dd01fad907ffc3c, -980, false}, // 10^-276
{0xd3515c2831559a83, -954, false}, // 10^-268
{0x9d71ac8fada6c9b5, -927, false}, // 10^-260
{0xea9c227723ee8bcb, -901, false}, // 10^-252
{0xaecc49914078536d, -874, false}, // 10^-244
{0x823c12795db6ce57, -847, false}, // 10^-236
{0xc21094364dfb5637, -821, false}, // 10^-228
{0x9096ea6f3848984f, -794, false}, // 10^-220
{0xd77485cb25823ac7, -768, false}, // 10^-212
{0xa086cfcd97bf97f4, -741, false}, // 10^-204
{0xef340a98172aace5, -715, false}, // 10^-196
{0xb23867fb2a35b28e, -688, false}, // 10^-188
{0x84c8d4dfd2c63f3b, -661, false}, // 10^-180
{0xc5dd44271ad3cdba, -635, false}, // 10^-172
{0x936b9fcebb25c996, -608, false}, // 10^-164
{0xdbac6c247d62a584, -582, false}, // 10^-156
{0xa3ab66580d5fdaf6, -555, false}, // 10^-148
{0xf3e2f893dec3f126, -529, false}, // 10^-140
{0xb5b5ada8aaff80b8, -502, false}, // 10^-132
{0x87625f056c7c4a8b, -475, false}, // 10^-124
{0xc9bcff6034c13053, -449, false}, // 10^-116
{0x964e858c91ba2655, -422, false}, // 10^-108
{0xdff9772470297ebd, -396, false}, // 10^-100
{0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92
{0xf8a95fcf88747d94, -343, false}, // 10^-84
{0xb94470938fa89bcf, -316, false}, // 10^-76
{0x8a08f0f8bf0f156b, -289, false}, // 10^-68
{0xcdb02555653131b6, -263, false}, // 10^-60
{0x993fe2c6d07b7fac, -236, false}, // 10^-52
{0xe45c10c42a2b3b06, -210, false}, // 10^-44
{0xaa242499697392d3, -183, false}, // 10^-36
{0xfd87b5f28300ca0e, -157, false}, // 10^-28
{0xbce5086492111aeb, -130, false}, // 10^-20
{0x8cbccc096f5088cc, -103, false}, // 10^-12
{0xd1b71758e219652c, -77, false}, // 10^-4
{0x9c40000000000000, -50, false}, // 10^4
{0xe8d4a51000000000, -24, false}, // 10^12
{0xad78ebc5ac620000, 3, false}, // 10^20
{0x813f3978f8940984, 30, false}, // 10^28
{0xc097ce7bc90715b3, 56, false}, // 10^36
{0x8f7e32ce7bea5c70, 83, false}, // 10^44
{0xd5d238a4abe98068, 109, false}, // 10^52
{0x9f4f2726179a2245, 136, false}, // 10^60
{0xed63a231d4c4fb27, 162, false}, // 10^68
{0xb0de65388cc8ada8, 189, false}, // 10^76
{0x83c7088e1aab65db, 216, false}, // 10^84
{0xc45d1df942711d9a, 242, false}, // 10^92
{0x924d692ca61be758, 269, false}, // 10^100
{0xda01ee641a708dea, 295, false}, // 10^108
{0xa26da3999aef774a, 322, false}, // 10^116
{0xf209787bb47d6b85, 348, false}, // 10^124
{0xb454e4a179dd1877, 375, false}, // 10^132
{0x865b86925b9bc5c2, 402, false}, // 10^140
{0xc83553c5c8965d3d, 428, false}, // 10^148
{0x952ab45cfa97a0b3, 455, false}, // 10^156
{0xde469fbd99a05fe3, 481, false}, // 10^164
{0xa59bc234db398c25, 508, false}, // 10^172
{0xf6c69a72a3989f5c, 534, false}, // 10^180
{0xb7dcbf5354e9bece, 561, false}, // 10^188
{0x88fcf317f22241e2, 588, false}, // 10^196
{0xcc20ce9bd35c78a5, 614, false}, // 10^204
{0x98165af37b2153df, 641, false}, // 10^212
{0xe2a0b5dc971f303a, 667, false}, // 10^220
{0xa8d9d1535ce3b396, 694, false}, // 10^228
{0xfb9b7cd9a4a7443c, 720, false}, // 10^236
{0xbb764c4ca7a44410, 747, false}, // 10^244
{0x8bab8eefb6409c1a, 774, false}, // 10^252
{0xd01fef10a657842c, 800, false}, // 10^260
{0x9b10a4e5e9913129, 827, false}, // 10^268
{0xe7109bfba19c0c9d, 853, false}, // 10^276
{0xac2820d9623bf429, 880, false}, // 10^284
{0x80444b5e7aa7cf85, 907, false}, // 10^292
{0xbf21e44003acdd2d, 933, false}, // 10^300
{0x8e679c2f5e44ff8f, 960, false}, // 10^308
{0xd433179d9c8cb841, 986, false}, // 10^316
{0x9e19db92b4e31ba9, 1013, false}, // 10^324
{0xeb96bf6ebadf77d9, 1039, false}, // 10^332
{0xaf87023b9bf0ee6b, 1066, false}, // 10^340
}
// floatBits returns the bits of the float64 that best approximates
// the extFloat passed as receiver. Overflow is set to true if
// the resulting float64 is ±Inf.
func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
f.Normalize()
exp := f.exp + 63
// Exponent too small.
if exp < flt.bias+1 {
n := flt.bias + 1 - exp
f.mant >>= uint(n)
exp += n
}
// Extract 1+flt.mantbits bits from the 64-bit mantissa.
mant := f.mant >> (63 - flt.mantbits)
if f.mant&(1<<(62-flt.mantbits)) != 0 {
// Round up.
mant += 1
}
// Rounding might have added a bit; shift down.
if mant == 2<<flt.mantbits {
mant >>= 1
exp++
}
// Infinities.
if exp-flt.bias >= 1<<flt.expbits-1 {
// ±Inf
mant = 0
exp = 1<<flt.expbits - 1 + flt.bias
overflow = true
} else if mant&(1<<flt.mantbits) == 0 {
// Denormalized?
exp = flt.bias
}
// Assemble bits.
bits = mant & (uint64(1)<<flt.mantbits - 1)
bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
if f.neg {
bits |= 1 << (flt.mantbits + flt.expbits)
}
return
}
// AssignComputeBounds sets f to the floating point value
// defined by mant, exp and precision given by flt. It returns
// lower, upper such that any number in the closed interval
// [lower, upper] is converted back to the same floating point number.
func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
f.mant = mant
f.exp = exp - int(flt.mantbits)
f.neg = neg
if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
// An exact integer
f.mant >>= uint(-f.exp)
f.exp = 0
return *f, *f
}
expBiased := exp - flt.bias
upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
if mant != 1<<flt.mantbits || expBiased == 1 {
lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
} else {
lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
}
return
}
// Normalize normalizes f so that the highest bit of the mantissa is
// set, and returns the number by which the mantissa was left-shifted.
func (f *extFloat) Normalize() (shift uint) {
mant, exp := f.mant, f.exp
if mant == 0 {
return 0
}
if mant>>(64-32) == 0 {
mant <<= 32
exp -= 32
}
if mant>>(64-16) == 0 {
mant <<= 16
exp -= 16
}
if mant>>(64-8) == 0 {
mant <<= 8
exp -= 8
}
if mant>>(64-4) == 0 {
mant <<= 4
exp -= 4
}
if mant>>(64-2) == 0 {
mant <<= 2
exp -= 2
}
if mant>>(64-1) == 0 {
mant <<= 1
exp -= 1
}
shift = uint(f.exp - exp)
f.mant, f.exp = mant, exp
return
}
// Multiply sets f to the product f*g: the result is correctly rounded,
// but not normalized.
func (f *extFloat) Multiply(g extFloat) {
fhi, flo := f.mant>>32, uint64(uint32(f.mant))
ghi, glo := g.mant>>32, uint64(uint32(g.mant))
// Cross products.
cross1 := fhi * glo
cross2 := flo * ghi
// f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
// Round up.
rem += (1 << 31)
f.mant += (rem >> 32)
f.exp = f.exp + g.exp + 64
}
var uint64pow10 = [...]uint64{
1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}
// AssignDecimal sets f to an approximate value mantissa*10^exp. It
// returns true if the value represented by f is guaranteed to be the
// best approximation of d after being rounded to a float64 or
// float32 depending on flt.
func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
const uint64digits = 19
const errorscale = 8
errors := 0 // An upper bound for error, computed in errorscale*ulp.
if trunc {
// the decimal number was truncated.
errors += errorscale / 2
}
f.mant = mantissa
f.exp = 0
f.neg = neg
// Multiply by powers of ten.
i := (exp10 - firstPowerOfTen) / stepPowerOfTen
if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
return false
}
adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen
// We multiply by exp%step
if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
// We can multiply the mantissa exactly.
f.mant *= uint64pow10[adjExp]
f.Normalize()
} else {
f.Normalize()
f.Multiply(smallPowersOfTen[adjExp])
errors += errorscale / 2
}
// We multiply by 10 to the exp - exp%step.
f.Multiply(powersOfTen[i])
if errors > 0 {
errors += 1
}
errors += errorscale / 2
// Normalize
shift := f.Normalize()
errors <<= shift
// Now f is a good approximation of the decimal.
// Check whether the error is too large: that is, if the mantissa
// is perturbed by the error, the resulting float64 will change.
// The 64-bit mantissa is 1 + 52 bits for float64 + 11 extra bits.
//
// In many cases the approximation will be good enough.
denormalExp := flt.bias - 63
var extrabits uint
if f.exp <= denormalExp {
// f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
} else {
extrabits = uint(63 - flt.mantbits)
}
halfway := uint64(1) << (extrabits - 1)
mant_extra := f.mant & (1<<extrabits - 1)
// Do a signed comparison here! If the error estimate could make
// the mantissa round differently for the conversion to double,
// then we can't give a definite answer.
if int64(halfway)-int64(errors) < int64(mant_extra) &&
int64(mant_extra) < int64(halfway)+int64(errors) {
return false
}
return true
}
// Frexp10 is an analogue of math.Frexp for decimal powers. It scales
// f by an approximate power of ten 10^-exp, and returns exp10, so
// that f*10^exp10 has the same value as the old f, up to an ulp,
// as well as the index of 10^-exp in the powersOfTen table.
func (f *extFloat) frexp10() (exp10, index int) {
// The constants expMin and expMax constrain the final value of the
// binary exponent of f. We want a small integral part in the result
// because finding digits of an integer requires divisions, whereas
// digits of the fractional part can be found by repeatedly multiplying
// by 10.
const expMin = -60
const expMax = -32
// Find power of ten such that x * 10^n has a binary exponent
// between expMin and expMax.
approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
Loop:
for {
exp := f.exp + powersOfTen[i].exp + 64
switch {
case exp < expMin:
i++
case exp > expMax:
i--
default:
break Loop
}
}
// Apply the desired decimal shift on f. It will have exponent
// in the desired range. This is multiplication by 10^-exp10.
f.Multiply(powersOfTen[i])
return -(firstPowerOfTen + i*stepPowerOfTen), i
}
// frexp10Many applies a common shift by a power of ten to a, b, c.
func frexp10Many(a, b, c *extFloat) (exp10 int) {
exp10, i := c.frexp10()
a.Multiply(powersOfTen[i])
b.Multiply(powersOfTen[i])
return
}
// FixedDecimal stores in d the first n significant digits
// of the decimal representation of f. It returns false
// if it cannot be sure of the answer.
func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
if f.mant == 0 {
d.nd = 0
d.dp = 0
d.neg = f.neg
return true
}
if n == 0 {
panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
}
// Multiply by an appropriate power of ten to have a reasonable
// number to process.
f.Normalize()
exp10, _ := f.frexp10()
shift := uint(-f.exp)
integer := uint32(f.mant >> shift)
fraction := f.mant - (uint64(integer) << shift)
ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.
// Write exactly n digits to d.
needed := n // how many digits are left to write.
integerDigits := 0 // the number of decimal digits of integer.
pow10 := uint64(1) // the power of ten by which f was scaled.
for i, pow := 0, uint64(1); i < 20; i++ {
if pow > uint64(integer) {
integerDigits = i
break
}
pow *= 10
}
rest := integer
if integerDigits > needed {
// the integral part is already large, trim the last digits.
pow10 = uint64pow10[integerDigits-needed]
integer /= uint32(pow10)
rest -= integer * uint32(pow10)
} else {
rest = 0
}
// Write the digits of integer: the digits of rest are omitted.
var buf [32]byte
pos := len(buf)
for v := integer; v > 0; {
v1 := v / 10
v -= 10 * v1
pos--
buf[pos] = byte(v + '0')
v = v1
}
for i := pos; i < len(buf); i++ {
d.d[i-pos] = buf[i]
}
nd := len(buf) - pos
d.nd = nd
d.dp = integerDigits + exp10
needed -= nd
if needed > 0 {
if rest != 0 || pow10 != 1 {
panic("strconv: internal error, rest != 0 but needed > 0")
}
// Emit digits for the fractional part. Each time, 10*fraction
// fits in a uint64 without overflow.
for needed > 0 {
fraction *= 10
ε *= 10 // the uncertainty scales as we multiply by ten.
if 2*ε > 1<<shift {
// the error is so large it could modify which digit to write, abort.
return false
}
digit := fraction >> shift
d.d[nd] = byte(digit + '0')
fraction -= digit << shift
nd++
needed--
}
d.nd = nd
}
// We have written a truncation of f (a numerator / 10^d.dp). The remaining part
// can be interpreted as a small number (< 1) to be added to the last digit of the
// numerator.
//
// If rest > 0, the amount is:
// (rest<<shift | fraction) / (pow10 << shift)
// fraction being known with a ±ε uncertainty.
// The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
//
// If rest = 0, pow10 == 1 and the amount is
// fraction / (1 << shift)
// fraction being known with a ±ε uncertainty.
//
// We pass this information to the rounding routine for adjustment.
ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
if !ok {
return false
}
// Trim trailing zeros.
for i := d.nd - 1; i >= 0; i-- {
if d.d[i] != '0' {
d.nd = i + 1
break
}
}
return true
}
// adjustLastDigitFixed assumes d contains the representation of the integral part
// of some number, whose fractional part is num / (den << shift). The numerator
// num is only known up to an uncertainty of size ε, assumed to be less than
// (den << shift)/2.
//
// It will increase the last digit by one to account for correct rounding, typically
// when the fractional part is greater than 1/2, and will return false if ε is such
// that no correct answer can be given.
func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
if num > den<<shift {
panic("strconv: num > den<<shift in adjustLastDigitFixed")
}
if 2*ε > den<<shift {
panic("strconv: ε > (den<<shift)/2")
}
if 2*(num+ε) < den<<shift {
return true
}
if 2*(num-ε) > den<<shift {
// increment d by 1.
i := d.nd - 1
for ; i >= 0; i-- {
if d.d[i] == '9' {
d.nd--
} else {
break
}
}
if i < 0 {
d.d[0] = '1'
d.nd = 1
d.dp++
} else {
d.d[i]++
}
return true
}
return false
}
// ShortestDecimal stores in d the shortest decimal representation of f
// which belongs to the open interval (lower, upper), where f is supposed
// to lie. It returns false whenever the result is unsure. The implementation
// uses the Grisu3 algorithm.
func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
if f.mant == 0 {
d.nd = 0
d.dp = 0
d.neg = f.neg
return true
}
if f.exp == 0 && *lower == *f && *lower == *upper {
// an exact integer.
var buf [24]byte
n := len(buf) - 1
for v := f.mant; v > 0; {
v1 := v / 10
v -= 10 * v1
buf[n] = byte(v + '0')
n--
v = v1
}
nd := len(buf) - n - 1
for i := 0; i < nd; i++ {
d.d[i] = buf[n+1+i]
}
d.nd, d.dp = nd, nd
for d.nd > 0 && d.d[d.nd-1] == '0' {
d.nd--
}
if d.nd == 0 {
d.dp = 0
}
d.neg = f.neg
return true
}
upper.Normalize()
// Uniformize exponents.
if f.exp > upper.exp {
f.mant <<= uint(f.exp - upper.exp)
f.exp = upper.exp
}
if lower.exp > upper.exp {
lower.mant <<= uint(lower.exp - upper.exp)
lower.exp = upper.exp
}
exp10 := frexp10Many(lower, f, upper)
// Take a safety margin due to rounding in frexp10Many, but we lose precision.
upper.mant++
lower.mant--
// The shortest representation of f is either rounded up or down, but
// in any case, it is a truncation of upper.
shift := uint(-upper.exp)
integer := uint32(upper.mant >> shift)
fraction := upper.mant - (uint64(integer) << shift)
// How far we can go down from upper until the result is wrong.
allowance := upper.mant - lower.mant
// How far we should go to get a very precise result.
targetDiff := upper.mant - f.mant
// Count integral digits: there are at most 10.
var integerDigits int
for i, pow := 0, uint64(1); i < 20; i++ {
if pow > uint64(integer) {
integerDigits = i
break
}
pow *= 10
}
for i := 0; i < integerDigits; i++ {
pow := uint64pow10[integerDigits-i-1]
digit := integer / uint32(pow)
d.d[i] = byte(digit + '0')
integer -= digit * uint32(pow)
// evaluate whether we should stop.
if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
d.nd = i + 1
d.dp = integerDigits + exp10
d.neg = f.neg
// Sometimes allowance is so large the last digit might need to be
// decremented to get closer to f.
return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
}
}
d.nd = integerDigits
d.dp = d.nd + exp10
d.neg = f.neg
// Compute digits of the fractional part. At each step fraction does not
// overflow. The choice of minExp implies that fraction is less than 2^60.
var digit int
multiplier := uint64(1)
for {
fraction *= 10
multiplier *= 10
digit = int(fraction >> shift)
d.d[d.nd] = byte(digit + '0')
d.nd++
fraction -= uint64(digit) << shift
if fraction < allowance*multiplier {
// We are in the admissible range. Note that if allowance is about to
// overflow, that is, allowance > 2^64/10, the condition is automatically
// true due to the limited range of fraction.
return adjustLastDigit(d,
fraction, targetDiff*multiplier, allowance*multiplier,
1<<shift, multiplier*2)
}
}
}
// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
// It assumes that a decimal digit is worth ulpDecimal*ε, and that
// all data is known with an error estimate of ulpBinary*ε.
func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
if ulpDecimal < 2*ulpBinary {
// Approximation is too wide.
return false
}
for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
d.d[d.nd-1]--
currentDiff += ulpDecimal
}
if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
// we have two choices, and don't know what to do.
return false
}
if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
// we went too far
return false
}
if d.nd == 1 && d.d[0] == '0' {
// the number has actually reached zero.
d.nd = 0
d.dp = 0
}
return true
}
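An in-package sketch (illustrative only) of the fast path that atof64 drives through AssignDecimal and floatBits: a false return means the error bound was inconclusive and the caller must fall back to the multiprecision decimal path.
func grisuFastPathSketch(mantissa uint64, exp10 int, neg, trunc bool) (bits uint64, ok bool) {
	var ext extFloat
	if !ext.AssignDecimal(mantissa, exp10, neg, trunc, &float64info) {
		return 0, false // approximation not provably correct; use the slow path
	}
	b, overflow := ext.floatBits(&float64info)
	return b, !overflow // overflow means the value rounded to ±Inf
}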

View file

@@ -0,0 +1,475 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary to decimal floating point conversion.
// Algorithm:
// 1) store mantissa in multiprecision decimal
// 2) shift decimal by exponent
// 3) read digits out & format
package internal
import "math"
// TODO: move elsewhere?
type floatInfo struct {
mantbits uint
expbits uint
bias int
}
var float32info = floatInfo{23, 8, -127}
var float64info = floatInfo{52, 11, -1023}
// formatFloat converts the floating-point number f to a string,
// according to the format fmt and precision prec. It rounds the
// result assuming that the original was obtained from a floating-point
// value of bitSize bits (32 for float32, 64 for float64).
//
// The format fmt is one of
// 'b' (-ddddp±ddd, a binary exponent),
// 'e' (-d.dddde±dd, a decimal exponent),
// 'E' (-d.ddddE±dd, a decimal exponent),
// 'f' (-ddd.dddd, no exponent),
// 'g' ('e' for large exponents, 'f' otherwise), or
// 'G' ('E' for large exponents, 'f' otherwise).
//
// The precision prec controls the number of digits
// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
// For 'g' and 'G' it is the total number of digits.
// The special precision -1 uses the smallest number of digits
// necessary such that ParseFloat will return f exactly.
func formatFloat(f float64, fmt byte, prec, bitSize int) string {
return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
}
// appendFloat appends the string form of the floating-point number f,
// as generated by formatFloat, to dst and returns the extended buffer.
func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte {
return genericFtoa(dst, f, fmt, prec, bitSize)
}
func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
var bits uint64
var flt *floatInfo
switch bitSize {
case 32:
bits = uint64(math.Float32bits(float32(val)))
flt = &float32info
case 64:
bits = math.Float64bits(val)
flt = &float64info
default:
panic("strconv: illegal AppendFloat/FormatFloat bitSize")
}
neg := bits>>(flt.expbits+flt.mantbits) != 0
exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
mant := bits & (uint64(1)<<flt.mantbits - 1)
switch exp {
case 1<<flt.expbits - 1:
// Inf, NaN
var s string
switch {
case mant != 0:
s = "NaN"
case neg:
s = "-Inf"
default:
s = "+Inf"
}
return append(dst, s...)
case 0:
// denormalized
exp++
default:
// add implicit top bit
mant |= uint64(1) << flt.mantbits
}
exp += flt.bias
// Pick off easy binary format.
if fmt == 'b' {
return fmtB(dst, neg, mant, exp, flt)
}
if !optimize {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
var digs decimalSlice
ok := false
// Negative precision means "only as much as needed to be exact."
shortest := prec < 0
if shortest {
// Try Grisu3 algorithm.
f := new(extFloat)
lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
var buf [32]byte
digs.d = buf[:]
ok = f.ShortestDecimal(&digs, &lower, &upper)
if !ok {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
// Precision for shortest representation mode.
switch fmt {
case 'e', 'E':
prec = digs.nd - 1
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else if fmt != 'f' {
// Fixed number of digits.
digits := prec
switch fmt {
case 'e', 'E':
digits++
case 'g', 'G':
if prec == 0 {
prec = 1
}
digits = prec
}
if digits <= 15 {
// try fast algorithm when the number of digits is reasonable.
var buf [24]byte
digs.d = buf[:]
f := extFloat{mant, exp - int(flt.mantbits), neg}
ok = f.FixedDecimal(&digs, digits)
}
}
if !ok {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
// bigFtoa uses multiprecision computations to format a float.
func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
d := new(decimal)
d.Assign(mant)
d.Shift(exp - int(flt.mantbits))
var digs decimalSlice
shortest := prec < 0
if shortest {
roundShortest(d, mant, exp, flt)
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
// Precision for shortest representation mode.
switch fmt {
case 'e', 'E':
prec = digs.nd - 1
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else {
// Round appropriately.
switch fmt {
case 'e', 'E':
d.Round(prec + 1)
case 'f':
d.Round(d.dp + prec)
case 'g', 'G':
if prec == 0 {
prec = 1
}
d.Round(prec)
}
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
}
return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
switch fmt {
case 'e', 'E':
return fmtE(dst, neg, digs, prec, fmt)
case 'f':
return fmtF(dst, neg, digs, prec)
case 'g', 'G':
// trailing fractional zeros in 'e' form will be trimmed.
eprec := prec
if eprec > digs.nd && digs.nd >= digs.dp {
eprec = digs.nd
}
// %e is used if the exponent from the conversion
// is less than -4 or greater than or equal to the precision.
// if precision was the shortest possible, use precision 6 for this decision.
if shortest {
eprec = 6
}
exp := digs.dp - 1
if exp < -4 || exp >= eprec {
if prec > digs.nd {
prec = digs.nd
}
return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
}
if prec > digs.dp {
prec = digs.nd
}
return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
}
// unknown format
return append(dst, '%', fmt)
}
// Round d (= mant * 2^exp) to the shortest number of digits
// that will let the original floating point value be precisely
// reconstructed. Size is original floating point size (64 or 32).
func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
// If mantissa is zero, the number is zero; stop now.
if mant == 0 {
d.nd = 0
return
}
// Compute upper and lower such that any decimal number
// between upper and lower (possibly inclusive)
// will round to the original floating point number.
// We may see at once that the number is already shortest.
//
// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
// The closest shorter number is at least 10^(dp-nd) away.
// The lower/upper bounds computed below are at distance
// at most 2^(exp-mantbits).
//
// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
// or equivalently log2(10)*(dp-nd) > exp-mantbits.
// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
minexp := flt.bias + 1 // minimum possible exponent
if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
// The number is already shortest.
return
}
// d = mant << (exp - mantbits)
// Next highest floating point number is mant+1 << exp-mantbits.
// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
upper := new(decimal)
upper.Assign(mant*2 + 1)
upper.Shift(exp - int(flt.mantbits) - 1)
// d = mant << (exp - mantbits)
// Next lowest floating point number is mant-1 << exp-mantbits,
// unless mant-1 drops the significant bit and exp is not the minimum exp,
// in which case the next lowest is mant*2-1 << exp-mantbits-1.
// Either way, call it mantlo << explo-mantbits.
// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
var mantlo uint64
var explo int
if mant > 1<<flt.mantbits || exp == minexp {
mantlo = mant - 1
explo = exp
} else {
mantlo = mant*2 - 1
explo = exp - 1
}
lower := new(decimal)
lower.Assign(mantlo*2 + 1)
lower.Shift(explo - int(flt.mantbits) - 1)
// The upper and lower bounds are possible outputs only if
// the original mantissa is even, so that IEEE round-to-even
// would round to the original mantissa and not the neighbors.
inclusive := mant%2 == 0
// Now we can figure out the minimum number of digits required.
// Walk along until d has distinguished itself from upper and lower.
for i := 0; i < d.nd; i++ {
var l, m, u byte // lower, middle, upper digits
if i < lower.nd {
l = lower.d[i]
} else {
l = '0'
}
m = d.d[i]
if i < upper.nd {
u = upper.d[i]
} else {
u = '0'
}
// Okay to round down (truncate) if lower has a different digit
// or if lower is inclusive and is exactly the result of rounding down.
okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
// Okay to round up if upper has a different digit and
// either upper is inclusive or upper is bigger than the result of rounding up.
okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
// If it's okay to do either, then round to the nearest one.
// If it's okay to do only one, do it.
switch {
case okdown && okup:
d.Round(i + 1)
return
case okdown:
d.RoundDown(i + 1)
return
case okup:
d.RoundUp(i + 1)
return
}
}
}
type decimalSlice struct {
d []byte
nd, dp int
neg bool
}
// %e: -d.ddddde±dd
func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
// sign
if neg {
dst = append(dst, '-')
}
// first digit
ch := byte('0')
if d.nd != 0 {
ch = d.d[0]
}
dst = append(dst, ch)
// .moredigits
if prec > 0 {
dst = append(dst, '.')
i := 1
m := d.nd + prec + 1 - max(d.nd, prec+1)
for i < m {
dst = append(dst, d.d[i])
i++
}
for i <= prec {
dst = append(dst, '0')
i++
}
}
// e±
dst = append(dst, fmt)
exp := d.dp - 1
if d.nd == 0 { // special case: 0 has exponent 0
exp = 0
}
if exp < 0 {
ch = '-'
exp = -exp
} else {
ch = '+'
}
dst = append(dst, ch)
// dddd
var buf [3]byte
i := len(buf)
for exp >= 10 {
i--
buf[i] = byte(exp%10 + '0')
exp /= 10
}
// exp < 10
i--
buf[i] = byte(exp + '0')
switch i {
case 0:
dst = append(dst, buf[0], buf[1], buf[2])
case 1:
dst = append(dst, buf[1], buf[2])
case 2:
// leading zeroes
dst = append(dst, '0', buf[2])
}
return dst
}
// %f: -ddddddd.ddddd
func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
// sign
if neg {
dst = append(dst, '-')
}
// integer, padded with zeros as needed.
if d.dp > 0 {
var i int
for i = 0; i < d.dp && i < d.nd; i++ {
dst = append(dst, d.d[i])
}
for ; i < d.dp; i++ {
dst = append(dst, '0')
}
} else {
dst = append(dst, '0')
}
// fraction
if prec > 0 {
dst = append(dst, '.')
for i := 0; i < prec; i++ {
ch := byte('0')
if j := d.dp + i; 0 <= j && j < d.nd {
ch = d.d[j]
}
dst = append(dst, ch)
}
}
return dst
}
// %b: -ddddddddp+ddd
func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
var buf [50]byte
w := len(buf)
exp -= int(flt.mantbits)
esign := byte('+')
if exp < 0 {
esign = '-'
exp = -exp
}
n := 0
for exp > 0 || n < 1 {
n++
w--
buf[w] = byte(exp%10 + '0')
exp /= 10
}
w--
buf[w] = esign
w--
buf[w] = 'p'
n = 0
for mant > 0 || n < 1 {
n++
w--
buf[w] = byte(mant%10 + '0')
mant /= 10
}
if neg {
w--
buf[w] = '-'
}
return append(dst, buf[w:]...)
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
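An in-package sketch (illustrative only) of the formatting entry points above; prec == -1 requests the shortest representation that round-trips, mirroring strconv.FormatFloat.
func formatFloatSketch() []string {
	return []string{
		formatFloat(1.0/3.0, 'g', -1, 64),          // "0.3333333333333333"
		formatFloat(1234.5678, 'e', 2, 64),         // "1.23e+03"
		string(appendFloat(nil, 0.25, 'b', 0, 64)), // binary exponent form
	}
}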

161
vendor/github.com/pquerna/ffjson/fflib/v1/iota.go generated vendored Normal file
View file

@@ -0,0 +1,161 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's strconv/itoa.go */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
import (
"io"
)
const (
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
)
var shifts = [len(digits) + 1]uint{
1 << 1: 1,
1 << 2: 2,
1 << 3: 3,
1 << 4: 4,
1 << 5: 5,
}
var smallNumbers = [][]byte{
[]byte("0"),
[]byte("1"),
[]byte("2"),
[]byte("3"),
[]byte("4"),
[]byte("5"),
[]byte("6"),
[]byte("7"),
[]byte("8"),
[]byte("9"),
[]byte("10"),
}
type FormatBitsWriter interface {
io.Writer
io.ByteWriter
}
type FormatBitsScratch struct{}
//
// DEPRECATED: `scratch` is no longer used; FormatBits2 is available.
//
// FormatBits writes the string representation of u in the given base to dst.
// If neg is set, u is treated as a negative int64 value.
//
func FormatBits(scratch *FormatBitsScratch, dst FormatBitsWriter, u uint64, base int, neg bool) {
FormatBits2(dst, u, base, neg)
}
// FormatBits2 writes the string representation of u in the given base to dst.
// If neg is set, u is treated as a negative int64 value: the caller passes the
// raw uint64 bits and the sign flag separately.
//
func FormatBits2(dst FormatBitsWriter, u uint64, base int, neg bool) {
if base < 2 || base > len(digits) {
panic("strconv: illegal AppendInt/FormatInt base")
}
// fast path for small common numbers
if u <= 10 {
if neg {
dst.WriteByte('-')
}
dst.Write(smallNumbers[u])
return
}
// 2 <= base && base <= len(digits)
var a = makeSlice(65)
// var a [64 + 1]byte // +1 for sign of 64bit value in base 2
i := len(a)
if neg {
u = -u
}
// convert bits
if base == 10 {
// common case: use constants for / and % because
// the compiler can optimize it into a multiply+shift,
// and unroll loop
for u >= 100 {
i -= 2
q := u / 100
j := uintptr(u - q*100)
a[i+1] = digits01[j]
a[i+0] = digits10[j]
u = q
}
if u >= 10 {
i--
q := u / 10
a[i] = digits[uintptr(u-q*10)]
u = q
}
} else if s := shifts[base]; s > 0 {
// base is power of 2: use shifts and masks instead of / and %
b := uint64(base)
m := uintptr(b) - 1 // == 1<<s - 1
for u >= b {
i--
a[i] = digits[uintptr(u)&m]
u >>= s
}
} else {
// general case
b := uint64(base)
for u >= b {
i--
a[i] = digits[uintptr(u%b)]
u /= b
}
}
// u < base
i--
a[i] = digits[uintptr(u)]
// add sign, if any
if neg {
i--
a[i] = '-'
}
dst.Write(a[i:])
Pool(a)
return
}
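Illustrative usage sketch (not part of the vendored file): *bytes.Buffer satisfies FormatBitsWriter (io.Writer plus io.ByteWriter), and a negative int64 is passed as its raw uint64 bits together with the sign flag, matching the u = -u recovery in the function above.
package main

import (
	"bytes"
	"fmt"

	fflib "github.com/pquerna/ffjson/fflib/v1"
)

func main() {
	var buf bytes.Buffer
	n := int64(-42)
	fflib.FormatBits2(&buf, uint64(n), 10, n < 0) // "-42"
	buf.WriteByte(' ')
	fflib.FormatBits2(&buf, 255, 16, false) // "ff"
	fmt.Println(buf.String())               // -42 ff
}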

512
vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go generated vendored Normal file
View file

@@ -0,0 +1,512 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are based on Go stdlib's encoding/json/encode.go */
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package v1
import (
"io"
"unicode/utf8"
"strconv"
"unicode/utf16"
"unicode"
)
const hex = "0123456789abcdef"
type JsonStringWriter interface {
io.Writer
io.ByteWriter
stringWriter
}
func WriteJsonString(buf JsonStringWriter, s string) {
WriteJson(buf, []byte(s))
}
/**
* Function ported from encoding/json: func (e *encodeState) string(s string) (int, error)
*/
func WriteJson(buf JsonStringWriter, s []byte) {
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
/*
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
*/
if lt[b] == true {
i++
continue
}
if start < i {
buf.Write(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
default:
// This encodes bytes < 0x20 except for \n and \r,
// as well as < and >. The latter are escaped because they
// can lead to security holes when user-controlled strings
// are rendered into JSON and served to some browsers.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\u202`)
buf.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.Write(s[start:])
}
buf.WriteByte('"')
}
// UnquoteBytes will decode []byte containing json string to go string
// ported from encoding/json/decode.go
func UnquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError && size == 1 {
break
}
r += size
}
if r == len(s) {
return s, true
}
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
for r < len(s) {
// Out of room? Can only happen if s is full of
// malformed UTF-8 and we're replacing each
// byte with RuneError.
if w >= len(b)-2*utf8.UTFMax {
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
copy(nb, b[0:w])
b = nb
}
switch c := s[r]; {
case c == '\\':
r++
if r >= len(s) {
return
}
switch s[r] {
default:
return
case '"', '\\', '/', '\'':
b[w] = s[r]
r++
w++
case 'b':
b[w] = '\b'
r++
w++
case 'f':
b[w] = '\f'
r++
w++
case 'n':
b[w] = '\n'
r++
w++
case 'r':
b[w] = '\r'
r++
w++
case 't':
b[w] = '\t'
r++
w++
case 'u':
r--
rr := getu4(s[r:])
if rr < 0 {
return
}
r += 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(s[r:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
// A valid pair; consume.
r += 6
w += utf8.EncodeRune(b[w:], dec)
break
}
// Invalid surrogate; fall back to replacement rune.
rr = unicode.ReplacementChar
}
w += utf8.EncodeRune(b[w:], rr)
}
// Quote, control characters are invalid.
case c == '"', c < ' ':
return
// ASCII
case c < utf8.RuneSelf:
b[w] = c
r++
w++
// Coerce to well-formed UTF-8.
default:
rr, size := utf8.DecodeRune(s[r:])
r += size
w += utf8.EncodeRune(b[w:], rr)
}
}
return b[0:w], true
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
return -1
}
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
if err != nil {
return -1
}
return rune(r)
}
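Illustrative round-trip sketch (not part of the vendored file): *bytes.Buffer provides Write, WriteByte and WriteString, so it should satisfy JsonStringWriter (assuming the unexported stringWriter interface only requires WriteString); note that '<' and '>' are escaped to \u003c and \u003e by the encoder above.
package main

import (
	"bytes"
	"fmt"

	fflib "github.com/pquerna/ffjson/fflib/v1"
)

func main() {
	var buf bytes.Buffer
	fflib.WriteJsonString(&buf, `say "hi" <b>`)
	fmt.Println(buf.String()) // "say \"hi\" \u003cb\u003e"

	raw, ok := fflib.UnquoteBytes(buf.Bytes())
	fmt.Println(ok, string(raw)) // true say "hi" <b>
}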
// TODO(pquerna): consider combining with the normal byte mask.
var lt [256]bool = [256]bool{
false, /* 0 */
false, /* 1 */
false, /* 2 */
false, /* 3 */
false, /* 4 */
false, /* 5 */
false, /* 6 */
false, /* 7 */
false, /* 8 */
false, /* 9 */
false, /* 10 */
false, /* 11 */
false, /* 12 */
false, /* 13 */
false, /* 14 */
false, /* 15 */
false, /* 16 */
false, /* 17 */
false, /* 18 */
false, /* 19 */
false, /* 20 */
false, /* 21 */
false, /* 22 */
false, /* 23 */
false, /* 24 */
false, /* 25 */
false, /* 26 */
false, /* 27 */
false, /* 28 */
false, /* 29 */
false, /* 30 */
false, /* 31 */
true, /* 32 */
true, /* 33 */
false, /* 34 */
true, /* 35 */
true, /* 36 */
true, /* 37 */
false, /* 38 */
true, /* 39 */
true, /* 40 */
true, /* 41 */
true, /* 42 */
true, /* 43 */
true, /* 44 */
true, /* 45 */
true, /* 46 */
true, /* 47 */
true, /* 48 */
true, /* 49 */
true, /* 50 */
true, /* 51 */
true, /* 52 */
true, /* 53 */
true, /* 54 */
true, /* 55 */
true, /* 56 */
true, /* 57 */
true, /* 58 */
true, /* 59 */
false, /* 60 */
true, /* 61 */
false, /* 62 */
true, /* 63 */
true, /* 64 */
true, /* 65 */
true, /* 66 */
true, /* 67 */
true, /* 68 */
true, /* 69 */
true, /* 70 */
true, /* 71 */
true, /* 72 */
true, /* 73 */
true, /* 74 */
true, /* 75 */
true, /* 76 */
true, /* 77 */
true, /* 78 */
true, /* 79 */
true, /* 80 */
true, /* 81 */
true, /* 82 */
true, /* 83 */
true, /* 84 */
true, /* 85 */
true, /* 86 */
true, /* 87 */
true, /* 88 */
true, /* 89 */
true, /* 90 */
true, /* 91 */
false, /* 92 */
true, /* 93 */
true, /* 94 */
true, /* 95 */
true, /* 96 */
true, /* 97 */
true, /* 98 */
true, /* 99 */
true, /* 100 */
true, /* 101 */
true, /* 102 */
true, /* 103 */
true, /* 104 */
true, /* 105 */
true, /* 106 */
true, /* 107 */
true, /* 108 */
true, /* 109 */
true, /* 110 */
true, /* 111 */
true, /* 112 */
true, /* 113 */
true, /* 114 */
true, /* 115 */
true, /* 116 */
true, /* 117 */
true, /* 118 */
true, /* 119 */
true, /* 120 */
true, /* 121 */
true, /* 122 */
true, /* 123 */
true, /* 124 */
true, /* 125 */
true, /* 126 */
true, /* 127 */
true, /* 128 */
true, /* 129 */
true, /* 130 */
true, /* 131 */
true, /* 132 */
true, /* 133 */
true, /* 134 */
true, /* 135 */
true, /* 136 */
true, /* 137 */
true, /* 138 */
true, /* 139 */
true, /* 140 */
true, /* 141 */
true, /* 142 */
true, /* 143 */
true, /* 144 */
true, /* 145 */
true, /* 146 */
true, /* 147 */
true, /* 148 */
true, /* 149 */
true, /* 150 */
true, /* 151 */
true, /* 152 */
true, /* 153 */
true, /* 154 */
true, /* 155 */
true, /* 156 */
true, /* 157 */
true, /* 158 */
true, /* 159 */
true, /* 160 */
true, /* 161 */
true, /* 162 */
true, /* 163 */
true, /* 164 */
true, /* 165 */
true, /* 166 */
true, /* 167 */
true, /* 168 */
true, /* 169 */
true, /* 170 */
true, /* 171 */
true, /* 172 */
true, /* 173 */
true, /* 174 */
true, /* 175 */
true, /* 176 */
true, /* 177 */
true, /* 178 */
true, /* 179 */
true, /* 180 */
true, /* 181 */
true, /* 182 */
true, /* 183 */
true, /* 184 */
true, /* 185 */
true, /* 186 */
true, /* 187 */
true, /* 188 */
true, /* 189 */
true, /* 190 */
true, /* 191 */
true, /* 192 */
true, /* 193 */
true, /* 194 */
true, /* 195 */
true, /* 196 */
true, /* 197 */
true, /* 198 */
true, /* 199 */
true, /* 200 */
true, /* 201 */
true, /* 202 */
true, /* 203 */
true, /* 204 */
true, /* 205 */
true, /* 206 */
true, /* 207 */
true, /* 208 */
true, /* 209 */
true, /* 210 */
true, /* 211 */
true, /* 212 */
true, /* 213 */
true, /* 214 */
true, /* 215 */
true, /* 216 */
true, /* 217 */
true, /* 218 */
true, /* 219 */
true, /* 220 */
true, /* 221 */
true, /* 222 */
true, /* 223 */
true, /* 224 */
true, /* 225 */
true, /* 226 */
true, /* 227 */
true, /* 228 */
true, /* 229 */
true, /* 230 */
true, /* 231 */
true, /* 232 */
true, /* 233 */
true, /* 234 */
true, /* 235 */
true, /* 236 */
true, /* 237 */
true, /* 238 */
true, /* 239 */
true, /* 240 */
true, /* 241 */
true, /* 242 */
true, /* 243 */
true, /* 244 */
true, /* 245 */
true, /* 246 */
true, /* 247 */
true, /* 248 */
true, /* 249 */
true, /* 250 */
true, /* 251 */
true, /* 252 */
true, /* 253 */
true, /* 254 */
true, /* 255 */
}

937
vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go generated vendored Normal file
View file

@ -0,0 +1,937 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Portions of this file are derived from yajl: <https://github.com/lloyd/yajl> */
/*
* Copyright (c) 2007-2014, Lloyd Hilaiel <me@lloyd.io>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package v1
import (
"errors"
"fmt"
"io"
)
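// FFParseState tracks where the parser is while walking a JSON map: at the
// opening of the map, expecting a key, expecting a colon, expecting a value,
// or just after a value.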
type FFParseState int
const (
FFParse_map_start FFParseState = iota
FFParse_want_key
FFParse_want_colon
FFParse_want_value
FFParse_after_value
)
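// FFTok identifies the kind of token returned by FFLexer.Scan. Note that, in
// the upstream naming, FFTok_left_bracket/FFTok_right_bracket correspond to
// '{' and '}', while FFTok_left_brace/FFTok_right_brace correspond to '[' and ']'.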
type FFTok int
const (
FFTok_init FFTok = iota
FFTok_bool FFTok = iota
FFTok_colon FFTok = iota
FFTok_comma FFTok = iota
FFTok_eof FFTok = iota
FFTok_error FFTok = iota
FFTok_left_brace FFTok = iota
FFTok_left_bracket FFTok = iota
FFTok_null FFTok = iota
FFTok_right_brace FFTok = iota
FFTok_right_bracket FFTok = iota
/* we differentiate between integers and doubles to allow the
* parser to interpret the number without re-scanning */
FFTok_integer FFTok = iota
FFTok_double FFTok = iota
FFTok_string FFTok = iota
/* comment tokens are not currently returned to the parser, ever */
FFTok_comment FFTok = iota
)
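// FFErr enumerates the lexer's error conditions; ToError converts a value
// into a descriptive Go error.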
type FFErr int
const (
FFErr_e_ok FFErr = iota
FFErr_io FFErr = iota
FFErr_string_invalid_utf8 FFErr = iota
FFErr_string_invalid_escaped_char FFErr = iota
FFErr_string_invalid_json_char FFErr = iota
FFErr_string_invalid_hex_char FFErr = iota
FFErr_invalid_char FFErr = iota
FFErr_invalid_string FFErr = iota
FFErr_missing_integer_after_decimal FFErr = iota
FFErr_missing_integer_after_exponent FFErr = iota
FFErr_missing_integer_after_minus FFErr = iota
FFErr_unallowed_comment FFErr = iota
FFErr_incomplete_comment FFErr = iota
FFErr_unexpected_token_type FFErr = iota // TODO: improve this error
)
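// FFLexer tokenizes a JSON document held in memory. Output receives the bytes
// of the most recently scanned token, Token records its type, and Error and
// BigError hold the lexer-level and underlying errors, if any.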
type FFLexer struct {
reader *ffReader
Output DecodingBuffer
Token FFTok
Error FFErr
BigError error
// TODO: convert all of this to an interface
lastCurrentChar int
captureAll bool
buf Buffer
}
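// NewFFLexer returns a lexer positioned at the start of input.
//
// Illustrative usage (a sketch, not part of the upstream source): callers
// typically loop on Scan until FFTok_eof or FFTok_error is returned:
//
//	lexer := NewFFLexer(data)
//	for tok := lexer.Scan(); tok != FFTok_eof && tok != FFTok_error; tok = lexer.Scan() {
//		// inspect tok and lexer.Output here
//	}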
func NewFFLexer(input []byte) *FFLexer {
fl := &FFLexer{
Token: FFTok_init,
Error: FFErr_e_ok,
reader: newffReader(input),
Output: &Buffer{},
}
// TODO: guess size?
//fl.Output.Grow(64)
return fl
}
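// LexerError decorates an underlying error with the byte offset, line, and
// character position at which lexing failed.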
type LexerError struct {
offset int
line int
char int
err error
}
// Reset the Lexer and add new input.
func (ffl *FFLexer) Reset(input []byte) {
ffl.Token = FFTok_init
ffl.Error = FFErr_e_ok
ffl.BigError = nil
ffl.reader.Reset(input)
ffl.lastCurrentChar = 0
ffl.Output.Reset()
}
func (le *LexerError) Error() string {
return fmt.Sprintf(`ffjson error: (%T)%s offset=%d line=%d char=%d`,
le.err, le.err.Error(),
le.offset, le.line, le.char)
}
func (ffl *FFLexer) WrapErr(err error) error {
line, char := ffl.reader.PosWithLine()
// TODO: calculate lines/characters based on offset
return &LexerError{
offset: ffl.reader.Pos(),
line: line,
char: char,
err: err,
}
}
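// scanReadByte reads the next input byte, skipping whitespace unless
// captureAll is set, and records any I/O failure in Error and BigError.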
func (ffl *FFLexer) scanReadByte() (byte, error) {
var c byte
var err error
if ffl.captureAll {
c, err = ffl.reader.ReadByte()
} else {
c, err = ffl.reader.ReadByteNoWS()
}
if err != nil {
ffl.Error = FFErr_io
ffl.BigError = err
return 0, err
}
return c, nil
}
func (ffl *FFLexer) readByte() (byte, error) {
c, err := ffl.reader.ReadByte()
if err != nil {
ffl.Error = FFErr_io
ffl.BigError = err
return 0, err
}
return c, nil
}
func (ffl *FFLexer) unreadByte() {
ffl.reader.UnreadByte()
}
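// wantBytes consumes exactly the bytes in want, appends them to Output, and
// returns iftrue; on a mismatch it unreads the offending byte, sets Error,
// and returns FFTok_error.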
func (ffl *FFLexer) wantBytes(want []byte, iftrue FFTok) FFTok {
startPos := ffl.reader.Pos()
for _, b := range want {
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
if c != b {
ffl.unreadByte()
// fmt.Printf("wanted bytes: %s\n", string(want))
// TODO(pquerna): this is a bad error message
ffl.Error = FFErr_invalid_string
return FFTok_error
}
}
endPos := ffl.reader.Pos()
ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
return iftrue
}
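// lexComment consumes a '//' comment through the end of the line, or a
// '/* ... */' comment, returning FFTok_comment on success and FFTok_error
// (with Error set) otherwise.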
func (ffl *FFLexer) lexComment() FFTok {
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
if c == '/' {
// a // comment, scan until line ends.
for {
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
if c == '\n' {
return FFTok_comment
}
}
} else if c == '*' {
// a /* ... */ comment, scan for the closing */
for {
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
if c == '*' {
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
if c == '/' {
return FFTok_comment
}
ffl.Error = FFErr_incomplete_comment
return FFTok_error
}
}
} else {
ffl.Error = FFErr_incomplete_comment
return FFTok_error
}
}
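// lexString scans the body of a JSON string. In capture mode the decoded
// bytes are re-encoded as JSON into Output via WriteJson; otherwise the
// decoded bytes are written to Output directly.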
func (ffl *FFLexer) lexString() FFTok {
if ffl.captureAll {
ffl.buf.Reset()
err := ffl.reader.SliceString(&ffl.buf)
if err != nil {
ffl.BigError = err
return FFTok_error
}
WriteJson(ffl.Output, ffl.buf.Bytes())
return FFTok_string
} else {
err := ffl.reader.SliceString(ffl.Output)
if err != nil {
ffl.BigError = err
return FFTok_error
}
return FFTok_string
}
}
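// lexNumber scans an optionally signed number. A fraction or exponent makes
// it FFTok_double rather than FFTok_integer; the raw text is copied to Output.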
func (ffl *FFLexer) lexNumber() FFTok {
var numRead int = 0
tok := FFTok_integer
startPos := ffl.reader.Pos()
c, err := ffl.readByte()
if err != nil {
return FFTok_error
}
/* optional leading minus */
if c == '-' {
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
}
/* a single zero, or a series of integers */
if c == '0' {
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
} else if c >= '1' && c <= '9' {
for c >= '0' && c <= '9' {
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
}
} else {
ffl.unreadByte()
ffl.Error = FFErr_missing_integer_after_minus
return FFTok_error
}
if c == '.' {
numRead = 0
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
for c >= '0' && c <= '9' {
numRead++
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
}
if numRead == 0 {
ffl.unreadByte()
ffl.Error = FFErr_missing_integer_after_decimal
return FFTok_error
}
tok = FFTok_double
}
/* optional exponent (indicates this is floating point) */
if c == 'e' || c == 'E' {
numRead = 0
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
/* optional sign */
if c == '+' || c == '-' {
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
}
for c >= '0' && c <= '9' {
numRead++
c, err = ffl.readByte()
if err != nil {
return FFTok_error
}
}
if numRead == 0 {
ffl.Error = FFErr_missing_integer_after_exponent
return FFTok_error
}
tok = FFTok_double
}
ffl.unreadByte()
endPos := ffl.reader.Pos()
ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
return tok
}
var true_bytes = []byte{'r', 'u', 'e'}
var false_bytes = []byte{'a', 'l', 's', 'e'}
var null_bytes = []byte{'u', 'l', 'l'}
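// Scan reads and returns the next token, also recording it in Token. The
// token's bytes are available in Output, which is reset on each call unless
// capture mode is active.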
func (ffl *FFLexer) Scan() FFTok {
tok := FFTok_error
if !ffl.captureAll {
ffl.Output.Reset()
}
ffl.Token = FFTok_init
for {
c, err := ffl.scanReadByte()
if err != nil {
if err == io.EOF {
return FFTok_eof
} else {
return FFTok_error
}
}
switch c {
case '{':
tok = FFTok_left_bracket
if ffl.captureAll {
ffl.Output.WriteByte('{')
}
goto lexed
case '}':
tok = FFTok_right_bracket
if ffl.captureAll {
ffl.Output.WriteByte('}')
}
goto lexed
case '[':
tok = FFTok_left_brace
if ffl.captureAll {
ffl.Output.WriteByte('[')
}
goto lexed
case ']':
tok = FFTok_right_brace
if ffl.captureAll {
ffl.Output.WriteByte(']')
}
goto lexed
case ',':
tok = FFTok_comma
if ffl.captureAll {
ffl.Output.WriteByte(',')
}
goto lexed
case ':':
tok = FFTok_colon
if ffl.captureAll {
ffl.Output.WriteByte(':')
}
goto lexed
case '\t', '\n', '\v', '\f', '\r', ' ':
if ffl.captureAll {
ffl.Output.WriteByte(c)
}
break
case 't':
ffl.Output.WriteByte('t')
tok = ffl.wantBytes(true_bytes, FFTok_bool)
goto lexed
case 'f':
ffl.Output.WriteByte('f')
tok = ffl.wantBytes(false_bytes, FFTok_bool)
goto lexed
case 'n':
ffl.Output.WriteByte('n')
tok = ffl.wantBytes(null_bytes, FFTok_null)
goto lexed
case '"':
tok = ffl.lexString()
goto lexed
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
ffl.unreadByte()
tok = ffl.lexNumber()
goto lexed
case '/':
tok = ffl.lexComment()
goto lexed
default:
tok = FFTok_error
ffl.Error = FFErr_invalid_char
}
}
lexed:
ffl.Token = tok
return tok
}
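// scanField consumes the value whose first token was start. Objects and
// arrays are scanned through their matching close token, tracking nesting
// depth; when capture is true the captured bytes are returned, otherwise nil.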
func (ffl *FFLexer) scanField(start FFTok, capture bool) ([]byte, error) {
switch start {
case FFTok_left_brace,
FFTok_left_bracket:
{
end := FFTok_right_brace
if start == FFTok_left_bracket {
end = FFTok_right_bracket
if capture {
ffl.Output.WriteByte('{')
}
} else {
if capture {
ffl.Output.WriteByte('[')
}
}
depth := 1
if capture {
ffl.captureAll = true
}
// TODO: work.
scanloop:
for {
tok := ffl.Scan()
//fmt.Printf("capture-token: %v end: %v depth: %v\n", tok, end, depth)
switch tok {
case FFTok_eof:
return nil, errors.New("ffjson: unexpected EOF")
case FFTok_error:
if ffl.BigError != nil {
return nil, ffl.BigError
}
return nil, ffl.Error.ToError()
case end:
depth--
if depth == 0 {
break scanloop
}
case start:
depth++
}
}
if capture {
ffl.captureAll = false
}
if capture {
return ffl.Output.Bytes(), nil
} else {
return nil, nil
}
}
case FFTok_bool,
FFTok_integer,
FFTok_null,
FFTok_double:
// simple value, return it.
if capture {
return ffl.Output.Bytes(), nil
} else {
return nil, nil
}
case FFTok_string:
//TODO(pquerna): so, other users expect this to be a quoted string :(
if capture {
ffl.buf.Reset()
WriteJson(&ffl.buf, ffl.Output.Bytes())
return ffl.buf.Bytes(), nil
} else {
return nil, nil
}
}
return nil, fmt.Errorf("ffjson: invalid capture type: %v", start)
}
// CaptureField captures an entire field value, including nested objects and
// arrays, and returns it as a []byte suitable to pass to a sub-object's
// UnmarshalJSON.
func (ffl *FFLexer) CaptureField(start FFTok) ([]byte, error) {
return ffl.scanField(start, true)
}
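// SkipField scans past the field value that begins with token start,
// discarding its contents.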
func (ffl *FFLexer) SkipField(start FFTok) error {
_, err := ffl.scanField(start, false)
return err
}
// TODO(pquerna): return line number and offset.
func (err FFErr) ToError() error {
switch err {
case FFErr_e_ok:
return nil
case FFErr_io:
return errors.New("ffjson: IO error")
case FFErr_string_invalid_utf8:
return errors.New("ffjson: string with invalid UTF-8 sequence")
case FFErr_string_invalid_escaped_char:
return errors.New("ffjson: string with invalid escaped character")
case FFErr_string_invalid_json_char:
return errors.New("ffjson: string with invalid JSON character")
case FFErr_string_invalid_hex_char:
return errors.New("ffjson: string with invalid hex character")
case FFErr_invalid_char:
return errors.New("ffjson: invalid character")
case FFErr_invalid_string:
return errors.New("ffjson: invalid string")
case FFErr_missing_integer_after_decimal:
return errors.New("ffjson: missing integer after decimal")
case FFErr_missing_integer_after_exponent:
return errors.New("ffjson: missing integer after exponent")
case FFErr_missing_integer_after_minus:
return errors.New("ffjson: missing integer after minus")
case FFErr_unallowed_comment:
return errors.New("ffjson: unallowed comment")
case FFErr_incomplete_comment:
return errors.New("ffjson: incomplete comment")
case FFErr_unexpected_token_type:
return errors.New("ffjson: unexpected token sequence")
}
panic(fmt.Sprintf("unknown error type: %v ", err))
}
func (state FFParseState) String() string {
switch state {
case FFParse_map_start:
return "map:start"
case FFParse_want_key:
return "want_key"
case FFParse_want_colon:
return "want_colon"
case FFParse_want_value:
return "want_value"
case FFParse_after_value:
return "after_value"
}
panic(fmt.Sprintf("unknown parse state: %d", int(state)))
}
func (tok FFTok) String() string {
switch tok {
case FFTok_init:
return "tok:init"
case FFTok_bool:
return "tok:bool"
case FFTok_colon:
return "tok:colon"
case FFTok_comma:
return "tok:comma"
case FFTok_eof:
return "tok:eof"
case FFTok_error:
return "tok:error"
case FFTok_left_brace:
return "tok:left_brace"
case FFTok_left_bracket:
return "tok:left_bracket"
case FFTok_null:
return "tok:null"
case FFTok_right_brace:
return "tok:right_brace"
case FFTok_right_bracket:
return "tok:right_bracket"
case FFTok_integer:
return "tok:integer"
case FFTok_double:
return "tok:double"
case FFTok_string:
return "tok:string"
case FFTok_comment:
return "comment"
}
panic(fmt.Sprintf("unknown token: %d", int(tok)))
}
/* a lookup table which lets us quickly determine three things:
* cVEC - valid escaped control char
* note: the solidus '/' may be escaped or not.
* cIJC - invalid json char
* cVHC - valid hex char
* cNFP - needs further processing (from a string scanning perspective)
* cNUC - needs utf8 checking when enabled (from a string scanning perspective)
*/
const (
cVEC int8 = 0x01
cIJC int8 = 0x02
cVHC int8 = 0x04
cNFP int8 = 0x08
cNUC int8 = 0x10
)
var byteLookupTable [256]int8 = [256]int8{
cIJC, /* 0 */
cIJC, /* 1 */
cIJC, /* 2 */
cIJC, /* 3 */
cIJC, /* 4 */
cIJC, /* 5 */
cIJC, /* 6 */
cIJC, /* 7 */
cIJC, /* 8 */
cIJC, /* 9 */
cIJC, /* 10 */
cIJC, /* 11 */
cIJC, /* 12 */
cIJC, /* 13 */
cIJC, /* 14 */
cIJC, /* 15 */
cIJC, /* 16 */
cIJC, /* 17 */
cIJC, /* 18 */
cIJC, /* 19 */
cIJC, /* 20 */
cIJC, /* 21 */
cIJC, /* 22 */
cIJC, /* 23 */
cIJC, /* 24 */
cIJC, /* 25 */
cIJC, /* 26 */
cIJC, /* 27 */
cIJC, /* 28 */
cIJC, /* 29 */
cIJC, /* 30 */
cIJC, /* 31 */
0, /* 32 */
0, /* 33 */
cVEC | cIJC | cNFP, /* 34 */
0, /* 35 */
0, /* 36 */
0, /* 37 */
0, /* 38 */
0, /* 39 */
0, /* 40 */
0, /* 41 */
0, /* 42 */
0, /* 43 */
0, /* 44 */
0, /* 45 */
0, /* 46 */
cVEC, /* 47 */
cVHC, /* 48 */
cVHC, /* 49 */
cVHC, /* 50 */
cVHC, /* 51 */
cVHC, /* 52 */
cVHC, /* 53 */
cVHC, /* 54 */
cVHC, /* 55 */
cVHC, /* 56 */
cVHC, /* 57 */
0, /* 58 */
0, /* 59 */
0, /* 60 */
0, /* 61 */
0, /* 62 */
0, /* 63 */
0, /* 64 */
cVHC, /* 65 */
cVHC, /* 66 */
cVHC, /* 67 */
cVHC, /* 68 */
cVHC, /* 69 */
cVHC, /* 70 */
0, /* 71 */
0, /* 72 */
0, /* 73 */
0, /* 74 */
0, /* 75 */
0, /* 76 */
0, /* 77 */
0, /* 78 */
0, /* 79 */
0, /* 80 */
0, /* 81 */
0, /* 82 */
0, /* 83 */
0, /* 84 */
0, /* 85 */
0, /* 86 */
0, /* 87 */
0, /* 88 */
0, /* 89 */
0, /* 90 */
0, /* 91 */
cVEC | cIJC | cNFP, /* 92 */
0, /* 93 */
0, /* 94 */
0, /* 95 */
0, /* 96 */
cVHC, /* 97 */
cVEC | cVHC, /* 98 */
cVHC, /* 99 */
cVHC, /* 100 */
cVHC, /* 101 */
cVEC | cVHC, /* 102 */
0, /* 103 */
0, /* 104 */
0, /* 105 */
0, /* 106 */
0, /* 107 */
0, /* 108 */
0, /* 109 */
cVEC, /* 110 */
0, /* 111 */
0, /* 112 */
0, /* 113 */
cVEC, /* 114 */
0, /* 115 */
cVEC, /* 116 */
0, /* 117 */
0, /* 118 */
0, /* 119 */
0, /* 120 */
0, /* 121 */
0, /* 122 */
0, /* 123 */
0, /* 124 */
0, /* 125 */
0, /* 126 */
0, /* 127 */
cNUC, /* 128 */
cNUC, /* 129 */
cNUC, /* 130 */
cNUC, /* 131 */
cNUC, /* 132 */
cNUC, /* 133 */
cNUC, /* 134 */
cNUC, /* 135 */
cNUC, /* 136 */
cNUC, /* 137 */
cNUC, /* 138 */
cNUC, /* 139 */
cNUC, /* 140 */
cNUC, /* 141 */
cNUC, /* 142 */
cNUC, /* 143 */
cNUC, /* 144 */
cNUC, /* 145 */
cNUC, /* 146 */
cNUC, /* 147 */
cNUC, /* 148 */
cNUC, /* 149 */
cNUC, /* 150 */
cNUC, /* 151 */
cNUC, /* 152 */
cNUC, /* 153 */
cNUC, /* 154 */
cNUC, /* 155 */
cNUC, /* 156 */
cNUC, /* 157 */
cNUC, /* 158 */
cNUC, /* 159 */
cNUC, /* 160 */
cNUC, /* 161 */
cNUC, /* 162 */
cNUC, /* 163 */
cNUC, /* 164 */
cNUC, /* 165 */
cNUC, /* 166 */
cNUC, /* 167 */
cNUC, /* 168 */
cNUC, /* 169 */
cNUC, /* 170 */
cNUC, /* 171 */
cNUC, /* 172 */
cNUC, /* 173 */
cNUC, /* 174 */
cNUC, /* 175 */
cNUC, /* 176 */
cNUC, /* 177 */
cNUC, /* 178 */
cNUC, /* 179 */
cNUC, /* 180 */
cNUC, /* 181 */
cNUC, /* 182 */
cNUC, /* 183 */
cNUC, /* 184 */
cNUC, /* 185 */
cNUC, /* 186 */
cNUC, /* 187 */
cNUC, /* 188 */
cNUC, /* 189 */
cNUC, /* 190 */
cNUC, /* 191 */
cNUC, /* 192 */
cNUC, /* 193 */
cNUC, /* 194 */
cNUC, /* 195 */
cNUC, /* 196 */
cNUC, /* 197 */
cNUC, /* 198 */
cNUC, /* 199 */
cNUC, /* 200 */
cNUC, /* 201 */
cNUC, /* 202 */
cNUC, /* 203 */
cNUC, /* 204 */
cNUC, /* 205 */
cNUC, /* 206 */
cNUC, /* 207 */
cNUC, /* 208 */
cNUC, /* 209 */
cNUC, /* 210 */
cNUC, /* 211 */
cNUC, /* 212 */
cNUC, /* 213 */
cNUC, /* 214 */
cNUC, /* 215 */
cNUC, /* 216 */
cNUC, /* 217 */
cNUC, /* 218 */
cNUC, /* 219 */
cNUC, /* 220 */
cNUC, /* 221 */
cNUC, /* 222 */
cNUC, /* 223 */
cNUC, /* 224 */
cNUC, /* 225 */
cNUC, /* 226 */
cNUC, /* 227 */
cNUC, /* 228 */
cNUC, /* 229 */
cNUC, /* 230 */
cNUC, /* 231 */
cNUC, /* 232 */
cNUC, /* 233 */
cNUC, /* 234 */
cNUC, /* 235 */
cNUC, /* 236 */
cNUC, /* 237 */
cNUC, /* 238 */
cNUC, /* 239 */
cNUC, /* 240 */
cNUC, /* 241 */
cNUC, /* 242 */
cNUC, /* 243 */
cNUC, /* 244 */
cNUC, /* 245 */
cNUC, /* 246 */
cNUC, /* 247 */
cNUC, /* 248 */
cNUC, /* 249 */
cNUC, /* 250 */
cNUC, /* 251 */
cNUC, /* 252 */
cNUC, /* 253 */
cNUC, /* 254 */
cNUC, /* 255 */
}

512
vendor/github.com/pquerna/ffjson/fflib/v1/reader.go generated vendored Normal file
View file

@ -0,0 +1,512 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package v1
import (
"fmt"
"io"
"unicode"
"unicode/utf16"
)
const sliceStringMask = cIJC | cNFP
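// ffReader is a simple cursor over an in-memory byte slice: s holds the
// input, i the current offset, and l the cached length.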
type ffReader struct {
s []byte
i int
l int
}
func newffReader(d []byte) *ffReader {
return &ffReader{
s: d,
i: 0,
l: len(d),
}
}
func (r *ffReader) Slice(start, stop int) []byte {
return r.s[start:stop]
}
func (r *ffReader) Pos() int {
return r.i
}
// Reset the reader, and add new input.
func (r *ffReader) Reset(d []byte) {
r.s = d
r.i = 0
r.l = len(d)
}
// PosWithLine calculates the current position as a line number and offset
// within that line. Because this is not tracked during normal scanning for
// performance reasons, it iterates the buffer from the beginning and should
// only be used in error paths.
func (r *ffReader) PosWithLine() (int, int) {
currentLine := 1
currentChar := 0
for i := 0; i < r.i; i++ {
c := r.s[i]
currentChar++
if c == '\n' {
currentLine++
currentChar = 0
}
}
return currentLine, currentChar
}
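// ReadByteNoWS returns the next byte that is not JSON whitespace, consulting
// whitespaceLookupTable to skip over whitespace quickly.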
func (r *ffReader) ReadByteNoWS() (byte, error) {
if r.i >= r.l {
return 0, io.EOF
}
j := r.i
for {
c := r.s[j]
j++
// inline whitespace parsing gives another ~8% performance boost
// for many kinds of nicely indented JSON.
// ... and using a [256]bool lookup table instead of multiple ifs gives another 2%
/*
if c != '\t' &&
c != '\n' &&
c != '\v' &&
c != '\f' &&
c != '\r' &&
c != ' ' {
r.i = j
return c, nil
}
*/
if !whitespaceLookupTable[c] {
r.i = j
return c, nil
}
if j >= r.l {
return 0, io.EOF
}
}
}
func (r *ffReader) ReadByte() (byte, error) {
if r.i >= r.l {
return 0, io.EOF
}
r.i++
return r.s[r.i-1], nil
}
func (r *ffReader) UnreadByte() error {
if r.i <= 0 {
panic("ffReader.UnreadByte: at beginning of slice")
}
r.i--
return nil
}
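// readU4 reads the four hex digits of a \uXXXX escape beginning at offset j
// and returns the decoded value as a rune.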
func (r *ffReader) readU4(j int) (rune, error) {
var u4 [4]byte
for i := 0; i < 4; i++ {
if j >= r.l {
return -1, io.EOF
}
c := r.s[j]
if byteLookupTable[c]&cVHC != 0 {
u4[i] = c
j++
continue
} else {
// TODO(pquerna): handle errors better. layering violation.
return -1, fmt.Errorf("lex_string_invalid_hex_char: %v %v", c, string(u4[:]))
}
}
// TODO(pquerna): utf16.IsSurrogate
rr, err := ParseUint(u4[:], 16, 64)
if err != nil {
return -1, err
}
return rune(rr), nil
}
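// handleEscaped decodes the backslash escape whose type character is at
// offset j, flushing the raw bytes that precede the escape to out, writing
// the decoded character, and returning the new offset. Surrogate pairs from
// \u escapes are combined with utf16.DecodeRune.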
func (r *ffReader) handleEscaped(c byte, j int, out DecodingBuffer) (int, error) {
if j >= r.l {
return 0, io.EOF
}
c = r.s[j]
j++
if c == 'u' {
ru, err := r.readU4(j)
if err != nil {
return 0, err
}
if utf16.IsSurrogate(ru) {
ru2, err := r.readU4(j + 6)
if err != nil {
return 0, err
}
out.Write(r.s[r.i : j-2])
r.i = j + 10
j = r.i
rval := utf16.DecodeRune(ru, ru2)
if rval != unicode.ReplacementChar {
out.WriteRune(rval)
} else {
return 0, fmt.Errorf("lex_string_invalid_unicode_surrogate: %v %v", ru, ru2)
}
} else {
out.Write(r.s[r.i : j-2])
r.i = j + 4
j = r.i
out.WriteRune(ru)
}
return j, nil
} else if byteLookupTable[c]&cVEC == 0 {
return 0, fmt.Errorf("lex_string_invalid_escaped_char: %v", c)
} else {
out.Write(r.s[r.i : j-2])
r.i = j
j = r.i
switch c {
case '"':
out.WriteByte('"')
case '\\':
out.WriteByte('\\')
case '/':
out.WriteByte('/')
case 'b':
out.WriteByte('\b')
case 'f':
out.WriteByte('\f')
case 'n':
out.WriteByte('\n')
case 'r':
out.WriteByte('\r')
case 't':
out.WriteByte('\t')
}
}
return j, nil
}
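// SliceString copies the body of a JSON string into out, starting at the
// reader's current position (just past the opening quote) and handling
// backslash escapes; on success the reader is left just past the closing quote.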
func (r *ffReader) SliceString(out DecodingBuffer) error {
var c byte
// TODO(pquerna): string_with_escapes? de-escape here?
j := r.i
for {
if j >= r.l {
return io.EOF
}
j, c = scanString(r.s, j)
if c == '"' {
if j != r.i {
out.Write(r.s[r.i : j-1])
r.i = j
}
return nil
} else if c == '\\' {
var err error
j, err = r.handleEscaped(c, j, out)
if err != nil {
return err
}
} else if byteLookupTable[c]&cIJC != 0 {
return fmt.Errorf("lex_string_invalid_json_char: %v", c)
}
continue
}
}
// TODO(pquerna): consider combining with the normal byte mask.
var whitespaceLookupTable [256]bool = [256]bool{
false, /* 0 */
false, /* 1 */
false, /* 2 */
false, /* 3 */
false, /* 4 */
false, /* 5 */
false, /* 6 */
false, /* 7 */
false, /* 8 */
true, /* 9 */
true, /* 10 */
true, /* 11 */
true, /* 12 */
true, /* 13 */
false, /* 14 */
false, /* 15 */
false, /* 16 */
false, /* 17 */
false, /* 18 */
false, /* 19 */
false, /* 20 */
false, /* 21 */
false, /* 22 */
false, /* 23 */
false, /* 24 */
false, /* 25 */
false, /* 26 */
false, /* 27 */
false, /* 28 */
false, /* 29 */
false, /* 30 */
false, /* 31 */
true, /* 32 */
false, /* 33 */
false, /* 34 */
false, /* 35 */
false, /* 36 */
false, /* 37 */
false, /* 38 */
false, /* 39 */
false, /* 40 */
false, /* 41 */
false, /* 42 */
false, /* 43 */
false, /* 44 */
false, /* 45 */
false, /* 46 */
false, /* 47 */
false, /* 48 */
false, /* 49 */
false, /* 50 */
false, /* 51 */
false, /* 52 */
false, /* 53 */
false, /* 54 */
false, /* 55 */
false, /* 56 */
false, /* 57 */
false, /* 58 */
false, /* 59 */
false, /* 60 */
false, /* 61 */
false, /* 62 */
false, /* 63 */
false, /* 64 */
false, /* 65 */
false, /* 66 */
false, /* 67 */
false, /* 68 */
false, /* 69 */
false, /* 70 */
false, /* 71 */
false, /* 72 */
false, /* 73 */
false, /* 74 */
false, /* 75 */
false, /* 76 */
false, /* 77 */
false, /* 78 */
false, /* 79 */
false, /* 80 */
false, /* 81 */
false, /* 82 */
false, /* 83 */
false, /* 84 */
false, /* 85 */
false, /* 86 */
false, /* 87 */
false, /* 88 */
false, /* 89 */
false, /* 90 */
false, /* 91 */
false, /* 92 */
false, /* 93 */
false, /* 94 */
false, /* 95 */
false, /* 96 */
false, /* 97 */
false, /* 98 */
false, /* 99 */
false, /* 100 */
false, /* 101 */
false, /* 102 */
false, /* 103 */
false, /* 104 */
false, /* 105 */
false, /* 106 */
false, /* 107 */
false, /* 108 */
false, /* 109 */
false, /* 110 */
false, /* 111 */
false, /* 112 */
false, /* 113 */
false, /* 114 */
false, /* 115 */
false, /* 116 */
false, /* 117 */
false, /* 118 */
false, /* 119 */
false, /* 120 */
false, /* 121 */
false, /* 122 */
false, /* 123 */
false, /* 124 */
false, /* 125 */
false, /* 126 */
false, /* 127 */
false, /* 128 */
false, /* 129 */
false, /* 130 */
false, /* 131 */
false, /* 132 */
false, /* 133 */
false, /* 134 */
false, /* 135 */
false, /* 136 */
false, /* 137 */
false, /* 138 */
false, /* 139 */
false, /* 140 */
false, /* 141 */
false, /* 142 */
false, /* 143 */
false, /* 144 */
false, /* 145 */
false, /* 146 */
false, /* 147 */
false, /* 148 */
false, /* 149 */
false, /* 150 */
false, /* 151 */
false, /* 152 */
false, /* 153 */
false, /* 154 */
false, /* 155 */
false, /* 156 */
false, /* 157 */
false, /* 158 */
false, /* 159 */
false, /* 160 */
false, /* 161 */
false, /* 162 */
false, /* 163 */
false, /* 164 */
false, /* 165 */
false, /* 166 */
false, /* 167 */
false, /* 168 */
false, /* 169 */
false, /* 170 */
false, /* 171 */
false, /* 172 */
false, /* 173 */
false, /* 174 */
false, /* 175 */
false, /* 176 */
false, /* 177 */
false, /* 178 */
false, /* 179 */
false, /* 180 */
false, /* 181 */
false, /* 182 */
false, /* 183 */
false, /* 184 */
false, /* 185 */
false, /* 186 */
false, /* 187 */
false, /* 188 */
false, /* 189 */
false, /* 190 */
false, /* 191 */
false, /* 192 */
false, /* 193 */
false, /* 194 */
false, /* 195 */
false, /* 196 */
false, /* 197 */
false, /* 198 */
false, /* 199 */
false, /* 200 */
false, /* 201 */
false, /* 202 */
false, /* 203 */
false, /* 204 */
false, /* 205 */
false, /* 206 */
false, /* 207 */
false, /* 208 */
false, /* 209 */
false, /* 210 */
false, /* 211 */
false, /* 212 */
false, /* 213 */
false, /* 214 */
false, /* 215 */
false, /* 216 */
false, /* 217 */
false, /* 218 */
false, /* 219 */
false, /* 220 */
false, /* 221 */
false, /* 222 */
false, /* 223 */
false, /* 224 */
false, /* 225 */
false, /* 226 */
false, /* 227 */
false, /* 228 */
false, /* 229 */
false, /* 230 */
false, /* 231 */
false, /* 232 */
false, /* 233 */
false, /* 234 */
false, /* 235 */
false, /* 236 */
false, /* 237 */
false, /* 238 */
false, /* 239 */
false, /* 240 */
false, /* 241 */
false, /* 242 */
false, /* 243 */
false, /* 244 */
false, /* 245 */
false, /* 246 */
false, /* 247 */
false, /* 248 */
false, /* 249 */
false, /* 250 */
false, /* 251 */
false, /* 252 */
false, /* 253 */
false, /* 254 */
false, /* 255 */
}

View file

@ -0,0 +1,34 @@
/**
* Copyright 2014 Paul Querna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package v1
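// scanString advances j through s until it reaches a byte that needs
// attention (a quote, a backslash, or an invalid JSON character), returning
// the offset just past that byte along with the byte itself, or (j, 0) at
// end of input.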
func scanString(s []byte, j int) (int, byte) {
for {
if j >= len(s) {
return j, 0
}
c := s[j]
j++
if byteLookupTable[c]&sliceStringMask == 0 {
continue
}
return j, c
}
}