*: vendor code and make a Dockerfile

Vincent Batts 2017-02-06 12:25:03 -05:00
parent ce7e87f290
commit c611d0d853
Signed by: vbatts
GPG Key ID: 10937E57733F1362
107 changed files with 30522 additions and 0 deletions

8
Dockerfile Normal file

@@ -0,0 +1,8 @@
FROM fedora
RUN dnf install -y golang
RUN mkdir -p /usr/local/src/github.com/vbatts/
ENV GOPATH=/usr/local
ADD ./ /usr/local/src/github.com/vbatts/imgsrv/
RUN go install github.com/vbatts/imgsrv
EXPOSE 7777
ENTRYPOINT ["/usr/local/bin/imgsrv"]
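
With this Dockerfile at the repository root, the image can be built and run along the lines of `docker build -t imgsrv .` followed by `docker run -p 7777:7777 imgsrv` (the `imgsrv` tag is illustrative): the build copies the sources into the GOPATH, `go install` produces /usr/local/bin/imgsrv, and the EXPOSEd port 7777 is mapped back to the host.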

18
glide.lock generated Normal file

@@ -0,0 +1,18 @@
hash: e99d6e208d9806cd3133aaf20912a1e3ece517fc36c3ad1e67631bc2ba7b4009
updated: 2017-02-06T12:03:37.357927455-05:00
imports:
- name: github.com/gorilla/context
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
- name: github.com/gorilla/mux
version: 392c28fe23e1c45ddba891b0320b3b5df220beea
- name: github.com/vbatts/go-httplog
version: becd5526f5dcce33a513c4c0ef2c70c761ca66e0
- name: labix.org/v2/mgo
version: "287"
subpackages:
- bson
- name: labix.org/v2/mgo/sasl
version: ""
- name: launchpad.net/goyaml
version: "51"
testImports: []

8
glide.yaml Normal file

@@ -0,0 +1,8 @@
package: github.com/vbatts/imgsrv
import:
- package: github.com/gorilla/mux
- package: github.com/vbatts/go-httplog
- package: labix.org/v2/mgo
subpackages:
- bson
- package: launchpad.net/goyaml
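
glide.yaml above declares only the direct imports; glide.lock records the exact revisions resolved from it, which are the same commits the vendored submodules below point at. With glide, `glide install` reproduces vendor/ from the lock file, while `glide update` re-resolves versions and rewrites the lock.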

1
vendor/github.com/gorilla/context generated vendored Submodule

@@ -0,0 +1 @@
Subproject commit 08b5f424b9271eedf6f9f0ce86cb9396ed337a42

1
vendor/github.com/gorilla/mux generated vendored Submodule

@@ -0,0 +1 @@
Subproject commit 392c28fe23e1c45ddba891b0320b3b5df220beea

1
vendor/github.com/vbatts/go-httplog generated vendored Submodule

@@ -0,0 +1 @@
Subproject commit becd5526f5dcce33a513c4c0ef2c70c761ca66e0

3
vendor/labix.org/v2/mgo/.bzr/README generated vendored Normal file

@@ -0,0 +1,3 @@
This is a Bazaar control directory.
Do not change any files in this directory.
See http://bazaar.canonical.com/ for more information about Bazaar.

1
vendor/labix.org/v2/mgo/.bzr/branch-format generated vendored Normal file

@@ -0,0 +1 @@
Bazaar-NG meta directory, format 1

1
vendor/labix.org/v2/mgo/.bzr/branch/branch.conf generated vendored Normal file

@@ -0,0 +1 @@
parent_location = http://bazaar.launchpad.net/~niemeyer/mgo/v2/

1
vendor/labix.org/v2/mgo/.bzr/branch/format generated vendored Normal file

@@ -0,0 +1 @@
Bazaar Branch Format 7 (needs bzr 1.6)

1
vendor/labix.org/v2/mgo/.bzr/branch/last-revision generated vendored Normal file

@@ -0,0 +1 @@
287 gustavo@niemeyer.net-20140701140051-90be2tvk93pcczzi

1
vendor/labix.org/v2/mgo/.bzr/checkout/conflicts generated vendored Normal file

@@ -0,0 +1 @@
BZR conflict list format 1

BIN
vendor/labix.org/v2/mgo/.bzr/checkout/dirstate generated vendored Normal file

Binary file not shown.

1
vendor/labix.org/v2/mgo/.bzr/checkout/format generated vendored Normal file

@@ -0,0 +1 @@
Bazaar Working Tree Format 6 (bzr 1.14)

0
vendor/labix.org/v2/mgo/.bzr/checkout/views generated vendored Normal file

1
vendor/labix.org/v2/mgo/.bzr/repository/format generated vendored Normal file

@@ -0,0 +1 @@
Bazaar repository format 2a (needs bzr 1.16 or later)


@@ -0,0 +1,5 @@
B+Tree Graph Index 2
node_ref_lists=0
key_elements=1
len=0
row_lengths=

6
vendor/labix.org/v2/mgo/.bzr/repository/pack-names generated vendored Normal file

@@ -0,0 +1,6 @@
B+Tree Graph Index 2
node_ref_lists=0
key_elements=1
len=1
row_lengths=1
(binary data not shown)

2
vendor/labix.org/v2/mgo/.bzrignore generated vendored Normal file

@@ -0,0 +1,2 @@
_*
[856].out

25
vendor/labix.org/v2/mgo/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
mgo - MongoDB driver for Go
Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

5
vendor/labix.org/v2/mgo/Makefile generated vendored Normal file

@@ -0,0 +1,5 @@
startdb:
@testdb/setup.sh start
stopdb:
@testdb/setup.sh stop

412
vendor/labix.org/v2/mgo/auth.go generated vendored Normal file

@@ -0,0 +1,412 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"labix.org/v2/mgo/bson"
"sync"
)
type authCmd struct {
Authenticate int
Nonce string
User string
Key string
}
type startSaslCmd struct {
StartSASL int `bson:"startSasl"`
}
type authResult struct {
ErrMsg string
Ok bool
}
type getNonceCmd struct {
GetNonce int
}
type getNonceResult struct {
Nonce string
Err string "$err"
Code int
}
type logoutCmd struct {
Logout int
}
type saslCmd struct {
Start int `bson:"saslStart,omitempty"`
Continue int `bson:"saslContinue,omitempty"`
ConversationId int `bson:"conversationId,omitempty"`
Mechanism string `bson:"mechanism,omitempty"`
Payload []byte
}
type saslResult struct {
Ok bool `bson:"ok"`
NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
Done bool
ConversationId int `bson:"conversationId"`
Payload []byte
ErrMsg string
}
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
func (socket *mongoSocket) getNonce() (nonce string, err error) {
socket.Lock()
for socket.cachedNonce == "" && socket.dead == nil {
debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
socket.gotNonce.Wait()
}
if socket.cachedNonce == "mongos" {
socket.Unlock()
return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
}
debugf("Socket %p to %s: got nonce", socket, socket.addr)
nonce, err = socket.cachedNonce, socket.dead
socket.cachedNonce = ""
socket.Unlock()
if err != nil {
nonce = ""
}
return
}
func (socket *mongoSocket) resetNonce() {
debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
op := &queryOp{}
op.query = &getNonceCmd{GetNonce: 1}
op.collection = "admin.$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
if err != nil {
socket.kill(errors.New("getNonce: "+err.Error()), true)
return
}
result := &getNonceResult{}
err = bson.Unmarshal(docData, &result)
if err != nil {
socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
return
}
debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
if result.Code == 13390 {
// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
result.Nonce = "mongos"
} else if result.Nonce == "" {
var msg string
if result.Err != "" {
msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
} else {
msg = "Got an empty nonce"
}
socket.kill(errors.New(msg), true)
return
}
socket.Lock()
if socket.cachedNonce != "" {
socket.Unlock()
panic("resetNonce: nonce already cached")
}
socket.cachedNonce = result.Nonce
socket.gotNonce.Signal()
socket.Unlock()
}
err := socket.Query(op)
if err != nil {
socket.kill(errors.New("resetNonce: "+err.Error()), true)
}
}
func (socket *mongoSocket) Login(cred Credential) error {
socket.Lock()
for _, sockCred := range socket.creds {
if sockCred == cred {
debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
socket.Unlock()
return nil
}
}
if socket.dropLogout(cred) {
debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
}
socket.Unlock()
debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
var err error
switch cred.Mechanism {
case "", "MONGO-CR":
err = socket.loginClassic(cred)
case "PLAIN":
err = socket.loginPlain(cred)
case "MONGO-X509":
err = fmt.Errorf("unsupported authentication mechanism: %s", cred.Mechanism)
default:
// Try SASL for everything else, if it is available.
err = socket.loginSASL(cred)
}
if err != nil {
debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
} else {
debugf("Socket %p to %s: login successful", socket, socket.addr)
}
return err
}
func (socket *mongoSocket) loginClassic(cred Credential) error {
// Note that this only works properly because this function is
// synchronous, which means the nonce won't get reset while we're
// using it and any other login requests will block waiting for a
// new nonce provided in the defer call below.
nonce, err := socket.getNonce()
if err != nil {
return err
}
defer socket.resetNonce()
psum := md5.New()
psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
ksum := md5.New()
ksum.Write([]byte(nonce + cred.Username))
ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
key := hex.EncodeToString(ksum.Sum(nil))
cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginPlain(cred Credential) error {
cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginSASL(cred Credential) error {
sasl, err := saslNew(cred, socket.Server().Addr)
if err != nil {
return err
}
defer sasl.Close()
// The goal of this logic is to carry a locked socket until the
// local SASL step confirms the auth is valid; the socket needs to be
// locked so that concurrent action doesn't leave the socket in an
// auth state that doesn't reflect the operations that took place.
// As a simple case, imagine inverting login=>logout to logout=>login.
//
// The logic below works because the lock func isn't called concurrently.
locked := false
lock := func(b bool) {
if locked != b {
locked = b
if b {
socket.Lock()
} else {
socket.Unlock()
}
}
}
lock(true)
defer lock(false)
start := 1
cmd := saslCmd{}
res := saslResult{}
for {
payload, done, err := sasl.Step(res.Payload)
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
lock(false)
cmd = saslCmd{
Start: start,
Continue: 1 - start,
ConversationId: res.ConversationId,
Mechanism: cred.Mechanism,
Payload: payload,
}
start = 0
err = socket.loginRun(cred.Source, &cmd, &res, func() error {
// See the comment on lock for why this is necessary.
lock(true)
if !res.Ok || res.NotOk {
return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
}
return nil
})
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
}
return nil
}
func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
var mutex sync.Mutex
var replyErr error
mutex.Lock()
op := queryOp{}
op.query = query
op.collection = db + ".$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
defer mutex.Unlock()
if err != nil {
replyErr = err
return
}
err = bson.Unmarshal(docData, result)
if err != nil {
replyErr = err
} else {
// Must handle this within the read loop for the socket, so
// that concurrent login requests are properly ordered.
replyErr = f()
}
}
err := socket.Query(&op)
if err != nil {
return err
}
mutex.Lock() // Wait.
return replyErr
}
func (socket *mongoSocket) Logout(db string) {
socket.Lock()
cred, found := socket.dropAuth(db)
if found {
debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
socket.logout = append(socket.logout, cred)
}
socket.Unlock()
}
func (socket *mongoSocket) LogoutAll() {
socket.Lock()
if l := len(socket.creds); l > 0 {
debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
socket.logout = append(socket.logout, socket.creds...)
socket.creds = socket.creds[0:0]
}
socket.Unlock()
}
func (socket *mongoSocket) flushLogout() (ops []interface{}) {
socket.Lock()
if l := len(socket.logout); l > 0 {
debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
for i := 0; i != l; i++ {
op := queryOp{}
op.query = &logoutCmd{1}
op.collection = socket.logout[i].Source + ".$cmd"
op.limit = -1
ops = append(ops, &op)
}
socket.logout = socket.logout[0:0]
}
socket.Unlock()
return
}
func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
for i, sockCred := range socket.creds {
if sockCred.Source == db {
copy(socket.creds[i:], socket.creds[i+1:])
socket.creds = socket.creds[:len(socket.creds)-1]
return sockCred, true
}
}
return cred, false
}
func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
for i, sockCred := range socket.logout {
if sockCred == cred {
copy(socket.logout[i:], socket.logout[i+1:])
socket.logout = socket.logout[:len(socket.logout)-1]
return true
}
}
return false
}
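
As a side note on loginClassic above: the MONGO-CR key it sends is just two chained MD5 digests over the nonce and credentials. A minimal standalone sketch of that derivation (not part of this commit; the nonce is illustrative and the credentials are borrowed from the test harness):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// mongoCRKey mirrors loginClassic:
// key = md5hex(nonce + user + md5hex(user + ":mongo:" + pass)).
func mongoCRKey(nonce, user, pass string) string {
	psum := md5.New()
	psum.Write([]byte(user + ":mongo:" + pass))
	ksum := md5.New()
	ksum.Write([]byte(nonce + user))
	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
	return hex.EncodeToString(ksum.Sum(nil))
}

func main() {
	fmt.Println(mongoCRKey("2375531c32080ae8", "root", "rapadura"))
}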

935
vendor/labix.org/v2/mgo/auth_test.go generated vendored Normal file

@@ -0,0 +1,935 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"flag"
"fmt"
"labix.org/v2/mgo"
. "launchpad.net/gocheck"
"net/url"
"sync"
"time"
)
func (s *S) TestAuthLoginDatabase(c *C) {
// Test both with a normal database and with an authenticated shard.
for _, addr := range []string{"localhost:40002", "localhost:40203"} {
session, err := mgo.Dial(addr)
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
admindb := session.DB("admin")
err = admindb.Login("root", "wrong")
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
}
func (s *S) TestAuthLoginSession(c *C) {
// Test both with a normal database and with an authenticated shard.
for _, addr := range []string{"localhost:40002", "localhost:40203"} {
session, err := mgo.Dial(addr)
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
cred := mgo.Credential{
Username: "root",
Password: "wrong",
}
err = session.Login(&cred)
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
cred.Password = "rapadura"
err = session.Login(&cred)
c.Assert(err, IsNil)
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
}
func (s *S) TestAuthLoginLogout(c *C) {
// Test both with a normal database and with an authenticated shard.
for _, addr := range []string{"localhost:40002", "localhost:40203"} {
session, err := mgo.Dial(addr)
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
admindb.Logout()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
// Must have dropped auth from the session too.
session = session.Copy()
defer session.Close()
coll = session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
}
}
func (s *S) TestAuthLoginLogoutAll(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
session.LogoutAll()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
// Must have dropped auth from the session too.
session = session.Copy()
defer session.Close()
coll = session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
}
func (s *S) TestAuthUpsertUserErrors(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
err = mydb.UpsertUser(&mgo.User{})
c.Assert(err, ErrorMatches, "user has no Username")
err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"})
c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set")
err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}})
c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in admin database")
}
func (s *S) TestAuthUpsertUser(c *C) {
if !s.versionAtLeast(2, 4) {
c.Skip("UpsertUser only works on 2.4+")
}
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
ruser := &mgo.User{
Username: "myruser",
Password: "mypass",
Roles: []mgo.Role{mgo.RoleRead},
}
rwuser := &mgo.User{
Username: "myrwuser",
Password: "mypass",
Roles: []mgo.Role{mgo.RoleReadWrite},
}
err = mydb.UpsertUser(ruser)
c.Assert(err, IsNil)
err = mydb.UpsertUser(rwuser)
c.Assert(err, IsNil)
err = mydb.Login("myruser", "mypass")
c.Assert(err, IsNil)
admindb.Logout()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
err = mydb.Login("myrwuser", "mypass")
c.Assert(err, IsNil)
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
myotherdb := session.DB("myotherdb")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
// Test UserSource.
rwuserother := &mgo.User{
Username: "myrwuser",
UserSource: "mydb",
Roles: []mgo.Role{mgo.RoleRead},
}
err = myotherdb.UpsertUser(rwuserother)
if s.versionAtLeast(2, 6) {
c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`)
return
}
c.Assert(err, IsNil)
admindb.Logout()
// Test indirection via UserSource: we can't write to it, because
// the roles for myrwuser are different there.
othercoll := myotherdb.C("myothercoll")
err = othercoll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
// Reading works, though.
err = othercoll.Find(nil).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
// Can't login directly into the database using UserSource, though.
err = myotherdb.Login("myrwuser", "mypass")
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
}
func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) {
if !s.versionAtLeast(2, 4) {
c.Skip("UpsertUser only works on 2.4+")
}
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
ruser := &mgo.User{
Username: "myruser",
Password: "mypass",
OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}},
}
err = admindb.UpsertUser(ruser)
c.Assert(err, IsNil)
defer admindb.RemoveUser("myruser")
admindb.Logout()
err = admindb.Login("myruser", "mypass")
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
err = coll.Find(nil).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
}
func (s *S) TestAuthUpsertUserUpdates(c *C) {
if !s.versionAtLeast(2, 4) {
c.Skip("UpsertUser only works on 2.4+")
}
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
// Insert a user that can read.
user := &mgo.User{
Username: "myruser",
Password: "mypass",
Roles: []mgo.Role{mgo.RoleRead},
}
err = mydb.UpsertUser(user)
c.Assert(err, IsNil)
// Now update the user password.
user = &mgo.User{
Username: "myruser",
Password: "mynewpass",
}
err = mydb.UpsertUser(user)
c.Assert(err, IsNil)
// Login with the new user.
usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
c.Assert(err, IsNil)
defer usession.Close()
// Can read, but not write.
err = usession.DB("mydb").C("mycoll").Find(nil).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
// Update the user role.
user = &mgo.User{
Username: "myruser",
Roles: []mgo.Role{mgo.RoleReadWrite},
}
err = mydb.UpsertUser(user)
c.Assert(err, IsNil)
// Dial again to ensure the password hasn't changed.
usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
c.Assert(err, IsNil)
defer usession.Close()
// Now it can write.
err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthAddUser(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
err = mydb.AddUser("myruser", "mypass", true)
c.Assert(err, IsNil)
err = mydb.AddUser("mywuser", "mypass", false)
c.Assert(err, IsNil)
err = mydb.Login("myruser", "mypass")
c.Assert(err, IsNil)
admindb.Logout()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
err = mydb.Login("mywuser", "mypass")
c.Assert(err, IsNil)
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthAddUserReplaces(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
err = mydb.AddUser("myuser", "myoldpass", false)
c.Assert(err, IsNil)
err = mydb.AddUser("myuser", "mynewpass", true)
c.Assert(err, IsNil)
admindb.Logout()
err = mydb.Login("myuser", "myoldpass")
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
err = mydb.Login("myuser", "mynewpass")
c.Assert(err, IsNil)
// ReadOnly flag was changed too.
err = mydb.C("mycoll").Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
}
func (s *S) TestAuthRemoveUser(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
err = mydb.AddUser("myuser", "mypass", true)
c.Assert(err, IsNil)
err = mydb.RemoveUser("myuser")
c.Assert(err, IsNil)
err = mydb.Login("myuser", "mypass")
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
}
func (s *S) TestAuthLoginTwiceDoesNothing(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
oldStats := mgo.GetStats()
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
newStats := mgo.GetStats()
c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
}
func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
oldStats := mgo.GetStats()
admindb.Logout()
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
newStats := mgo.GetStats()
c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
}
func (s *S) TestAuthLoginSwitchUser(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
err = admindb.Login("reader", "rapadura")
c.Assert(err, IsNil)
// Can't write.
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
// But can read.
result := struct{ N int }{}
err = coll.Find(nil).One(&result)
c.Assert(err, IsNil)
c.Assert(result.N, Equals, 1)
}
func (s *S) TestAuthLoginChangePassword(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
mydb := session.DB("mydb")
err = mydb.AddUser("myuser", "myoldpass", false)
c.Assert(err, IsNil)
err = mydb.Login("myuser", "myoldpass")
c.Assert(err, IsNil)
err = mydb.AddUser("myuser", "mynewpass", true)
c.Assert(err, IsNil)
err = mydb.Login("myuser", "mynewpass")
c.Assert(err, IsNil)
admindb.Logout()
// The second login must be in effect, which means read-only.
err = mydb.C("mycoll").Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
}
func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
session.Refresh()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
session = session.Copy()
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthLoginCachingWithSessionClone(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
session = session.Clone()
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthLoginCachingWithNewSession(c *C) {
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
session = session.New()
defer session.Close()
coll := session.DB("mydb").C("mycoll")
err = coll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized for .*")
}
func (s *S) TestAuthLoginCachingAcrossPool(c *C) {
// Logins are cached even when the connection goes back
// into the pool.
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
// Add another user to test the logout case at the same time.
mydb := session.DB("mydb")
err = mydb.AddUser("myuser", "mypass", false)
c.Assert(err, IsNil)
err = mydb.Login("myuser", "mypass")
c.Assert(err, IsNil)
// Logout root explicitly, to test both cases.
admindb.Logout()
// Give socket back to pool.
session.Refresh()
// Brand new session, should use socket from the pool.
other := session.New()
defer other.Close()
oldStats := mgo.GetStats()
err = other.DB("admin").Login("root", "rapadura")
c.Assert(err, IsNil)
err = other.DB("mydb").Login("myuser", "mypass")
c.Assert(err, IsNil)
// Both logins were cached, so no ops.
newStats := mgo.GetStats()
c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
// And they actually worked.
err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
other.DB("admin").Logout()
err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) {
// Now verify that logouts are properly flushed if they
// are not revalidated after leaving the pool.
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
// Add another user to test the logout case at the same time.
mydb := session.DB("mydb")
err = mydb.AddUser("myuser", "mypass", true)
c.Assert(err, IsNil)
err = mydb.Login("myuser", "mypass")
c.Assert(err, IsNil)
// Just some data to query later.
err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
// Give socket back to pool.
session.Refresh()
// Brand new session, should use socket from the pool.
other := session.New()
defer other.Close()
oldStats := mgo.GetStats()
err = other.DB("mydb").Login("myuser", "mypass")
c.Assert(err, IsNil)
// Login was cached, so no ops.
newStats := mgo.GetStats()
c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
// Can't write, since root has been implicitly logged out
// when the connection went into the pool, and not revalidated.
err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
// But can read due to the revalidated myuser login.
result := struct{ N int }{}
err = other.DB("mydb").C("mycoll").Find(nil).One(&result)
c.Assert(err, IsNil)
c.Assert(result.N, Equals, 1)
}
func (s *S) TestAuthEventual(c *C) {
// Eventual sessions don't keep sockets around, so they are
// an interesting test case.
session, err := mgo.Dial("localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
admindb := session.DB("admin")
err = admindb.Login("root", "rapadura")
c.Assert(err, IsNil)
err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
var wg sync.WaitGroup
wg.Add(20)
for i := 0; i != 10; i++ {
go func() {
defer wg.Done()
var result struct{ N int }
err := session.DB("mydb").C("mycoll").Find(nil).One(&result)
c.Assert(err, IsNil)
c.Assert(result.N, Equals, 1)
}()
}
for i := 0; i != 10; i++ {
go func() {
defer wg.Done()
err := session.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
}()
}
wg.Wait()
}
func (s *S) TestAuthURL(c *C) {
session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
c.Assert(err, IsNil)
defer session.Close()
err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthURLWrongCredentials(c *C) {
session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/")
if session != nil {
session.Close()
}
c.Assert(err, ErrorMatches, "auth fail(s|ed)")
c.Assert(session, IsNil)
}
func (s *S) TestAuthURLWithNewSession(c *C) {
// When authentication is in the URL, the new session will
// actually carry it on as well, even if logged out explicitly.
session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
c.Assert(err, IsNil)
defer session.Close()
session.DB("admin").Logout()
// Do it twice to ensure it passes the needed data on.
session = session.New()
defer session.Close()
session = session.New()
defer session.Close()
err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
c.Assert(err, IsNil)
}
func (s *S) TestAuthURLWithDatabase(c *C) {
session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002")
c.Assert(err, IsNil)
defer session.Close()
mydb := session.DB("mydb")
err = mydb.AddUser("myruser", "mypass", true)
c.Assert(err, IsNil)
// Test once with database, and once with source.
for i := 0; i < 2; i++ {
var url string
if i == 0 {
url = "mongodb://myruser:mypass@localhost:40002/mydb"
} else {
url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb"
}
usession, err := mgo.Dial(url)
c.Assert(err, IsNil)
defer usession.Close()
ucoll := usession.DB("mydb").C("mycoll")
err = ucoll.FindId(0).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = ucoll.Insert(M{"n": 1})
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
}
}
func (s *S) TestDefaultDatabase(c *C) {
tests := []struct{ url, db string }{
{"mongodb://root:rapadura@localhost:40002", "test"},
{"mongodb://root:rapadura@localhost:40002/admin", "admin"},
{"mongodb://localhost:40001", "test"},
{"mongodb://localhost:40001/", "test"},
{"mongodb://localhost:40001/mydb", "mydb"},
}
for _, test := range tests {
session, err := mgo.Dial(test.url)
c.Assert(err, IsNil)
defer session.Close()
c.Logf("test: %#v", test)
c.Assert(session.DB("").Name, Equals, test.db)
scopy := session.Copy()
c.Check(scopy.DB("").Name, Equals, test.db)
scopy.Close()
}
}
func (s *S) TestAuthDirect(c *C) {
// Direct connections must work to the master and slaves.
for _, port := range []string{"40031", "40032", "40033"} {
url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port)
session, err := mgo.Dial(url)
c.Assert(err, IsNil)
defer session.Close()
session.SetMode(mgo.Monotonic, true)
var result struct{}
err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
c.Assert(err, Equals, mgo.ErrNotFound)
}
}
func (s *S) TestAuthDirectWithLogin(c *C) {
// Direct connections must work to the master and slaves.
for _, port := range []string{"40031", "40032", "40033"} {
url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port)
session, err := mgo.Dial(url)
c.Assert(err, IsNil)
defer session.Close()
session.SetMode(mgo.Monotonic, true)
session.SetSyncTimeout(3 * time.Second)
err = session.DB("admin").Login("root", "rapadura")
c.Assert(err, IsNil)
var result struct{}
err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
c.Assert(err, Equals, mgo.ErrNotFound)
}
}
var (
plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)")
plainUser = "einstein"
plainPass = "password"
)
func (s *S) TestAuthPlainCred(c *C) {
if *plainFlag == "" {
c.Skip("no -plain")
}
cred := &mgo.Credential{
Username: plainUser,
Password: plainPass,
Source: "$external",
Mechanism: "PLAIN",
}
c.Logf("Connecting to %s...", *plainFlag)
session, err := mgo.Dial(*plainFlag)
c.Assert(err, IsNil)
defer session.Close()
records := session.DB("records").C("records")
c.Logf("Connected! Testing the need for authentication...")
err = records.Find(nil).One(nil)
c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
c.Logf("Authenticating...")
err = session.Login(cred)
c.Assert(err, IsNil)
c.Logf("Authenticated!")
c.Logf("Connected! Testing the need for authentication...")
err = records.Find(nil).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
}
func (s *S) TestAuthPlainURL(c *C) {
if *plainFlag == "" {
c.Skip("no -plain")
}
c.Logf("Connecting to %s...", *plainFlag)
session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag))
c.Assert(err, IsNil)
defer session.Close()
c.Logf("Connected! Testing the need for authentication...")
err = session.DB("records").C("records").Find(nil).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
}
var (
kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)")
kerberosHost = "mmscustmongo.10gen.me"
kerberosUser = "mmsagent/mmscustagent.10gen.me@10GEN.ME"
)
func (s *S) TestAuthKerberosCred(c *C) {
if !*kerberosFlag {
c.Skip("no -kerberos")
}
cred := &mgo.Credential{
Username: kerberosUser,
Mechanism: "GSSAPI",
}
c.Logf("Connecting to %s...", kerberosHost)
session, err := mgo.Dial(kerberosHost)
c.Assert(err, IsNil)
defer session.Close()
c.Logf("Connected! Testing the need for authentication...")
names, err := session.DatabaseNames()
c.Assert(err, ErrorMatches, "unauthorized")
c.Logf("Authenticating...")
err = session.Login(cred)
c.Assert(err, IsNil)
c.Logf("Authenticated!")
names, err = session.DatabaseNames()
c.Assert(err, IsNil)
c.Assert(len(names) > 0, Equals, true)
}
func (s *S) TestAuthKerberosURL(c *C) {
if !*kerberosFlag {
c.Skip("no -kerberos")
}
c.Logf("Connecting to %s...", kerberosHost)
session, err := mgo.Dial(url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI")
c.Assert(err, IsNil)
defer session.Close()
names, err := session.DatabaseNames()
c.Assert(err, IsNil)
c.Assert(len(names) > 0, Equals, true)
}

25
vendor/labix.org/v2/mgo/bson/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

682
vendor/labix.org/v2/mgo/bson/bson.go generated vendored Normal file

@@ -0,0 +1,682 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
GetBSON() (interface{}, error)
}
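// Illustrative sketch (not from the vendored file): a Getter that
// marshals a custom type as a formatted string:
//
//	type Celsius float64
//
//	func (c Celsius) GetBSON() (interface{}, error) {
//		return fmt.Sprintf("%.1fC", float64(c)), nil
//	}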
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// See the D type.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
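// Illustrative sketch (not from the vendored file): the same document
// built both ways; only the D form guarantees "a" is encoded before "b":
//
//	ordered, _ := Marshal(D{{"a", 1}, {"b", true}})
//	unordered, _ := Marshal(M{"a": 1, "b": true})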
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem
// See the RawD type.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
// objectIdCounter is atomically incremented when generating a new ObjectId
// using NewObjectId() function. It's used as a counter part of an id.
var objectIdCounter uint32 = 0
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
// readMachineId generates machine id and puts it into the machineId global
// variable. If it cannot read the hostname it falls back to random
// bytes, and it panics only if that fallback fails as well.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
pid := os.Getpid()
b[7] = byte(pid >> 8)
b[8] = byte(pid)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON implements the json.Marshaler interface, encoding the id as a hex string.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface, decoding a hex string into the id.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return fmt.Errorf("Invalid ObjectId in JSON: %s", string(data))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return fmt.Errorf("Invalid ObjectId in JSON: %s (%s)", string(data), err)
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("Invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
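// Illustrative sketch (not from the vendored file): the accessors above
// decompose the 12-byte layout that NewObjectId produces:
//
//	id := NewObjectId()
//	id.Time()    // bytes 0-3: seconds since epoch, big endian
//	id.Machine() // bytes 4-6: md5(hostname) prefix
//	id.Pid()     // bytes 7-8: process id, big endian
//	id.Counter() // bytes 9-11: atomically incremented counter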
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Map, reflect.Ptr:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
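// Illustrative sketch (not part of the original source): the automatic
// conversions listed above, applied during Unmarshal. Values are hypothetical.
func exampleUnmarshalConversions() {
	data, err := Marshal(M{"n": 1, "ok": true})
	if err != nil {
		panic(err)
	}
	var out struct {
		N  float64 // the int32 value 1 is converted to 1.0
		Ok int     // the bool true is converted to 1
	}
	if err := Unmarshal(data, &out); err != nil {
		panic(err)
	}
}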
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
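// Illustrative sketch (not part of the original source): deferring part of a
// document with Raw and decoding it later via Raw.Unmarshal. Names are
// hypothetical.
func exampleRawUnmarshal() {
	data, err := Marshal(M{"payload": M{"x": 1}})
	if err != nil {
		panic(err)
	}
	var envelope struct {
		Payload Raw // left undecoded until the target type is known
	}
	if err := Unmarshal(data, &envelope); err != nil {
		panic(err)
	}
	var payload struct{ X int }
	if err := envelope.Payload.Unmarshal(&payload); err != nil {
		panic(err) // a *TypeError would mean the kinds were incompatible
	}
}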
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
// XXX Drop this after a few releases.
if s := strings.Index(tag, "/"); s >= 0 {
recommend := tag[:s]
for _, c := range tag[s+1:] {
switch c {
case 'c':
recommend += ",omitempty"
case 's':
recommend += ",minsize"
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st)
panic(externalPanic(msg))
}
}
msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend)
panic(externalPanic(msg))
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}
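// Illustrative sketch (not part of the original source): the ",inline"
// handling implemented above. The types are hypothetical.
func exampleInlineTag() {
	type Base struct {
		Id string `bson:"_id"`
	}
	type Doc struct {
		Base                         `bson:",inline"` // Base's fields marshal as top-level keys
		Extra map[string]interface{} `bson:",inline"` // unmatched keys land here on unmarshal
	}
	data, err := Marshal(&Doc{Base: Base{Id: "x"}})
	if err != nil {
		panic(err)
	}
	var out Doc
	if err := Unmarshal(data, &out); err != nil {
		panic(err)
	}
}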

1466
vendor/labix.org/v2/mgo/bson/bson_test.go generated vendored Normal file

File diff suppressed because it is too large

795
vendor/labix.org/v2/mgo/bson/decode.go generated vendored Normal file

@@ -0,0 +1,795 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyle map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyle = make(map[reflect.Type]int)
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
setterMutex.RLock()
style := setterStyle[outt]
setterMutex.RUnlock()
if style == setterNone {
return nil
}
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyle[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyle[outt] = setterAddr
} else {
setterStyle[outt] = setterNone
return nil
}
style = setterStyle[outt]
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
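// Illustrative sketch (not part of the original source): a type whose pointer
// implements Setter, which getSetter above resolves through the setterAddr
// style. The type is hypothetical.
type rawCapture struct {
	Kind byte
	Data []byte
}

func (c *rawCapture) SetBSON(raw Raw) error {
	c.Kind = raw.Kind
	c.Data = append([]byte(nil), raw.Data...) // copy; raw.Data aliases the input buffer
	return nil
}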
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice:
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned good value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == '\x03' {
// Special case for documents. Delegate to readDocTo().
switch out.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
default:
switch out.Interface().(type) {
case D:
out.Set(d.readDocElems(out.Type()))
case RawD:
out.Set(d.readRawDocElems(out.Type()))
default:
d.readDocTo(blackHole)
}
}
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
	case 0x09: // UTC datetime (0x11 below is the Mongo-specific timestamp)
		// MongoDB stores datetimes as milliseconds since the Unix epoch.
		i := d.readInt64()
		if i == -62135596800000 {
			// Go's zero time, encoded in milliseconds.
			in = time.Time{} // In UTC for convenience.
		} else {
			in = time.Unix(i/1e3, i%1e3*1e6)
		}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
}
	case reflect.Slice, reflect.Array:
		// Remember, array (0x04) slices are built with the correct
		// element type. If we got here, this must be a cross-kind BSON
		// conversion (e.g. 0x05 binary data unmarshalled into a string
		// or byte slice).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
}
return false
}
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
if d.readByte() == 1 {
return true
}
return false
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
start := d.i
d.i += int(length)
if d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}

462
vendor/labix.org/v2/mgo/bson/encode.go generated vendored Normal file

@@ -0,0 +1,462 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
if v.Type() == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := v.NumField()-1; i >= 0; i-- {
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName('\x0A', name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName('\x07', name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName('\x0E', name)
e.addStr(s)
default:
e.addElemName('\x02', name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName('\x01', name)
e.addInt64(int64(math.Float64bits(v.Float())))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName('\x10', name)
e.addInt32(int32(u))
} else {
e.addElemName('\x12', name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName('\x11', name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName('\x7F', name)
} else {
e.addElemName('\xFF', name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName('\x10', name)
e.addInt32(int32(i))
} else {
e.addElemName('\x12', name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName('\x08', name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName('\x03', name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName('\x03', name)
e.addDoc(v)
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName('\x05', name)
e.addBinary(s.Kind, s.Data)
case RegEx:
e.addElemName('\x0B', name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName('\x0D', name)
e.addStr(s.Code)
} else {
e.addElemName('\x0F', name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB handles timestamps as milliseconds.
e.addElemName('\x09', name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
case url.URL:
e.addElemName('\x02', name)
e.addStr(s.String())
case undefined:
e.addElemName('\x06', name)
default:
e.addElemName('\x03', name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Wonder how that brilliant idea came to life. Obsolete, luckily.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}

71
vendor/labix.org/v2/mgo/bulk.go generated vendored Normal file

@@ -0,0 +1,71 @@
package mgo
// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// WARNING: This API is still experimental.
//
// Relevant documentation:
//
// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
c *Collection
ordered bool
inserts []interface{}
}
// BulkError holds an error returned from running a Bulk operation.
//
// TODO: This is private for the moment, until we understand exactly how
// to report these multi-errors in a useful and convenient way.
type bulkError struct {
err error
}
// BulkResult holds the results for a bulk operation.
type BulkResult struct {
// Be conservative while we understand exactly how to report these
// results in a useful and convenient way, and also how to emulate
// them with prior servers.
private bool
}
func (e *bulkError) Error() string {
return e.err.Error()
}
// Bulk returns a value to prepare the execution of a bulk operation.
//
// WARNING: This API is still experimental.
//
func (c *Collection) Bulk() *Bulk {
return &Bulk{c: c, ordered: true}
}
// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if prior ones have failed.
func (b *Bulk) Unordered() {
b.ordered = false
}
// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
b.inserts = append(b.inserts, docs...)
}
// Run runs all the operations queued up.
func (b *Bulk) Run() (*BulkResult, error) {
op := &insertOp{b.c.FullName, b.inserts, 0}
if !b.ordered {
op.flags = 1 // ContinueOnError
}
_, err := b.c.writeQuery(op)
if err != nil {
return nil, &bulkError{err}
}
return &BulkResult{}, nil
}
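// Illustrative sketch (not part of the original source): typical use of the
// experimental Bulk API above. The collection is assumed to come from an
// open session.
func exampleBulk(coll *Collection) error {
	bulk := coll.Bulk()
	bulk.Unordered() // later inserts proceed even if earlier ones fail
	bulk.Insert(
		map[string]interface{}{"n": 1},
		map[string]interface{}{"n": 2},
	)
	_, err := bulk.Run()
	return err
}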

89
vendor/labix.org/v2/mgo/bulk_test.go generated vendored Normal file

@@ -0,0 +1,89 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"labix.org/v2/mgo"
. "launchpad.net/gocheck"
)
func (s *S) TestBulkInsert(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"n": 1})
bulk.Insert(M{"n": 2}, M{"n": 3})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}
func (s *S) TestBulkInsertError(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
type doc struct{ N int `_id` }
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}})
}
func (s *S) TestBulkInsertErrorUnordered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
type doc struct{ N int `_id` }
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

616
vendor/labix.org/v2/mgo/cluster.go generated vendored Normal file

@@ -0,0 +1,616 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"labix.org/v2/mgo/bson"
"net"
"sync"
"time"
)
// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.
type mongoCluster struct {
sync.RWMutex
serverSynced sync.Cond
userSeeds []string
dynaSeeds []string
servers mongoServers
masters mongoServers
references int
syncing bool
direct bool
failFast bool
syncCount uint
cachedIndex map[string]bool
sync chan bool
dial dialer
}
func newCluster(userSeeds []string, direct, failFast bool, dial dialer) *mongoCluster {
cluster := &mongoCluster{
userSeeds: userSeeds,
references: 1,
direct: direct,
failFast: failFast,
dial: dial,
}
cluster.serverSynced.L = cluster.RWMutex.RLocker()
cluster.sync = make(chan bool, 1)
stats.cluster(+1)
go cluster.syncServersLoop()
return cluster
}
// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
cluster.Lock()
cluster.references++
debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
cluster.Unlock()
}
// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
cluster.Lock()
if cluster.references == 0 {
panic("cluster.Release() with references == 0")
}
cluster.references--
debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
if cluster.references == 0 {
for _, server := range cluster.servers.Slice() {
server.Close()
}
// Wake up the sync loop so it can die.
cluster.syncServers()
stats.cluster(-1)
}
cluster.Unlock()
}
func (cluster *mongoCluster) LiveServers() (servers []string) {
cluster.RLock()
for _, serv := range cluster.servers.Slice() {
servers = append(servers, serv.Addr)
}
cluster.RUnlock()
return servers
}
func (cluster *mongoCluster) removeServer(server *mongoServer) {
cluster.Lock()
cluster.masters.Remove(server)
other := cluster.servers.Remove(server)
cluster.Unlock()
if other != nil {
other.Close()
log("Removed server ", server.Addr, " from cluster.")
}
server.Close()
}
type isMasterResult struct {
IsMaster bool
Secondary bool
Primary string
Hosts []string
Passives []string
Tags bson.D
Msg string
}
func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
	// Monotonic mode lets it talk to a slave and still hold the socket.
session := newSession(Monotonic, cluster, 10*time.Second)
session.setSocket(socket)
err := session.Run("ismaster", result)
session.Close()
return err
}
type possibleTimeout interface {
Timeout() bool
}
var syncSocketTimeout = 5 * time.Second
func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
var syncTimeout time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
syncTimeout = syncSocketTimeout
globalMutex.Unlock()
} else {
syncTimeout = syncSocketTimeout
}
addr := server.Addr
log("SYNC Processing ", addr, "...")
// Retry a few times to avoid knocking a server down for a hiccup.
var result isMasterResult
var tryerr error
for retry := 0; ; retry++ {
if retry == 3 || retry == 1 && cluster.failFast {
return nil, nil, tryerr
}
if retry > 0 {
// Don't abuse the server needlessly if there's something actually wrong.
if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
// Give a chance for waiters to timeout as well.
cluster.serverSynced.Broadcast()
}
time.Sleep(syncShortDelay)
}
// It's not clear what would be a good timeout here. Is it
// better to wait longer or to retry?
socket, _, err := server.AcquireSocket(0, syncTimeout)
if err != nil {
tryerr = err
logf("SYNC Failed to get socket to %s: %v", addr, err)
continue
}
err = cluster.isMaster(socket, &result)
socket.Release()
if err != nil {
tryerr = err
logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
continue
}
debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
break
}
if result.IsMaster {
debugf("SYNC %s is a master.", addr)
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
stats.conn(+1, true)
} else if result.Secondary {
debugf("SYNC %s is a slave.", addr)
} else if cluster.direct {
logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
} else {
logf("SYNC %s is neither a master nor a slave.", addr)
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
		return nil, nil, errors.New(addr + " is neither a master nor a slave")
}
info = &mongoServerInfo{
Master: result.IsMaster,
Mongos: result.Msg == "isdbgrid",
Tags: result.Tags,
}
hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
if result.Primary != "" {
// First in the list to speed up master discovery.
hosts = append(hosts, result.Primary)
}
hosts = append(hosts, result.Hosts...)
hosts = append(hosts, result.Passives...)
debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
return info, hosts, nil
}
type syncKind bool
const (
completeSync syncKind = true
partialSync syncKind = false
)
func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
cluster.Lock()
current := cluster.servers.Search(server.ResolvedAddr)
if current == nil {
if syncKind == partialSync {
cluster.Unlock()
server.Close()
log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
return
}
cluster.servers.Add(server)
if info.Master {
cluster.masters.Add(server)
log("SYNC Adding ", server.Addr, " to cluster as a master.")
} else {
log("SYNC Adding ", server.Addr, " to cluster as a slave.")
}
} else {
if server != current {
panic("addServer attempting to add duplicated server")
}
if server.Info().Master != info.Master {
if info.Master {
log("SYNC Server ", server.Addr, " is now a master.")
cluster.masters.Add(server)
} else {
log("SYNC Server ", server.Addr, " is now a slave.")
cluster.masters.Remove(server)
}
}
}
server.SetInfo(info)
debugf("SYNC Broadcasting availability of server %s", server.Addr)
cluster.serverSynced.Broadcast()
cluster.Unlock()
}
func (cluster *mongoCluster) getKnownAddrs() []string {
cluster.RLock()
max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
seen := make(map[string]bool, max)
known := make([]string, 0, max)
add := func(addr string) {
if _, found := seen[addr]; !found {
seen[addr] = true
known = append(known, addr)
}
}
for _, addr := range cluster.userSeeds {
add(addr)
}
for _, addr := range cluster.dynaSeeds {
add(addr)
}
for _, serv := range cluster.servers.Slice() {
add(serv.Addr)
}
cluster.RUnlock()
return known
}
// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
select {
case cluster.sync <- true:
default:
}
}
// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond
// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
for {
debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.references++ // Keep alive while syncing.
direct := cluster.direct
cluster.Unlock()
cluster.syncServersIteration(direct)
// We just synchronized, so consume any outstanding requests.
select {
case <-cluster.sync:
default:
}
cluster.Release()
// Hold off before allowing another sync. No point in
// burning CPU looking for down servers.
if !cluster.failFast {
time.Sleep(syncShortDelay)
}
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.syncCount++
// Poke all waiters so they have a chance to timeout or
// restart syncing if they wish to.
cluster.serverSynced.Broadcast()
// Check if we have to restart immediately either way.
restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
cluster.Unlock()
if restart {
log("SYNC No masters found. Will synchronize again.")
time.Sleep(syncShortDelay)
continue
}
debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
// Hold off until somebody explicitly requests a synchronization
// or it's time to check for a cluster topology change again.
select {
case <-cluster.sync:
case <-time.After(syncServersDelay):
}
}
debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}
func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
cluster.RLock()
server := cluster.servers.Search(tcpaddr.String())
cluster.RUnlock()
if server != nil {
return server
}
return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}
func resolveAddr(addr string) (*net.TCPAddr, error) {
tcpaddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
log("SYNC Failed to resolve ", addr, ": ", err.Error())
return nil, err
}
if tcpaddr.String() != addr {
debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
}
return tcpaddr, nil
}
type pendingAdd struct {
server *mongoServer
info *mongoServerInfo
}
func (cluster *mongoCluster) syncServersIteration(direct bool) {
log("SYNC Starting full topology synchronization...")
var wg sync.WaitGroup
var m sync.Mutex
notYetAdded := make(map[string]pendingAdd)
addIfFound := make(map[string]bool)
seen := make(map[string]bool)
syncKind := partialSync
var spawnSync func(addr string, byMaster bool)
spawnSync = func(addr string, byMaster bool) {
wg.Add(1)
go func() {
defer wg.Done()
tcpaddr, err := resolveAddr(addr)
if err != nil {
log("SYNC Failed to start sync of ", addr, ": ", err.Error())
return
}
resolvedAddr := tcpaddr.String()
m.Lock()
if byMaster {
if pending, ok := notYetAdded[resolvedAddr]; ok {
delete(notYetAdded, resolvedAddr)
m.Unlock()
cluster.addServer(pending.server, pending.info, completeSync)
return
}
addIfFound[resolvedAddr] = true
}
if seen[resolvedAddr] {
m.Unlock()
return
}
seen[resolvedAddr] = true
m.Unlock()
server := cluster.server(addr, tcpaddr)
info, hosts, err := cluster.syncServer(server)
if err != nil {
cluster.removeServer(server)
return
}
m.Lock()
add := direct || info.Master || addIfFound[resolvedAddr]
if add {
syncKind = completeSync
} else {
notYetAdded[resolvedAddr] = pendingAdd{server, info}
}
m.Unlock()
if add {
cluster.addServer(server, info, completeSync)
}
if !direct {
for _, addr := range hosts {
spawnSync(addr, info.Master)
}
}
}()
}
knownAddrs := cluster.getKnownAddrs()
for _, addr := range knownAddrs {
spawnSync(addr, false)
}
wg.Wait()
if syncKind == completeSync {
logf("SYNC Synchronization was complete (got data from primary).")
for _, pending := range notYetAdded {
cluster.removeServer(pending.server)
}
} else {
logf("SYNC Synchronization was partial (cannot talk to primary).")
for _, pending := range notYetAdded {
cluster.addServer(pending.server, pending.info, partialSync)
}
}
cluster.Lock()
ml := cluster.masters.Len()
logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml)
// Update dynamic seeds, but only if we have any good servers. Otherwise,
// leave them alone for better chances of a successful sync in the future.
if syncKind == completeSync {
dynaSeeds := make([]string, cluster.servers.Len())
for i, server := range cluster.servers.Slice() {
dynaSeeds[i] = server.Addr
}
cluster.dynaSeeds = dynaSeeds
debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
}
cluster.Unlock()
}
var socketsPerServer = 4096
// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D) (s *mongoSocket, err error) {
var started time.Time
var syncCount uint
warnedLimit := false
for {
cluster.RLock()
for {
ml := cluster.masters.Len()
sl := cluster.servers.Len()
debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml)
if ml > 0 || slaveOk && sl > 0 {
break
}
if started.IsZero() {
// Initialize after fast path above.
started = time.Now()
syncCount = cluster.syncCount
} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
cluster.RUnlock()
return nil, errors.New("no reachable servers")
}
log("Waiting for servers to synchronize...")
cluster.syncServers()
// Remember: this will release and reacquire the lock.
cluster.serverSynced.Wait()
}
var server *mongoServer
if slaveOk {
server = cluster.servers.BestFit(serverTags)
} else {
server = cluster.masters.BestFit(nil)
}
cluster.RUnlock()
if server == nil {
// Must have failed the requested tags. Sleep to avoid spinning.
			time.Sleep(100 * time.Millisecond)
continue
}
s, abended, err := server.AcquireSocket(socketsPerServer, socketTimeout)
if err == errSocketLimit {
			if !warnedLimit {
				warnedLimit = true
				log("WARNING: Per-server connection limit reached.")
			}
			time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
cluster.removeServer(server)
cluster.syncServers()
continue
}
if abended && !slaveOk {
var result isMasterResult
err := cluster.isMaster(s, &result)
if err != nil || !result.IsMaster {
logf("Cannot confirm server %s as master (%v)", server.Addr, err)
s.Release()
cluster.syncServers()
				time.Sleep(100 * time.Millisecond)
continue
}
}
return s, nil
}
panic("unreached")
}
func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
cluster.Lock()
if cluster.cachedIndex == nil {
cluster.cachedIndex = make(map[string]bool)
}
if exists {
cluster.cachedIndex[cacheKey] = true
} else {
delete(cluster.cachedIndex, cacheKey)
}
cluster.Unlock()
}
func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
cluster.RLock()
if cluster.cachedIndex != nil {
result = cluster.cachedIndex[cacheKey]
}
cluster.RUnlock()
return
}
func (cluster *mongoCluster) ResetIndexCache() {
cluster.Lock()
cluster.cachedIndex = make(map[string]bool)
cluster.Unlock()
}

1559
vendor/labix.org/v2/mgo/cluster_test.go generated vendored Normal file

File diff suppressed because it is too large

31
vendor/labix.org/v2/mgo/doc.go generated vendored Normal file

@@ -0,0 +1,31 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
// http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
// session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
// c := session.DB(database).C(collection)
// err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection cache, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
package mgo
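// Illustrative sketch (not part of the original source): the copy-per-unit-of-work
// pattern described above. Database and collection names are hypothetical.
func exampleSessionCopy(root *Session) error {
	s := root.Copy() // shares cluster information and the connection cache
	defer s.Close()  // puts the resources back in the pool
	var result struct{ N int }
	return s.DB("mydb").C("mycoll").Find(nil).One(&result)
}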

42
vendor/labix.org/v2/mgo/export_test.go generated vendored Normal file

@@ -0,0 +1,42 @@
package mgo
import (
"time"
)
func HackSocketsPerServer(newLimit int) (restore func()) {
	oldLimit := socketsPerServer
restore = func() {
socketsPerServer = oldLimit
}
socketsPerServer = newLimit
return
}
func HackPingDelay(newDelay time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldDelay := pingDelay
restore = func() {
globalMutex.Lock()
pingDelay = oldDelay
globalMutex.Unlock()
}
pingDelay = newDelay
return
}
func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldTimeout := syncSocketTimeout
restore = func() {
globalMutex.Lock()
syncSocketTimeout = oldTimeout
globalMutex.Unlock()
}
syncSocketTimeout = newTimeout
return
}

732
vendor/labix.org/v2/mgo/gridfs.go generated vendored Normal file

@@ -0,0 +1,732 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"encoding/hex"
"errors"
"hash"
"io"
"labix.org/v2/mgo/bson"
"os"
"sync"
"time"
)
type GridFS struct {
Files *Collection
Chunks *Collection
}
type gfsFileMode int
const (
gfsClosed gfsFileMode = 0
gfsReading gfsFileMode = 1
gfsWriting gfsFileMode = 2
)
type GridFile struct {
m sync.Mutex
c sync.Cond
gfs *GridFS
mode gfsFileMode
err error
chunk int
offset int64
wpending int
wbuf []byte
wsum hash.Hash
rbuf []byte
rcache *gfsCachedChunk
doc gfsFile
}
type gfsFile struct {
Id interface{} "_id"
ChunkSize int "chunkSize"
UploadDate time.Time "uploadDate"
Length int64 ",minsize"
MD5 string
Filename string ",omitempty"
ContentType string "contentType,omitempty"
Metadata *bson.Raw ",omitempty"
}
type gfsChunk struct {
Id interface{} "_id"
FilesId interface{} "files_id"
N int
Data []byte
}
type gfsCachedChunk struct {
wait sync.Mutex
n int
data []byte
err error
}
func newGridFS(db *Database, prefix string) *GridFS {
return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}
func (gfs *GridFS) newFile() *GridFile {
file := &GridFile{gfs: gfs}
file.c.L = &file.m
//runtime.SetFinalizer(file, finalizeFile)
return file
}
func finalizeFile(file *GridFile) {
file.Close()
}
// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// n, err := file.Write([]byte("Hello world!"))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// messages, err := os.Open("/var/log/messages")
// check(err)
// defer messages.Close()
// _, err = io.Copy(file, messages)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
file = gfs.newFile()
file.mode = gfsWriting
file.wsum = md5.New()
file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 256 * 1024, Filename: name}
return
}
// OpenId returns the file with the provided id, for reading.
// If the file isn't found, err will be set to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// Open returns the most recently uploaded file with the provided
// name, for reading. If the file isn't found, err will be set
// to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// OpenNext opens the next file from iter for reading, sets *file to it,
// and returns true on success. If no more documents are available
// on iter or an error occurred, *file is set to nil and the result is false.
// Errors will be available via iter.Err().
//
// The iter parameter must be an iterator on the GridFS files collection.
// Using the GridFS.Find method is an easy way to obtain such an iterator,
// but any iterator on the collection will work.
//
// If the provided *file is non-nil, OpenNext will close it before attempting
// to iterate to the next element. This means that in a loop one only
// has to worry about closing files when breaking out of the loop early
// (break, return, or panic).
//
// For example:
//
// gfs := db.GridFS("fs")
// query := gfs.Find(nil).Sort("filename")
// iter := query.Iter()
// var f *mgo.GridFile
// for gfs.OpenNext(iter, &f) {
// fmt.Printf("Filename: %s\n", f.Name())
// }
// if err := iter.Close(); err != nil {
// panic(err)
// }
//
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
if *file != nil {
// Ignoring the error here shouldn't be a big deal
// as we're reading the file and the loop iteration
// for this file is finished.
_ = (*file).Close()
}
var doc gfsFile
if !iter.Next(&doc) {
*file = nil
return false
}
f := gfs.newFile()
f.mode = gfsReading
f.doc = doc
*file = f
return true
}
// Find runs query on GridFS's files collection and returns
// the resulting Query.
//
// This logic:
//
// gfs := db.GridFS("fs")
// iter := gfs.Find(nil).Iter()
//
// Is equivalent to:
//
// files := db.C("fs" + ".files")
// iter := files.Find(nil).Iter()
//
func (gfs *GridFS) Find(query interface{}) *Query {
return gfs.Files.Find(query)
}
// RemoveId deletes the file with the provided id from the GridFS.
func (gfs *GridFS) RemoveId(id interface{}) error {
err := gfs.Files.Remove(bson.M{"_id": id})
if err != nil {
return err
}
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
return err
}
type gfsDocId struct {
Id interface{} "_id"
}
// Remove deletes all files with the provided name from the GridFS.
func (gfs *GridFS) Remove(name string) (err error) {
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
var doc gfsDocId
for iter.Next(&doc) {
if e := gfs.RemoveId(doc.Id); e != nil {
err = e
}
}
if err == nil {
err = iter.Close()
}
return err
}
func (file *GridFile) assertMode(mode gfsFileMode) {
switch file.mode {
case mode:
return
case gfsWriting:
panic("GridFile is open for writing")
case gfsReading:
panic("GridFile is open for reading")
case gfsClosed:
panic("GridFile is closed")
default:
panic("internal error: missing GridFile mode")
}
}
// SetChunkSize sets size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 256 KB.
//
// It is a runtime error to call this function once the file has started
// being written to.
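//
// For instance, a writer expecting large blobs might raise the chunk size
// before the first Write (a sketch; check is as in the Create example, and
// payload stands for a byte slice defined elsewhere):
//
//     file, err := gfs.Create("big.bin")
//     check(err)
//     file.SetChunkSize(1024 * 1024) // 1 MB chunks instead of 256 KB
//     _, err = file.Write(payload)
//     check(err)
//     check(file.Close())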
func (file *GridFile) SetChunkSize(bytes int) {
file.assertMode(gfsWriting)
debugf("GridFile %p: setting chunk size to %d", file, bytes)
file.m.Lock()
file.doc.ChunkSize = bytes
file.m.Unlock()
}
// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
return file.doc.Id
}
// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Id = id
file.m.Unlock()
}
// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
return file.doc.Filename
}
// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Filename = name
file.m.Unlock()
}
// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
return file.doc.ContentType
}
// SetContentType changes the optional file content type. An empty string may be
// used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.ContentType = ctype
file.m.Unlock()
}
// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
// result := struct{ INode int }{}
// err = file.GetMeta(&result)
// if err != nil {
// panic(err)
// }
// fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
file.m.Lock()
if file.doc.Metadata != nil {
err = bson.Unmarshal(file.doc.Metadata.Data, result)
}
file.m.Unlock()
return
}
// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
// file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
file.assertMode(gfsWriting)
data, err := bson.Marshal(metadata)
file.m.Lock()
if err != nil && file.err == nil {
file.err = err
} else {
file.doc.Metadata = &bson.Raw{Data: data}
}
file.m.Unlock()
}
// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
file.m.Lock()
bytes = file.doc.Length
file.m.Unlock()
return
}
// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
return file.doc.MD5
}
// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
return file.doc.UploadDate
}
// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
file.m.Lock()
defer file.m.Unlock()
if file.mode == gfsWriting {
if len(file.wbuf) > 0 && file.err == nil {
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
file.completeWrite()
} else if file.mode == gfsReading && file.rcache != nil {
file.rcache.wait.Lock()
file.rcache = nil
}
file.mode = gfsClosed
debugf("GridFile %p: closed", file)
return file.err
}
func (file *GridFile) completeWrite() {
for file.wpending > 0 {
debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
file.c.Wait()
}
if file.err != nil {
file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
return
}
hexsum := hex.EncodeToString(file.wsum.Sum(nil))
file.doc.UploadDate = bson.Now()
file.doc.MD5 = hexsum
file.err = file.gfs.Files.Insert(file.doc)
file.gfs.Chunks.EnsureIndexKey("files_id", "n")
}
// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
func (file *GridFile) Abort() {
if file.mode != gfsWriting {
panic("file.Abort must be called on file opened for writing")
}
file.err = errors.New("write aborted")
}
// Write writes the provided data to the file and returns the
// number of bytes written and an error if something went wrong.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
file.assertMode(gfsWriting)
file.m.Lock()
debugf("GridFile %p: writing %d bytes", file, len(data))
defer file.m.Unlock()
if file.err != nil {
return 0, file.err
}
n = len(data)
file.doc.Length += int64(n)
chunkSize := file.doc.ChunkSize
if len(file.wbuf)+len(data) < chunkSize {
file.wbuf = append(file.wbuf, data...)
return
}
// First, flush file.wbuf complementing with data.
if len(file.wbuf) > 0 {
missing := chunkSize - len(file.wbuf)
if missing > len(data) {
missing = len(data)
}
file.wbuf = append(file.wbuf, data[:missing]...)
data = data[missing:]
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
// Then, flush all chunks from data without copying.
for len(data) > chunkSize {
size := chunkSize
if size > len(data) {
size = len(data)
}
file.insertChunk(data[:size])
data = data[size:]
}
// And append the rest for a future call.
file.wbuf = append(file.wbuf, data...)
return n, file.err
}
func (file *GridFile) insertChunk(data []byte) {
n := file.chunk
file.chunk++
debugf("GridFile %p: adding to checksum: %q", file, string(data))
file.wsum.Write(data)
for file.doc.ChunkSize*file.wpending >= 1024*1024 {
// Hold on.. we got a MB pending.
file.c.Wait()
if file.err != nil {
return
}
}
file.wpending++
debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
// We may not own the memory of data, so rather than
// simply copying it, we'll marshal the document ahead of time.
data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
if err != nil {
file.err = err
return
}
go func() {
err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
file.m.Lock()
file.wpending--
if err != nil && file.err == nil {
file.err = err
}
file.c.Broadcast()
file.m.Unlock()
}()
}
// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
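//
// For example, to read the final 16 bytes of a file open for reading
// (check as in the examples above):
//
//     _, err = file.Seek(-16, os.SEEK_END)
//     check(err)
//     tail := make([]byte, 16)
//     _, err = file.Read(tail)
//     check(err)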
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
file.m.Lock()
debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
defer file.m.Unlock()
switch whence {
case os.SEEK_SET:
case os.SEEK_CUR:
offset += file.offset
case os.SEEK_END:
offset += file.doc.Length
default:
panic("unsupported whence value")
}
if offset > file.doc.Length {
return file.offset, errors.New("seek past end of file")
}
chunk := int(offset / int64(file.doc.ChunkSize))
if chunk+1 == file.chunk && offset >= file.offset {
file.rbuf = file.rbuf[int(offset-file.offset):]
file.offset = offset
return file.offset, nil
}
file.offset = offset
file.chunk = chunk
file.rbuf = nil
file.rbuf, err = file.getChunk()
if err == nil {
file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
}
return file.offset, err
}
// Read reads into b the next available data from the file and
// returns the number of bytes read and an error if something
// went wrong. At the end of the file, n will be zero and err
// will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
file.assertMode(gfsReading)
file.m.Lock()
debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
defer file.m.Unlock()
if file.offset == file.doc.Length {
return 0, io.EOF
}
for err == nil {
i := copy(b, file.rbuf)
n += i
file.offset += int64(i)
file.rbuf = file.rbuf[i:]
if i == len(b) || file.offset == file.doc.Length {
break
}
b = b[i:]
file.rbuf, err = file.getChunk()
}
return n, err
}
func (file *GridFile) getChunk() (data []byte, err error) {
cache := file.rcache
file.rcache = nil
if cache != nil && cache.n == file.chunk {
debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
cache.wait.Lock()
data, err = cache.data, cache.err
} else {
debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
var doc gfsChunk
err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
data = doc.Data
}
file.chunk++
if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
// Read the next one in background.
cache = &gfsCachedChunk{n: file.chunk}
cache.wait.Lock()
debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
// Clone the session to avoid having it closed in between.
chunks := file.gfs.Chunks
session := chunks.Database.Session.Clone()
go func(id interface{}, n int) {
defer session.Close()
chunks = chunks.With(session)
var doc gfsChunk
cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
cache.data = doc.Data
cache.wait.Unlock()
}(file.doc.Id, file.chunk)
file.rcache = cache
}
debugf("Returning err: %#v", err)
return
}

644
vendor/labix.org/v2/mgo/gridfs_test.go generated vendored Normal file
View File

@ -0,0 +1,644 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"io"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
. "launchpad.net/gocheck"
"os"
"time"
)
func (s *S) TestGridFSCreate(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
before := bson.Now()
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
err = file.Close()
c.Assert(err, IsNil)
after := bson.Now()
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
ud, ok := result["uploadDate"].(time.Time)
c.Assert(ok, Equals, true)
c.Assert(ud.After(before) && ud.Before(after), Equals, true)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 9,
"chunkSize": 262144,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
}
c.Assert(result, DeepEquals, expected)
// Check the chunk.
result = M{}
err = db.C("fs.chunks").Find(nil).One(result)
c.Assert(err, IsNil)
chunkId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(chunkId.Valid(), Equals, true)
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": 0,
"data": []byte("some data"),
}
c.Assert(result, DeepEquals, expected)
// Check that an index was created.
indexes, err := db.C("fs.chunks").Indexes()
c.Assert(err, IsNil)
c.Assert(len(indexes), Equals, 2)
c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}
func (s *S) TestGridFSFileDetails(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(file.Size(), Equals, int64(4))
n, err = file.Write([]byte(" data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 5)
c.Assert(file.Size(), Equals, int64(9))
id, _ := file.Id().(bson.ObjectId)
c.Assert(id.Valid(), Equals, true)
c.Assert(file.Name(), Equals, "myfile1.txt")
c.Assert(file.ContentType(), Equals, "")
var info interface{}
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, IsNil)
file.SetId("myid")
file.SetName("myfile2.txt")
file.SetContentType("text/plain")
file.SetMeta(M{"any": "thing"})
c.Assert(file.Id(), Equals, "myid")
c.Assert(file.Name(), Equals, "myfile2.txt")
c.Assert(file.ContentType(), Equals, "text/plain")
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, DeepEquals, bson.M{"any": "thing"})
err = file.Close()
c.Assert(err, IsNil)
c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
ud := file.UploadDate()
now := time.Now()
c.Assert(ud.Before(now), Equals, true)
c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "myid",
"length": 9,
"chunkSize": 262144,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
"filename": "myfile2.txt",
"contentType": "text/plain",
"metadata": M{"any": "thing"},
}
c.Assert(result, DeepEquals, expected)
}
func (s *S) TestGridFSCreateWithChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
// Smaller than the chunk size.
n, err := file.Write([]byte("abc"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Boundary in the middle.
n, err = file.Write([]byte("defg"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
// Boundary at the end.
n, err = file.Write([]byte("hij"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Larger than the chunk size, with 3 chunks.
n, err = file.Write([]byte("klmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
err = file.Close()
c.Assert(err, IsNil)
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, _ := result["_id"].(bson.ObjectId)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 22,
"chunkSize": 5,
"uploadDate": "<timestamp>",
"md5": "44a66044834cbe55040089cabfc102d5",
}
c.Assert(result, DeepEquals, expected)
// Check the chunks.
iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
for i := 0; ; i++ {
result = M{}
if !iter.Next(result) {
if i != 5 {
c.Fatalf("Expected 5 chunks, got %d", i)
}
break
}
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": i,
"data": []byte(dataChunks[i]),
}
c.Assert(result, DeepEquals, expected)
}
c.Assert(iter.Close(), IsNil)
}
func (s *S) TestGridFSAbort(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
var count int
for i := 0; i < 10; i++ {
count, err = db.C("fs.chunks").Count()
if count > 0 || err != nil {
break
}
}
c.Assert(err, IsNil)
c.Assert(count, Equals, 1)
file.Abort()
err = file.Close()
c.Assert(err, ErrorMatches, "write aborted")
count, err = db.C("fs.chunks").Count()
c.Assert(err, IsNil)
c.Assert(count, Equals, 0)
}
func (s *S) TestGridFSOpenNotFound(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.OpenId("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
file, err = gfs.Open("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
}
func (s *S) TestGridFSReadAll(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
n, err = file.Read(b)
c.Assert(n, Equals, 22)
c.Assert(err, IsNil)
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSReadChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
// Smaller than the chunk size.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("abc"))
// Boundary in the middle.
n, err = file.Read(b[:4])
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(b[:4], DeepEquals, []byte("defg"))
// Boundary at the end.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("hij"))
// Larger than the chunk size, with 3 chunks.
n, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSOpen(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
}
func (s *S) TestGridFSSeek(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
b := make([]byte, 5)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
o, err := file.Seek(3, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(3))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("defgh"))
o, err = file.Seek(5, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(13))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("nopqr"))
o, err = file.Seek(-10, os.SEEK_END)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(12))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("mnopq"))
o, err = file.Seek(8, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(8))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("ijklm"))
// Trivial seek forward within same chunk. Already
// got the data, shouldn't touch the database.
sent := mgo.GetStats().SentOps
o, err = file.Seek(1, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(14))
c.Assert(mgo.GetStats().SentOps, Equals, sent)
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("opqrs"))
// Try seeking past end of file.
file.Seek(3, os.SEEK_SET)
o, err = file.Seek(23, os.SEEK_SET)
c.Assert(err, ErrorMatches, "seek past end of file")
c.Assert(o, Equals, int64(3))
}
func (s *S) TestGridFSRemoveId(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
id := file.Id()
file.Close()
err = gfs.RemoveId(id)
c.Assert(err, IsNil)
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSRemove(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
err = gfs.Remove("myfile.txt")
c.Assert(err, IsNil)
_, err = gfs.Open("myfile.txt")
c.Assert(err == mgo.ErrNotFound, Equals, true)
n, err := db.C("fs.chunks").Find(nil).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSOpenNext(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile2.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
var f *mgo.GridFile
var b [1]byte
iter := gfs.Find(nil).Sort("-filename").Iter()
ok := gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile2.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
// Do it again with a more restrictive query to make sure
// it's actually taken into account.
iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
}

133
vendor/labix.org/v2/mgo/log.go generated vendored Normal file
View File

@ -0,0 +1,133 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"fmt"
"sync"
)
// ---------------------------------------------------------------------------
// Logging integration.
// Avoid importing the log type information unnecessarily. There's a small cost
// associated with using an interface rather than the type. Depending on how
// often the logger is plugged in, it would be worth using the type instead.
type log_Logger interface {
Output(calldepth int, s string) error
}
var (
globalLogger log_Logger
globalDebug bool
globalMutex sync.Mutex
)
// RACE WARNING: There are known data races when logging, which are manually
// silenced when the race detector is in use. These data races won't be
// observed in typical use, because logging is supposed to be set up once when
// the application starts. Because raceDetector is a constant, the compiler
// should elide the locks altogether in actual use.
// SetLogger specifies the *log.Logger object where log messages should be sent.
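//
// The interface is satisfied by the standard library's *log.Logger, so a
// minimal hookup is one call each (a sketch):
//
//     mgo.SetLogger(log.New(os.Stderr, "[mgo] ", log.LstdFlags))
//     mgo.SetDebug(true)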
func SetLogger(logger log_Logger) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalLogger = logger
}
// SetDebug enables the delivery of debug messages to the logger. Only
// meaningful if a logger is also set.
func SetDebug(debug bool) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalDebug = debug
}
func log(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func logln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func logf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}
func debug(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func debugln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func debugf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}

91
vendor/labix.org/v2/mgo/queue.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
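// queue is a circular FIFO buffer: popi chases pushi around elems, and
// expand re-linearizes any wrapped contents into a larger slice. An
// in-package sketch of the behavior (fmt import assumed):
//
//     var q queue
//     for i := 0; i < 10; i++ {
//         q.Push(i) // the 9th push grows the initial capacity of 8
//     }
//     for q.Len() > 0 {
//         fmt.Println(q.Pop()) // 0 through 9, in insertion order
//     }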
type queue struct {
elems []interface{}
nelems, popi, pushi int
}
func (q *queue) Len() int {
return q.nelems
}
func (q *queue) Push(elem interface{}) {
//debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
if q.nelems == len(q.elems) {
q.expand()
}
q.elems[q.pushi] = elem
q.nelems++
q.pushi = (q.pushi + 1) % len(q.elems)
//debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
}
func (q *queue) Pop() (elem interface{}) {
//debugf("Popping(pushi=%d popi=%d cap=%d)\n",
// q.pushi, q.popi, len(q.elems))
if q.nelems == 0 {
return nil
}
elem = q.elems[q.popi]
q.elems[q.popi] = nil // Help GC.
q.nelems--
q.popi = (q.popi + 1) % len(q.elems)
//debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
return elem
}
func (q *queue) expand() {
curcap := len(q.elems)
var newcap int
if curcap == 0 {
newcap = 8
} else if curcap < 1024 {
newcap = curcap * 2
} else {
newcap = curcap + (curcap / 4)
}
elems := make([]interface{}, newcap)
if q.popi == 0 {
copy(elems, q.elems)
q.pushi = curcap
} else {
newpopi := newcap - (curcap - q.popi)
copy(elems, q.elems[:q.popi])
copy(elems[newpopi:], q.elems[q.popi:])
q.popi = newpopi
}
for i := range q.elems {
q.elems[i] = nil // Help GC.
}
q.elems = elems
}

104
vendor/labix.org/v2/mgo/queue_test.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"launchpad.net/gocheck"
)
type QS struct{}
var _ = gocheck.Suite(&QS{})
func (s *QS) TestSequentialGrowth(c *gocheck.C) {
q := queue{}
n := 2048
for i := 0; i != n; i++ {
q.Push(i)
}
for i := 0; i != n; i++ {
c.Assert(q.Pop(), gocheck.Equals, i)
}
}
var queueTestLists = [][]int{
// {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
{0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11},
// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
{0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11},
// {0, 1, 2, 3, 4, 5, 6, 7, 8}
{0, 1, 2, 3, 4, 5, 6, 7, 8,
-1, -1, -1, -1, -1, -1, -1, -1, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8},
}
func (s *QS) TestQueueTestLists(c *gocheck.C) {
test := []int{}
testi := 0
reset := func() {
test = test[0:0]
testi = 0
}
push := func(i int) {
test = append(test, i)
}
pop := func() (i int) {
if testi == len(test) {
return -1
}
i = test[testi]
testi++
return
}
for _, list := range queueTestLists {
reset()
q := queue{}
for _, n := range list {
if n == -1 {
c.Assert(q.Pop(), gocheck.Equals, pop(),
gocheck.Commentf("With list %#v", list))
} else {
q.Push(n)
push(n)
}
}
for n := pop(); n != -1; n = pop() {
c.Assert(q.Pop(), gocheck.Equals, n,
gocheck.Commentf("With list %#v", list))
}
c.Assert(q.Pop(), gocheck.Equals, nil,
gocheck.Commentf("With list %#v", list))
}
}

6
vendor/labix.org/v2/mgo/raceoff.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
// +build !race
package mgo
const raceDetector = false

5
vendor/labix.org/v2/mgo/raceon.go generated vendored Normal file
View File

@ -0,0 +1,5 @@
// +build race
package mgo
const raceDetector = true
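// Together with raceoff.go this implements the build-tag constant pattern:
// go build -race defines the race tag and selects this file, while normal
// builds compile raceoff.go. Because raceDetector is a constant, guards
// like those in log.go are folded away entirely in non-race builds:
//
//     if raceDetector { // constant false in ordinary builds
//         globalMutex.Lock()
//         defer globalMutex.Unlock()
//     }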

11
vendor/labix.org/v2/mgo/saslimpl.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
//+build sasl
package mgo
import (
"labix.org/v2/mgo/sasl"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
}

11
vendor/labix.org/v2/mgo/saslstub.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
//+build !sasl
package mgo
import (
"fmt"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
}

444
vendor/labix.org/v2/mgo/server.go generated vendored Normal file
View File

@ -0,0 +1,444 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"labix.org/v2/mgo/bson"
"net"
"sort"
"sync"
"time"
)
// ---------------------------------------------------------------------------
// Mongo server encapsulation.
type mongoServer struct {
sync.RWMutex
Addr string
ResolvedAddr string
tcpaddr *net.TCPAddr
unusedSockets []*mongoSocket
liveSockets []*mongoSocket
closed bool
abended bool
sync chan bool
dial dialer
pingValue time.Duration
pingIndex int
pingCount uint32
pingWindow [6]time.Duration
info *mongoServerInfo
}
type dialer struct {
old func(addr net.Addr) (net.Conn, error)
new func(addr *ServerAddr) (net.Conn, error)
}
func (dial dialer) isSet() bool {
return dial.old != nil || dial.new != nil
}
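// The new-style hook is what DialInfo.DialServer exposes publicly via
// mgo.DialWithInfo (assuming that API is present in this revision). A
// common use is wrapping each server connection in TLS, sketched here
// with a hypothetical host:
//
//     info := &mgo.DialInfo{
//         Addrs: []string{"db.example.com:27017"},
//         DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {
//             return tls.Dial("tcp", addr.String(), &tls.Config{})
//         },
//     }
//     session, err := mgo.DialWithInfo(info)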
type mongoServerInfo struct {
Master bool
Mongos bool
Tags bson.D
}
var defaultServerInfo mongoServerInfo
func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
server := &mongoServer{
Addr: addr,
ResolvedAddr: tcpaddr.String(),
tcpaddr: tcpaddr,
sync: sync,
dial: dial,
info: &defaultServerInfo,
}
// Once so the server gets a ping value, then loop in background.
server.pinger(false)
go server.pinger(true)
return server
}
var errSocketLimit = errors.New("per-server connection limit reached")
var errServerClosed = errors.New("server was closed")
// AcquireSocket returns a socket for communicating with the server.
// This will attempt to reuse an old connection, if one is available. Otherwise,
// it will establish a new one. The returned socket is owned by the call site,
// and will return to the cache when the socket has its Release method called
// the same number of times as AcquireSocket + Acquire were called for it.
// If the limit argument is not zero, a socket will only be returned if the
// number of sockets in use for this server is under the provided limit.
func (server *mongoServer) AcquireSocket(limit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
for {
server.Lock()
abended = server.abended
if server.closed {
server.Unlock()
return nil, abended, errServerClosed
}
n := len(server.unusedSockets)
if limit > 0 && len(server.liveSockets)-n >= limit {
server.Unlock()
return nil, false, errSocketLimit
}
if n > 0 {
socket = server.unusedSockets[n-1]
server.unusedSockets[n-1] = nil // Help GC.
server.unusedSockets = server.unusedSockets[:n-1]
info := server.info
server.Unlock()
err = socket.InitialAcquire(info, timeout)
if err != nil {
continue
}
} else {
server.Unlock()
socket, err = server.Connect(timeout)
if err == nil {
server.Lock()
// We've waited for the Connect; see if we got
// closed in the meantime.
if server.closed {
server.Unlock()
socket.Release()
socket.Close()
return nil, abended, errServerClosed
}
server.liveSockets = append(server.liveSockets, socket)
server.Unlock()
}
}
return
}
panic("unreachable")
}
// Connect establishes a new connection to the server. This should
// generally be done through server.AcquireSocket().
func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
server.RLock()
master := server.info.Master
dial := server.dial
server.RUnlock()
logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
var conn net.Conn
var err error
switch {
case !dial.isSet():
// Cannot do this because it lacks timeout support. :-(
//conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
case dial.old != nil:
conn, err = dial.old(server.tcpaddr)
case dial.new != nil:
conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
default:
panic("dialer is set, but both dial.old and dial.new are nil")
}
if err != nil {
logf("Connection to %s failed: %v", server.Addr, err.Error())
return nil, err
}
logf("Connection to %s established.", server.Addr)
stats.conn(+1, master)
return newSocket(server, conn, timeout), nil
}
// Close forces closing all sockets that are alive, whether
// they're currently in use or not.
func (server *mongoServer) Close() {
server.Lock()
server.closed = true
liveSockets := server.liveSockets
unusedSockets := server.unusedSockets
server.liveSockets = nil
server.unusedSockets = nil
server.Unlock()
logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
for i, s := range liveSockets {
s.Close()
liveSockets[i] = nil
}
for i := range unusedSockets {
unusedSockets[i] = nil
}
}
// RecycleSocket puts socket back into the unused cache.
func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
server.Lock()
if !server.closed {
server.unusedSockets = append(server.unusedSockets, socket)
}
server.Unlock()
}
func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
for i, s := range sockets {
if s == socket {
copy(sockets[i:], sockets[i+1:])
n := len(sockets) - 1
sockets[n] = nil
sockets = sockets[:n]
break
}
}
return sockets
}
// AbendSocket notifies the server that the given socket has terminated
// abnormally, and thus should be discarded rather than cached.
func (server *mongoServer) AbendSocket(socket *mongoSocket) {
server.Lock()
server.abended = true
if server.closed {
server.Unlock()
return
}
server.liveSockets = removeSocket(server.liveSockets, socket)
server.unusedSockets = removeSocket(server.unusedSockets, socket)
server.Unlock()
// Maybe just a timeout, but suggest a cluster sync up just in case.
select {
case server.sync <- true:
default:
}
}
func (server *mongoServer) SetInfo(info *mongoServerInfo) {
server.Lock()
server.info = info
server.Unlock()
}
func (server *mongoServer) Info() *mongoServerInfo {
server.Lock()
info := server.info
server.Unlock()
return info
}
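// hasTags reports whether the server's tags satisfy at least one of the
// given tag sets: the sets are OR'ed together, while every pair within a
// set must be matched by the server's own tags.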
func (server *mongoServer) hasTags(serverTags []bson.D) bool {
NextTagSet:
for _, tags := range serverTags {
NextReqTag:
for _, req := range tags {
for _, has := range server.info.Tags {
if req.Name == has.Name {
if req.Value == has.Value {
continue NextReqTag
}
continue NextTagSet
}
}
continue NextTagSet
}
return true
}
return false
}
var pingDelay = 5 * time.Second
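// pinger runs the admin "ping" command against the server, keeping a
// rolling window of the last 6 round-trip times and publishing the window
// maximum as pingValue, which BestFit uses to prefer nearby servers.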
func (server *mongoServer) pinger(loop bool) {
var delay time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
delay = pingDelay
globalMutex.Unlock()
} else {
delay = pingDelay
}
op := queryOp{
collection: "admin.$cmd",
query: bson.D{{"ping", 1}},
flags: flagSlaveOk,
limit: -1,
}
for {
if loop {
time.Sleep(delay)
}
op := op
socket, _, err := server.AcquireSocket(0, 3*delay)
if err == nil {
start := time.Now()
_, _ = socket.SimpleQuery(&op)
elapsed := time.Now().Sub(start)
server.pingWindow[server.pingIndex] = elapsed
server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
server.pingCount++
var max time.Duration
for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
if server.pingWindow[i] > max {
max = server.pingWindow[i]
}
}
socket.Release()
server.Lock()
if server.closed {
loop = false
}
server.pingValue = max
server.Unlock()
logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
} else if err == errServerClosed {
return
}
if !loop {
return
}
}
}
type mongoServerSlice []*mongoServer
func (s mongoServerSlice) Len() int {
return len(s)
}
func (s mongoServerSlice) Less(i, j int) bool {
return s[i].ResolvedAddr < s[j].ResolvedAddr
}
func (s mongoServerSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s mongoServerSlice) Sort() {
sort.Sort(s)
}
func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
n := len(s)
i = sort.Search(n, func(i int) bool {
return s[i].ResolvedAddr >= resolvedAddr
})
return i, i != n && s[i].ResolvedAddr == resolvedAddr
}
type mongoServers struct {
slice mongoServerSlice
}
func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
if i, ok := servers.slice.Search(resolvedAddr); ok {
return servers.slice[i]
}
return nil
}
func (servers *mongoServers) Add(server *mongoServer) {
servers.slice = append(servers.slice, server)
servers.slice.Sort()
}
func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
if i, found := servers.slice.Search(other.ResolvedAddr); found {
server = servers.slice[i]
copy(servers.slice[i:], servers.slice[i+1:])
n := len(servers.slice) - 1
servers.slice[n] = nil // Help GC.
servers.slice = servers.slice[:n]
}
return
}
func (servers *mongoServers) Slice() []*mongoServer {
return ([]*mongoServer)(servers.slice)
}
func (servers *mongoServers) Get(i int) *mongoServer {
return servers.slice[i]
}
func (servers *mongoServers) Len() int {
return len(servers.slice)
}
func (servers *mongoServers) Empty() bool {
return len(servers.slice) == 0
}
// BestFit returns the best guess of what would be the most interesting
// server to perform operations on at this point in time.
func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer {
var best *mongoServer
for _, next := range servers.slice {
if best == nil {
best = next
best.RLock()
if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
best.RUnlock()
best = nil
}
continue
}
next.RLock()
swap := false
switch {
case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
// Must have requested tags.
case next.info.Master != best.info.Master:
// Prefer slaves.
swap = best.info.Master
case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
// Prefer nearest server.
swap = next.pingValue < best.pingValue
case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
// Prefer servers with fewer connections.
swap = true
}
if swap {
best.RUnlock()
best = next
} else {
next.RUnlock()
}
}
if best != nil {
best.RUnlock()
}
return best
}
func absDuration(d time.Duration) time.Duration {
if d < 0 {
return -d
}
return d
}

3517
vendor/labix.org/v2/mgo/session.go generated vendored Normal file

File diff suppressed because it is too large

3260
vendor/labix.org/v2/mgo/session_test.go generated vendored Normal file

File diff suppressed because it is too large

673
vendor/labix.org/v2/mgo/socket.go generated vendored Normal file
View File

@ -0,0 +1,673 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"labix.org/v2/mgo/bson"
"net"
"sync"
"time"
)
type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
type mongoSocket struct {
sync.Mutex
server *mongoServer // nil when cached
conn net.Conn
timeout time.Duration
addr string // For debugging only.
nextRequestId uint32
replyFuncs map[uint32]replyFunc
references int
creds []Credential
logout []Credential
cachedNonce string
gotNonce sync.Cond
dead error
serverInfo *mongoServerInfo
}
type queryOpFlags uint32
const (
_ queryOpFlags = 1 << iota
flagTailable
flagSlaveOk
flagLogReplay
flagNoCursorTimeout
flagAwaitData
)
type queryOp struct {
collection string
query interface{}
skip int32
limit int32
selector interface{}
flags queryOpFlags
replyFunc replyFunc
options queryWrapper
hasOptions bool
serverTags []bson.D
}
type queryWrapper struct {
Query interface{} "$query"
OrderBy interface{} "$orderby,omitempty"
Hint interface{} "$hint,omitempty"
Explain bool "$explain,omitempty"
Snapshot bool "$snapshot,omitempty"
ReadPreference bson.D "$readPreference,omitempty"
}
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos {
op.hasOptions = true
op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}}
}
if op.hasOptions {
if op.query == nil {
var empty bson.D
op.options.Query = empty
} else {
op.options.Query = op.query
}
debugf("final query is %#v\n", &op.options)
return &op.options
}
return op.query
}
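// Illustration (not in the original source): for a slaveOk query
// carrying serverTags through a mongos, finalQuery wraps the user
// query so the server sees, e.g.:
//
//     {"$query": {"name": "x"},
//      "$readPreference": {"mode": "secondaryPreferred",
//                          "tags": [{"dc": "east"}]}}
//
// When no options are set it returns op.query untouched, so the
// common path pays no wrapping cost.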
type getMoreOp struct {
collection string
limit int32
cursorId int64
replyFunc replyFunc
}
type replyOp struct {
flags uint32
cursorId int64
firstDoc int32
replyDocs int32
}
type insertOp struct {
collection string // "database.collection"
documents []interface{} // One or more documents to insert
flags uint32
}
type updateOp struct {
collection string // "database.collection"
selector interface{}
update interface{}
flags uint32
}
type deleteOp struct {
collection string // "database.collection"
selector interface{}
flags uint32
}
type killCursorsOp struct {
cursorIds []int64
}
type requestInfo struct {
bufferPos int
replyFunc replyFunc
}
func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
socket := &mongoSocket{
conn: conn,
addr: server.Addr,
server: server,
replyFuncs: make(map[uint32]replyFunc),
}
socket.gotNonce.L = &socket.Mutex
if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
panic("newSocket: InitialAcquire returned error: " + err.Error())
}
stats.socketsAlive(+1)
debugf("Socket %p to %s: initialized", socket, socket.addr)
socket.resetNonce()
go socket.readLoop()
return socket
}
// Server returns the server that the socket is associated with.
// It returns nil while the socket is cached in its respective server.
func (socket *mongoSocket) Server() *mongoServer {
socket.Lock()
server := socket.server
socket.Unlock()
return server
}
// ServerInfo returns details for the server at the time the socket
// was initially acquired.
func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
socket.Lock()
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// InitialAcquire obtains the first reference to the socket, either
// right after the connection is made or once a recycled socket is
// being put back in use.
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
socket.Lock()
if socket.references > 0 {
panic("Socket acquired out of cache with references")
}
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
return dead
}
socket.references++
socket.serverInfo = serverInfo
socket.timeout = timeout
stats.socketsInUse(+1)
stats.socketRefs(+1)
socket.Unlock()
return nil
}
// Acquire obtains an additional reference to the socket.
// The socket will only be recycled when it's released as many
// times as it's been acquired.
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
socket.Lock()
if socket.references == 0 {
panic("Socket got non-initial acquire with references == 0")
}
// We'll track references to dead sockets as well.
// Caller is still supposed to release the socket.
socket.references++
stats.socketRefs(+1)
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// Release decrements a socket reference. The socket will be
// recycled once it's been released as many times as it's been acquired.
func (socket *mongoSocket) Release() {
socket.Lock()
if socket.references == 0 {
panic("socket.Release() with references == 0")
}
socket.references--
stats.socketRefs(-1)
if socket.references == 0 {
stats.socketsInUse(-1)
server := socket.server
socket.Unlock()
socket.LogoutAll()
// If the socket is dead server is nil.
if server != nil {
server.RecycleSocket(socket)
}
} else {
socket.Unlock()
}
}
// SetTimeout changes the timeout used on socket operations.
func (socket *mongoSocket) SetTimeout(d time.Duration) {
socket.Lock()
socket.timeout = d
socket.Unlock()
}
type deadlineType int
const (
readDeadline deadlineType = 1
writeDeadline deadlineType = 2
)
func (socket *mongoSocket) updateDeadline(which deadlineType) {
var when time.Time
if socket.timeout > 0 {
when = time.Now().Add(socket.timeout)
}
whichstr := ""
switch which {
case readDeadline | writeDeadline:
whichstr = "read/write"
socket.conn.SetDeadline(when)
case readDeadline:
whichstr = "read"
socket.conn.SetReadDeadline(when)
case writeDeadline:
whichstr = "write"
socket.conn.SetWriteDeadline(when)
default:
panic("invalid parameter to updateDeadline")
}
debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
}
// Close terminates the socket use.
func (socket *mongoSocket) Close() {
socket.kill(errors.New("Closed explicitly"), false)
}
func (socket *mongoSocket) kill(err error, abend bool) {
socket.Lock()
if socket.dead != nil {
debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
socket.Unlock()
return
}
logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
socket.dead = err
socket.conn.Close()
stats.socketsAlive(-1)
replyFuncs := socket.replyFuncs
socket.replyFuncs = make(map[uint32]replyFunc)
server := socket.server
socket.server = nil
socket.gotNonce.Broadcast()
socket.Unlock()
for _, replyFunc := range replyFuncs {
logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
replyFunc(err, nil, -1, nil)
}
if abend {
server.AbendSocket(socket)
}
}
func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
var wait, change sync.Mutex
var replyDone bool
var replyData []byte
var replyErr error
wait.Lock()
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
change.Lock()
if !replyDone {
replyDone = true
replyErr = err
if err == nil {
replyData = docData
}
}
change.Unlock()
wait.Unlock()
}
err = socket.Query(op)
if err != nil {
return nil, err
}
wait.Lock()
change.Lock()
data = replyData
err = replyErr
change.Unlock()
return data, err
}
func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
if lops := socket.flushLogout(); len(lops) > 0 {
ops = append(lops, ops...)
}
buf := make([]byte, 0, 256)
// Serialize operations synchronously to avoid interrupting
// other goroutines while we can't really be sending data.
// Also, record id positions so that we can compute request
// ids at once later with the lock already held.
requests := make([]requestInfo, len(ops))
requestCount := 0
for _, op := range ops {
debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
start := len(buf)
var replyFunc replyFunc
switch op := op.(type) {
case *updateOp:
buf = addHeader(buf, 2001)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, int32(op.flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update)
buf, err = addBSON(buf, op.update)
if err != nil {
return err
}
case *insertOp:
buf = addHeader(buf, 2002)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
for _, doc := range op.documents {
debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
buf, err = addBSON(buf, doc)
if err != nil {
return err
}
}
case *queryOp:
buf = addHeader(buf, 2004)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.skip)
buf = addInt32(buf, op.limit)
buf, err = addBSON(buf, op.finalQuery(socket))
if err != nil {
return err
}
if op.selector != nil {
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
}
replyFunc = op.replyFunc
case *getMoreOp:
buf = addHeader(buf, 2005)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.limit)
buf = addInt64(buf, op.cursorId)
replyFunc = op.replyFunc
case *deleteOp:
buf = addHeader(buf, 2006)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, int32(op.flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
case *killCursorsOp:
buf = addHeader(buf, 2007)
buf = addInt32(buf, 0) // Reserved
buf = addInt32(buf, int32(len(op.cursorIds)))
for _, cursorId := range op.cursorIds {
buf = addInt64(buf, cursorId)
}
default:
panic("internal error: unknown operation type")
}
setInt32(buf, start, int32(len(buf)-start))
if replyFunc != nil {
request := &requests[requestCount]
request.replyFunc = replyFunc
request.bufferPos = start
requestCount++
}
}
// Buffer is ready for the pipe. Lock, allocate ids, and enqueue.
socket.Lock()
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, dead.Error())
// XXX This seems necessary in case the session is closed concurrently
// with a query being performed, but it's not yet tested:
for i := 0; i != requestCount; i++ {
request := &requests[i]
if request.replyFunc != nil {
request.replyFunc(dead, nil, -1, nil)
}
}
return dead
}
wasWaiting := len(socket.replyFuncs) > 0
// Reserve id 0 for requests which should have no responses.
requestId := socket.nextRequestId + 1
if requestId == 0 {
requestId++
}
socket.nextRequestId = requestId + uint32(requestCount)
for i := 0; i != requestCount; i++ {
request := &requests[i]
setInt32(buf, request.bufferPos+4, int32(requestId))
socket.replyFuncs[requestId] = request.replyFunc
requestId++
}
debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
stats.sentOps(len(ops))
socket.updateDeadline(writeDeadline)
_, err = socket.conn.Write(buf)
if !wasWaiting && requestCount > 0 {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
return err
}
func fill(r net.Conn, b []byte) error {
l := len(b)
n, err := r.Read(b)
for n != l && err == nil {
var ni int
ni, err = r.Read(b[n:])
n += ni
}
return err
}
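// fill loops because net.Conn.Read may return short reads; it keeps
// reading until b is full or an error stops progress, which makes it
// behave like io.ReadFull(r, b) for this transport.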
// Estimated minimum cost per socket: 1 goroutine + memory for the largest
// document ever seen.
func (socket *mongoSocket) readLoop() {
p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
s := make([]byte, 4)
conn := socket.conn // No locking, conn never changes.
for {
// XXX Handle timeouts, etc.
err := fill(conn, p)
if err != nil {
socket.kill(err, true)
return
}
totalLen := getInt32(p, 0)
responseTo := getInt32(p, 8)
opCode := getInt32(p, 12)
// Don't use socket.server.Addr here. socket is not
// locked and socket.server may go away.
debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)
_ = totalLen
if opCode != 1 {
socket.kill(errors.New("opcode != 1, corrupted data?"), true)
return
}
reply := replyOp{
flags: uint32(getInt32(p, 16)),
cursorId: getInt64(p, 20),
firstDoc: getInt32(p, 28),
replyDocs: getInt32(p, 32),
}
stats.receivedOps(+1)
stats.receivedDocs(int(reply.replyDocs))
socket.Lock()
replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
if ok {
delete(socket.replyFuncs, uint32(responseTo))
}
socket.Unlock()
if replyFunc != nil && reply.replyDocs == 0 {
replyFunc(nil, &reply, -1, nil)
} else {
for i := 0; i != int(reply.replyDocs); i++ {
err := fill(conn, s)
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
b := make([]byte, int(getInt32(s, 0)))
// copy(b, s) in an efficient way.
b[0] = s[0]
b[1] = s[1]
b[2] = s[2]
b[3] = s[3]
err = fill(conn, b[4:])
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
if globalDebug && globalLogger != nil {
m := bson.M{}
if err := bson.Unmarshal(b, m); err == nil {
debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
}
}
if replyFunc != nil {
replyFunc(nil, &reply, i, b)
}
// XXX Do bound checking against totalLen.
}
}
socket.Lock()
if len(socket.replyFuncs) == 0 {
// Nothing else to read for now. Disable deadline.
socket.conn.SetReadDeadline(time.Time{})
} else {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
// XXX Do bound checking against totalLen.
}
}
var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
func addHeader(b []byte, opcode int) []byte {
i := len(b)
b = append(b, emptyHeader...)
// Enough for current opcodes.
b[i+12] = byte(opcode)
b[i+13] = byte(opcode >> 8)
return b
}
func addInt32(b []byte, i int32) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}
func addInt64(b []byte, i int64) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
}
func addCString(b []byte, s string) []byte {
b = append(b, []byte(s)...)
b = append(b, 0)
return b
}
func addBSON(b []byte, doc interface{}) ([]byte, error) {
if doc == nil {
return append(b, 5, 0, 0, 0, 0), nil
}
data, err := bson.Marshal(doc)
if err != nil {
return b, err
}
return append(b, data...), nil
}
func setInt32(b []byte, pos int, i int32) {
b[pos] = byte(i)
b[pos+1] = byte(i >> 8)
b[pos+2] = byte(i >> 16)
b[pos+3] = byte(i >> 24)
}
func getInt32(b []byte, pos int) int32 {
return (int32(b[pos+0])) |
(int32(b[pos+1]) << 8) |
(int32(b[pos+2]) << 16) |
(int32(b[pos+3]) << 24)
}
func getInt64(b []byte, pos int) int64 {
return (int64(b[pos+0])) |
(int64(b[pos+1]) << 8) |
(int64(b[pos+2]) << 16) |
(int64(b[pos+3]) << 24) |
(int64(b[pos+4]) << 32) |
(int64(b[pos+5]) << 40) |
(int64(b[pos+6]) << 48) |
(int64(b[pos+7]) << 56)
}

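The helpers at the end of socket.go hand-roll little-endian integers, which is what the MongoDB wire protocol mandates. A small standalone round-trip check (the helper bodies are copied from the file above; the main function is illustrative only):

package main

import "fmt"

func addInt32(b []byte, i int32) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}

func getInt32(b []byte, pos int) int32 {
	return int32(b[pos]) | int32(b[pos+1])<<8 |
		int32(b[pos+2])<<16 | int32(b[pos+3])<<24
}

func main() {
	// A message header starts with the total length; writing it and
	// reading it back must round-trip exactly.
	buf := addInt32(nil, 1234567)
	fmt.Println(getInt32(buf, 0) == 1234567) // true
	fmt.Printf("% x\n", buf)                 // 87 d6 12 00 (little-endian)
}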
147
vendor/labix.org/v2/mgo/stats.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"sync"
)
var stats *Stats
var statsMutex sync.Mutex
func SetStats(enabled bool) {
statsMutex.Lock()
if enabled {
if stats == nil {
stats = &Stats{}
}
} else {
stats = nil
}
statsMutex.Unlock()
}
func GetStats() (snapshot Stats) {
statsMutex.Lock()
// Stats are only collected after SetStats(true); return a zero
// snapshot rather than dereferencing a nil *Stats otherwise.
if stats != nil {
snapshot = *stats
}
statsMutex.Unlock()
return
}
func ResetStats() {
statsMutex.Lock()
debug("Resetting stats")
old := stats
stats = &Stats{}
// These are absolute values:
stats.Clusters = old.Clusters
stats.SocketsInUse = old.SocketsInUse
stats.SocketsAlive = old.SocketsAlive
stats.SocketRefs = old.SocketRefs
statsMutex.Unlock()
return
}
type Stats struct {
Clusters int
MasterConns int
SlaveConns int
SentOps int
ReceivedOps int
ReceivedDocs int
SocketsAlive int
SocketsInUse int
SocketRefs int
}
func (stats *Stats) cluster(delta int) {
if stats != nil {
statsMutex.Lock()
stats.Clusters += delta
statsMutex.Unlock()
}
}
func (stats *Stats) conn(delta int, master bool) {
if stats != nil {
statsMutex.Lock()
if master {
stats.MasterConns += delta
} else {
stats.SlaveConns += delta
}
statsMutex.Unlock()
}
}
func (stats *Stats) sentOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SentOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedDocs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedDocs += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsInUse(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsInUse += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsAlive += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketRefs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketRefs += delta
statsMutex.Unlock()
}
}

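stats.go keeps one process-wide Stats value behind a single mutex; the test suite below leans on it to prove no sockets leak between tests. A minimal usage sketch, with the dialing and query steps elided:

package main

import (
	"fmt"

	"labix.org/v2/mgo"
)

func main() {
	mgo.SetStats(true) // allocate the global counters
	defer mgo.SetStats(false)

	// ... dial sessions and run queries here ...

	s := mgo.GetStats()
	fmt.Printf("ops sent=%d received=%d, sockets alive=%d in use=%d\n",
		s.SentOps, s.ReceivedOps, s.SocketsAlive, s.SocketsInUse)

	mgo.ResetStats() // zero the deltas, keep the absolute gauges
}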
240
vendor/labix.org/v2/mgo/suite_test.go generated vendored Normal file
View File

@ -0,0 +1,240 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"errors"
"flag"
"fmt"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
. "launchpad.net/gocheck"
"net"
"os/exec"
"strconv"
"syscall"
"testing"
"time"
)
var fast = flag.Bool("fast", false, "Skip slow tests")
type M bson.M
type cLogger C
func (c *cLogger) Output(calldepth int, s string) error {
ns := time.Now().UnixNano()
t := float64(ns%100e9) / 1e9
((*C)(c)).Logf("[LOG] %.05f %s", t, s)
return nil
}
func TestAll(t *testing.T) {
TestingT(t)
}
type S struct {
session *mgo.Session
stopped bool
build mgo.BuildInfo
frozen []string
}
func (s *S) versionAtLeast(v ...int) bool {
for i := range v {
if i == len(s.build.VersionArray) {
return false
}
if s.build.VersionArray[i] < v[i] {
return false
}
}
return true
}
var _ = Suite(&S{})
func (s *S) SetUpSuite(c *C) {
mgo.SetDebug(true)
mgo.SetStats(true)
s.StartAll()
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
s.build, err = session.BuildInfo()
c.Check(err, IsNil)
session.Close()
}
func (s *S) SetUpTest(c *C) {
err := run("mongo --nodb testdb/dropall.js")
if err != nil {
panic(err.Error())
}
mgo.SetLogger((*cLogger)(c))
mgo.ResetStats()
}
func (s *S) TearDownTest(c *C) {
if s.stopped {
s.StartAll()
}
for _, host := range s.frozen {
if host != "" {
s.Thaw(host)
}
}
var stats mgo.Stats
for i := 0; ; i++ {
stats = mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
for i := 0; ; i++ {
stats = mgo.GetStats()
if stats.Clusters == 0 {
break
}
if i == 60 {
c.Fatal("Test left clusters alive")
}
c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
time.Sleep(1 * time.Second)
}
}
func (s *S) Stop(host string) {
// Give a moment for slaves to sync and avoid getting rollback issues.
time.Sleep(2 * time.Second)
err := run("cd _testdb && supervisorctl stop " + supvName(host))
if err != nil {
panic(err)
}
s.stopped = true
}
func (s *S) pid(host string) int {
output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput()
if err != nil {
panic(err)
}
pidstr := string(output[1 : len(output)-1])
pid, err := strconv.Atoi(pidstr)
if err != nil {
panic("cannot convert pid to int: " + pidstr)
}
return pid
}
func (s *S) Freeze(host string) {
err := syscall.Kill(s.pid(host), syscall.SIGSTOP)
if err != nil {
panic(err)
}
s.frozen = append(s.frozen, host)
}
func (s *S) Thaw(host string) {
err := syscall.Kill(s.pid(host), syscall.SIGCONT)
if err != nil {
panic(err)
}
for i, frozen := range s.frozen {
if frozen == host {
s.frozen[i] = ""
}
}
}
func (s *S) StartAll() {
// Restart any stopped nodes.
run("cd _testdb && supervisorctl start all")
err := run("cd testdb && mongo --nodb wait.js")
if err != nil {
panic(err)
}
s.stopped = false
}
func run(command string) error {
output, err := exec.Command("/bin/sh", "-c", command).CombinedOutput()
if err != nil {
msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
return errors.New(msg)
}
return nil
}
var supvNames = map[string]string{
"40001": "db1",
"40002": "db2",
"40011": "rs1a",
"40012": "rs1b",
"40013": "rs1c",
"40021": "rs2a",
"40022": "rs2b",
"40023": "rs2c",
"40031": "rs3a",
"40032": "rs3b",
"40033": "rs3c",
"40041": "rs4a",
"40101": "cfg1",
"40102": "cfg2",
"40103": "cfg3",
"40201": "s1",
"40202": "s2",
"40203": "s3",
}
// supvName returns the supervisord name for the given host address.
func supvName(host string) string {
host, port, err := net.SplitHostPort(host)
if err != nil {
panic(err)
}
name, ok := supvNames[port]
if !ok {
panic("Unknown host: " + host)
}
return name
}
func hostPort(host string) string {
_, port, err := net.SplitHostPort(host)
if err != nil {
panic(err)
}
return port
}

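The harness resolves a node's host:port to its supervisord program name so individual mongod/mongos processes can be stopped or frozen. A standalone sketch of the same lookup that keeps only two table entries and returns an error instead of panicking:

package main

import (
	"fmt"
	"net"
)

var supvNames = map[string]string{"40011": "rs1a", "40201": "s1"}

// supvName maps a host:port address to its supervisord program name.
func supvName(host string) (string, error) {
	_, port, err := net.SplitHostPort(host)
	if err != nil {
		return "", err
	}
	name, ok := supvNames[port]
	if !ok {
		return "", fmt.Errorf("unknown port: %s", port)
	}
	return name, nil
}

func main() {
	name, _ := supvName("127.0.0.1:40011")
	fmt.Println(name) // rs1a
}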
47
vendor/labix.org/v2/mgo/testdb/dropall.js generated vendored Normal file
View File

@ -0,0 +1,47 @@
var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203]
var auth = [40002, 40103, 40203, 40031]
for (var i in ports) {
var port = ports[i]
var server = "localhost:" + port
var mongo = new Mongo("localhost:" + port)
var admin = mongo.getDB("admin")
for (var j in auth) {
if (auth[j] == port) {
admin.auth("root", "rapadura")
admin.system.users.find().forEach(function(u) {
if (u.user == "root" || u.user == "reader") {
return;
}
if (typeof admin.dropUser == "function") {
mongo.getDB(u.db).dropUser(u.user);
} else {
admin.removeUser(u.user);
}
})
break
}
}
var result = admin.runCommand({"listDatabases": 1})
// Why is the command returning undefined!?
while (typeof result.databases == "undefined") {
print("dropall.js: listing databases of :" + port + " got:", result)
result = admin.runCommand({"listDatabases": 1})
}
var dbs = result.databases
for (var j = 0; j != dbs.length; j++) {
var db = dbs[j]
switch (db.name) {
case "admin":
case "local":
case "config":
break
default:
mongo.getDB(db.name).dropDatabase()
}
}
}
// vim:ts=4:sw=4:et

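dropall.js sweeps every test server and drops all non-system databases between tests. The same sweep can be expressed through the driver itself; a sketch using Session.DatabaseNames and Database.DropDatabase from the vendored API, assuming a test server listening on localhost:40001:

package main

import (
	"log"

	"labix.org/v2/mgo"
)

func main() {
	session, err := mgo.Dial("localhost:40001")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	names, err := session.DatabaseNames()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		switch name {
		case "admin", "local", "config":
			// Keep system databases, as dropall.js does.
		default:
			if err := session.DB(name).DropDatabase(); err != nil {
				log.Fatal(err)
			}
		}
	}
}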
103
vendor/labix.org/v2/mgo/testdb/init.js generated vendored Normal file
View File

@ -0,0 +1,103 @@
//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5}
var settings = {};
// We know the master of the first set (pri=1), but not of the second.
var rs1cfg = {_id: "rs1",
members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}},
{_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}},
{_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}],
settings: settings}
var rs2cfg = {_id: "rs2",
members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}},
{_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}},
{_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}],
settings: settings}
var rs3cfg = {_id: "rs3",
members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}},
{_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}},
{_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}],
settings: settings}
for (var i = 0; i != 60; i++) {
try {
db1 = new Mongo("127.0.0.1:40001").getDB("admin")
db2 = new Mongo("127.0.0.1:40002").getDB("admin")
rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
break
} catch(err) {
print("Can't connect yet...")
}
sleep(1000)
}
rs1a.runCommand({replSetInitiate: rs1cfg})
rs2a.runCommand({replSetInitiate: rs2cfg})
rs3a.runCommand({replSetInitiate: rs3cfg})
function configShards() {
cfg1 = new Mongo("127.0.0.1:40201").getDB("admin")
cfg1.runCommand({addshard: "127.0.0.1:40001"})
cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"})
cfg2 = new Mongo("127.0.0.1:40202").getDB("admin")
cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"})
cfg3 = new Mongo("127.0.0.1:40203").getDB("admin")
cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"})
}
function configAuth() {
var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"]
for (var i in addrs) {
var db = new Mongo(addrs[i]).getDB("admin")
var v = db.serverBuildInfo().versionArray
if (v < [2, 5]) {
db.addUser("root", "rapadura")
} else {
db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
}
db.auth("root", "rapadura")
if (v >= [2, 6]) {
db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
} else if (v >= [2, 4]) {
db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
} else {
db.addUser("reader", "rapadura", true)
}
}
}
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
}
}
}
return count
}
var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
for (var i = 0; i != 60; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
sleep(2000)
configShards()
configAuth()
quit(0)
}
sleep(1000)
}
print("Replica sets didn't sync up properly.")
quit(12)
// vim:ts=4:sw=4:et

54
vendor/labix.org/v2/mgo/testdb/setup.sh generated vendored Executable file
View File

@ -0,0 +1,54 @@
#!/bin/sh -e
start() {
mkdir _testdb
cd _testdb
mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3
ln -s ../testdb/supervisord.conf supervisord.conf
echo keyfile > keyfile
chmod 600 keyfile
echo "Running supervisord..."
supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 )
COUNT=$(grep '^\[program' supervisord.conf | wc -l)
echo "Supervisord is up, starting $COUNT processes..."
for i in $(seq 10); do
RUNNING=$(supervisorctl status | grep RUNNING | wc -l)
echo "$RUNNING processes running..."
if [ x$COUNT = x$RUNNING ]; then
echo "Running setup.js with mongo..."
mongo --nodb ../testdb/init.js
exit 0
fi
sleep 1
done
echo "Failed to start all processes. Check out what's up at $PWD now!"
exit 1
}
stop() {
if [ -d _testdb ]; then
echo "Shutting down test cluster..."
(cd _testdb && supervisorctl shutdown)
rm -rf _testdb
fi
}
if [ ! -f suite_test.go ]; then
echo "This script must be run from within the source directory."
exit 1
fi
case "$1" in
start)
start
;;
stop)
stop
;;
esac
# vim:ts=4:sw=4:et

62
vendor/labix.org/v2/mgo/testdb/supervisord.conf generated vendored Normal file
View File

@ -0,0 +1,62 @@
[supervisord]
logfile = %(here)s/supervisord.log
pidfile = %(here)s/supervisord.pid
directory = %(here)s
#nodaemon = true
[inet_http_server]
port = 127.0.0.1:9001
[supervisorctl]
serverurl = http://127.0.0.1:9001
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:db1]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001
[program:db2]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth
[program:rs1a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011
[program:rs1b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012
[program:rs1c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013
[program:rs2a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021
[program:rs2b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022
[program:rs2c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023
[program:rs3a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile
[program:rs3b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile
[program:rs3c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile
[program:rs4a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041
[program:cfg1]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101
[program:cfg2]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102
[program:cfg3]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile
[program:s1]
command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1
[program:s2]
command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1
[program:s3]
command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile

58
vendor/labix.org/v2/mgo/testdb/wait.js generated vendored Normal file
View File

@ -0,0 +1,58 @@
// We know the master of the first set (pri=1), but not of the second.
var settings = {}
var rs1cfg = {_id: "rs1",
members: [{_id: 1, host: "127.0.0.1:40011", priority: 1},
{_id: 2, host: "127.0.0.1:40012", priority: 0},
{_id: 3, host: "127.0.0.1:40013", priority: 0}]}
var rs2cfg = {_id: "rs2",
members: [{_id: 1, host: "127.0.0.1:40021", priority: 1},
{_id: 2, host: "127.0.0.1:40022", priority: 1},
{_id: 3, host: "127.0.0.1:40023", priority: 0}]}
var rs3cfg = {_id: "rs3",
members: [{_id: 1, host: "127.0.0.1:40031", priority: 1},
{_id: 2, host: "127.0.0.1:40032", priority: 1},
{_id: 3, host: "127.0.0.1:40033", priority: 1}],
settings: settings}
for (var i = 0; i != 60; i++) {
try {
rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
rs3a.auth("root", "rapadura")
db1 = new Mongo("127.0.0.1:40001").getDB("admin")
db2 = new Mongo("127.0.0.1:40002").getDB("admin")
break
} catch(err) {
print("Can't connect yet...")
}
sleep(1000)
}
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
}
}
}
return count
}
var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
for (var i = 0; i != 60; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
quit(0)
}
sleep(1000)
}
print("Replica sets didn't sync up properly.")
quit(12)

68
vendor/labix.org/v2/mgo/txn/chaos.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
package txn
import (
mrand "math/rand"
"time"
)
var chaosEnabled = false
var chaosSetting Chaos
// Chaos holds parameters for the failure injection mechanism.
type Chaos struct {
// KillChance is the 0.0 to 1.0 chance that a given checkpoint
// within the algorithm will raise an interruption that will
// stop the procedure.
KillChance float64
// SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint
// within the algorithm will be delayed by Slowdown before
// continuing.
SlowdownChance float64
Slowdown time.Duration
// If Breakpoint is set, the above settings will only affect the
// named breakpoint.
Breakpoint string
}
// SetChaos sets the failure injection parameters to c.
func SetChaos(c Chaos) {
chaosSetting = c
chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0
}
func chaos(bpname string) {
if !chaosEnabled {
return
}
switch chaosSetting.Breakpoint {
case "", bpname:
kc := chaosSetting.KillChance
if kc > 0 && mrand.Intn(1000) < int(kc*1000) {
panic(chaosError{})
}
if bpname == "insert" {
return
}
sc := chaosSetting.SlowdownChance
if sc > 0 && mrand.Intn(1000) < int(sc*1000) {
time.Sleep(chaosSetting.Slowdown)
}
}
}
type chaosError struct{}
func (f *flusher) handleChaos(err *error) {
v := recover()
if v == nil {
return
}
if _, ok := v.(chaosError); ok {
f.debugf("Killed by chaos!")
*err = ErrChaos
return
}
panic(v)
}

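The chaos hooks above let tests kill or delay the transaction flusher at named checkpoints with a configurable probability. A minimal sketch of driving them from a test, using the breakpoint name "set-applying" that flusher.go checks:

package main

import (
	"time"

	"labix.org/v2/mgo/txn"
)

func main() {
	// Kill roughly 0.1% of checkpoints and slow 20% of the rest by
	// 250ms, but only at the "set-applying" breakpoint.
	txn.SetChaos(txn.Chaos{
		KillChance:     0.001,
		SlowdownChance: 0.2,
		Slowdown:       250 * time.Millisecond,
		Breakpoint:     "set-applying",
	})
	defer txn.SetChaos(txn.Chaos{}) // disable failure injection again
}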
108
vendor/labix.org/v2/mgo/txn/debug.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
package txn
import (
"bytes"
"fmt"
"labix.org/v2/mgo/bson"
"sort"
"sync/atomic"
)
var (
debugEnabled bool
logger log_Logger
)
type log_Logger interface {
Output(calldepth int, s string) error
}
// SetLogger specifies the logger to which logged messages should be sent.
func SetLogger(l log_Logger) {
logger = l
}
// SetDebug enables or disables debugging.
func SetDebug(debug bool) {
debugEnabled = debug
}
var ErrChaos = fmt.Errorf("interrupted by chaos")
var debugId uint32
func debugPrefix() string {
d := atomic.AddUint32(&debugId, 1) - 1
s := make([]byte, 0, 10)
for i := uint(0); i < 8; i++ {
s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf])
if d>>(4*(i+1)) == 0 {
break
}
}
s = append(s, ')', ' ')
return string(s)
}
func logf(format string, args ...interface{}) {
if logger != nil {
logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
}
}
func debugf(format string, args ...interface{}) {
if debugEnabled && logger != nil {
logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
}
}
func argsForLog(args []interface{}) []interface{} {
for i, arg := range args {
switch v := arg.(type) {
case bson.ObjectId:
args[i] = v.Hex()
case []bson.ObjectId:
lst := make([]string, len(v))
for j, id := range v {
lst[j] = id.Hex()
}
args[i] = lst
case map[docKey][]bson.ObjectId:
buf := &bytes.Buffer{}
var dkeys docKeys
for dkey := range v {
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
for i, dkey := range dkeys {
if i > 0 {
buf.WriteByte(' ')
}
buf.WriteString(fmt.Sprintf("%v: {", dkey))
for j, id := range v[dkey] {
if j > 0 {
buf.WriteByte(' ')
}
buf.WriteString(id.Hex())
}
buf.WriteByte('}')
}
args[i] = buf.String()
case map[docKey][]int64:
buf := &bytes.Buffer{}
var dkeys docKeys
for dkey := range v {
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
for i, dkey := range dkeys {
if i > 0 {
buf.WriteByte(' ')
}
buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey]))
}
args[i] = buf.String()
}
}
return args
}

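debug.go decouples logging from the standard library through the small log_Logger interface, which *log.Logger already satisfies via its Output method. Wiring it up is one call; a short sketch:

package main

import (
	"log"
	"os"

	"labix.org/v2/mgo/txn"
)

func main() {
	// *log.Logger implements Output(calldepth int, s string) error,
	// so it can be handed to SetLogger directly.
	txn.SetLogger(log.New(os.Stderr, "txn: ", log.LstdFlags))
	txn.SetDebug(true) // also emit debugf messages
}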
996
vendor/labix.org/v2/mgo/txn/flusher.go generated vendored Normal file
View File

@ -0,0 +1,996 @@
package txn
import (
"fmt"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"sort"
)
func flush(r *Runner, t *transaction) error {
f := &flusher{
Runner: r,
goal: t,
goalKeys: make(map[docKey]bool),
queue: make(map[docKey][]token),
debugId: debugPrefix(),
}
for _, dkey := range f.goal.docKeys() {
f.goalKeys[dkey] = true
}
return f.run()
}
type flusher struct {
*Runner
goal *transaction
goalKeys map[docKey]bool
queue map[docKey][]token
debugId string
}
func (f *flusher) run() (err error) {
if chaosEnabled {
defer f.handleChaos(&err)
}
f.debugf("Processing %s", f.goal)
seen := make(map[bson.ObjectId]*transaction)
if err := f.recurse(f.goal, seen); err != nil {
return err
}
if f.goal.done() {
return nil
}
// Sparse workloads will generally be managed entirely by recurse.
// Getting here means one or more transactions have dependencies
// and perhaps cycles.
// Build successors data for Tarjan's sort. Must consider
// that entries in txn-queue are not necessarily valid.
successors := make(map[bson.ObjectId][]bson.ObjectId)
ready := true
for _, dqueue := range f.queue {
NextPair:
for i := 0; i < len(dqueue); i++ {
pred := dqueue[i]
predid := pred.id()
predt := seen[predid]
if predt == nil || predt.Nonce != pred.nonce() {
continue
}
predsuccids, ok := successors[predid]
if !ok {
successors[predid] = nil
}
for j := i + 1; j < len(dqueue); j++ {
succ := dqueue[j]
succid := succ.id()
succt := seen[succid]
if succt == nil || succt.Nonce != succ.nonce() {
continue
}
if _, ok := successors[succid]; !ok {
successors[succid] = nil
}
// Found a valid pred/succ pair.
i = j - 1
for _, predsuccid := range predsuccids {
if predsuccid == succid {
continue NextPair
}
}
successors[predid] = append(predsuccids, succid)
if succid == f.goal.Id {
// There are still pre-requisites to handle.
ready = false
}
continue NextPair
}
}
}
f.debugf("Queues: %v", f.queue)
f.debugf("Successors: %v", successors)
if ready {
f.debugf("Goal %s has no real pre-requisites", f.goal)
return f.advance(f.goal, nil, true)
}
// Robert Tarjan's algorithm for detecting strongly-connected
// components is used for topological sorting and detecting
// cycles at once. The order in which transactions are applied
// in commonly affected documents must be agreed upon globally.
sorted := tarjanSort(successors)
if debugEnabled {
f.debugf("Tarjan output: %v", sorted)
}
pull := make(map[bson.ObjectId]*transaction)
for i := len(sorted) - 1; i >= 0; i-- {
scc := sorted[i]
f.debugf("Flushing %v", scc)
if len(scc) == 1 {
pull[scc[0]] = seen[scc[0]]
}
for _, id := range scc {
if err := f.advance(seen[id], pull, true); err != nil {
return err
}
}
if len(scc) > 1 {
for _, id := range scc {
pull[id] = seen[id]
}
}
}
return nil
}
func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error {
seen[t.Id] = t
err := f.advance(t, nil, false)
if err != errPreReqs {
return err
}
for _, dkey := range t.docKeys() {
for _, dtt := range f.queue[dkey] {
id := dtt.id()
if seen[id] != nil {
continue
}
qt, err := f.load(id)
if err != nil {
return err
}
err = f.recurse(qt, seen)
if err != nil {
return err
}
}
}
return nil
}
func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error {
for {
switch t.State {
case tpreparing, tprepared:
revnos, err := f.prepare(t, force)
if err != nil {
return err
}
if t.State != tprepared {
continue
}
if err = f.assert(t, revnos, pull); err != nil {
return err
}
if t.State != tprepared {
continue
}
if err = f.checkpoint(t, revnos); err != nil {
return err
}
case tapplying:
return f.apply(t, pull)
case taborting:
return f.abortOrReload(t, nil, pull)
case tapplied, taborted:
return nil
default:
panic(fmt.Errorf("transaction in unknown state: %q", t.State))
}
}
panic("unreachable")
}
type stash string
const (
stashStable stash = ""
stashInsert stash = "insert"
stashRemove stash = "remove"
)
type txnInfo struct {
Queue []token `bson:"txn-queue"`
Revno int64 `bson:"txn-revno,omitempty"`
Insert bson.ObjectId `bson:"txn-insert,omitempty"`
Remove bson.ObjectId `bson:"txn-remove,omitempty"`
}
type stashState string
const (
stashNew stashState = ""
stashInserting stashState = "inserting"
)
var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}}
var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false")
// prepare injects t's id onto txn-queue for all affected documents
// and collects the current txn-queue and txn-revno values during
// the process. If the prepared txn-queue indicates that there are
// pre-requisite transactions to be applied and the force parameter
// is false, errPreReqs will be returned. Otherwise, the current
// tip revision numbers for all the documents are returned.
func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) {
if t.State != tpreparing {
return f.rescan(t, force)
}
f.debugf("Preparing %s", t)
// Iterate in a stable way across all runners. This isn't
// strictly required, but reduces the chances of cycles.
dkeys := t.docKeys()
sort.Sort(dkeys)
revno := make(map[docKey]int64)
info := txnInfo{}
tt := tokenFor(t)
NextDoc:
for _, dkey := range dkeys {
change := mgo.Change{
Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}},
ReturnNew: true,
}
c := f.tc.Database.C(dkey.C)
cquery := c.FindId(dkey.Id).Select(txnFields)
RetryDoc:
change.Upsert = false
chaos("")
if _, err := cquery.Apply(change, &info); err == nil {
if info.Remove == "" {
// Fast path, unless workload is insert/remove heavy.
revno[dkey] = info.Revno
f.queue[dkey] = info.Queue
f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
continue NextDoc
} else {
// Handle remove in progress before preparing it.
if err := f.loadAndApply(info.Remove); err != nil {
return nil, err
}
goto RetryDoc
}
} else if err != mgo.ErrNotFound {
return nil, err
}
// Document missing. Use stash collection.
change.Upsert = true
chaos("")
_, err := f.sc.FindId(dkey).Apply(change, &info)
if err != nil {
return nil, err
}
if info.Insert != "" {
// Handle insert in progress before preparing it.
if err := f.loadAndApply(info.Insert); err != nil {
return nil, err
}
goto RetryDoc
}
// Must confirm stash is still in use and is the same one
// prepared, since applying a remove overwrites the stash.
docFound := false
stashFound := false
if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil {
docFound = true
} else if err != mgo.ErrNotFound {
return nil, err
} else if err = f.sc.FindId(dkey).One(&info); err == nil {
stashFound = true
if info.Revno == 0 {
// Missing revno in the stash only happens when it
// has been upserted, in which case it defaults to -1.
// Txn-inserted documents get revno -1 while in the stash
// for the first time, and -revno+1 == 2 when they go live.
info.Revno = -1
}
} else if err != mgo.ErrNotFound {
return nil, err
}
if docFound && info.Remove == "" || stashFound && info.Insert == "" {
for _, dtt := range info.Queue {
if dtt != tt {
continue
}
// Found tt properly prepared.
if stashFound {
f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue)
} else {
f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
}
revno[dkey] = info.Revno
f.queue[dkey] = info.Queue
continue NextDoc
}
}
// The stash wasn't valid and tt got overwritten. Try again.
f.unstashToken(tt, dkey)
goto RetryDoc
}
// Save the prepared nonce onto t.
nonce := tt.nonce()
qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}}
udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}}
chaos("set-prepared")
err = f.tc.Update(qdoc, udoc)
if err == nil {
t.State = tprepared
t.Nonce = nonce
} else if err == mgo.ErrNotFound {
f.debugf("Can't save nonce of %s: LOST RACE", tt)
if err := f.reload(t); err != nil {
return nil, err
} else if t.State == tpreparing {
panic("can't save nonce yet transaction is still preparing")
} else if t.State != tprepared {
return t.Revnos, nil
}
tt = t.token()
} else if err != nil {
return nil, err
}
prereqs, found := f.hasPreReqs(tt, dkeys)
if !found {
// Must only happen when reloading above.
return f.rescan(t, force)
} else if prereqs && !force {
f.debugf("Prepared queue with %s [has prereqs & not forced].", tt)
return nil, errPreReqs
}
for _, op := range t.Ops {
dkey := op.docKey()
revnos = append(revnos, revno[dkey])
drevno := revno[dkey]
switch {
case op.Insert != nil && drevno < 0:
revno[dkey] = -drevno+1
case op.Update != nil && drevno >= 0:
revno[dkey] = drevno+1
case op.Remove && drevno >= 0:
revno[dkey] = -drevno-1
}
}
if !prereqs {
f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos)
} else {
f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos)
}
return revnos, nil
}
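// Revno bookkeeping sketch (illustrative, not in the original
// source): revno >= 0 means the document is live, a negative revno
// means it is missing or stashed. For one document first seen via
// the stash upsert (revno -1): an Insert takes it to -(-1)+1 == 2
// (live), a subsequent Update to 3, and a Remove to -3-1 == -4
// (stashed again), so liveness is always recoverable from the sign.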
func (f *flusher) unstashToken(tt token, dkey docKey) error {
qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}}
udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}}
chaos("")
if err := f.sc.Update(qdoc, udoc); err == nil {
chaos("")
err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}})
} else if err != mgo.ErrNotFound {
return err
}
return nil
}
func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) {
f.debugf("Rescanning %s", t)
if t.State != tprepared {
panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State))
}
// Iterate in a stable way across all runners. This isn't
// strictly required, but reduces the chances of cycles.
dkeys := t.docKeys()
sort.Sort(dkeys)
tt := t.token()
if !force {
prereqs, found := f.hasPreReqs(tt, dkeys)
if found && prereqs {
// Its state is already known.
return nil, errPreReqs
}
}
revno := make(map[docKey]int64)
info := txnInfo{}
for _, dkey := range dkeys {
retry := 0
RetryDoc:
c := f.tc.Database.C(dkey.C)
if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound {
// Document is missing. Look in stash.
if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound {
// Stash also doesn't exist. Maybe someone applied it.
if err := f.reload(t); err != nil {
return nil, err
} else if t.State != tprepared {
return t.Revnos, err
}
// Not applying either.
retry++
if retry < 3 {
// Retry since there might be an insert/remove race.
goto RetryDoc
}
// Neither the doc nor the stash seems to exist.
return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t)
} else if err != nil {
return nil, err
}
// Stash found.
if info.Insert != "" {
// Handle insert in progress before assuming ordering is good.
if err := f.loadAndApply(info.Insert); err != nil {
return nil, err
}
goto RetryDoc
}
if info.Revno == 0 {
// Missing revno in the stash means -1.
info.Revno = -1
}
} else if err != nil {
return nil, err
} else if info.Remove != "" {
// Handle remove in progress before assuming ordering is good.
if err := f.loadAndApply(info.Remove); err != nil {
return nil, err
}
goto RetryDoc
}
revno[dkey] = info.Revno
found := false
for _, id := range info.Queue {
if id == tt {
found = true
break
}
}
f.queue[dkey] = info.Queue
if !found {
// Previously set txn-queue was popped by someone.
// Transaction is being/has been applied elsewhere.
f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue)
err := f.reload(t)
if t.State == tpreparing || t.State == tprepared {
panic("rescanned document misses transaction in queue")
}
return t.Revnos, err
}
}
prereqs, found := f.hasPreReqs(tt, dkeys)
if !found {
panic("rescanning loop guarantees that this can't happen")
} else if prereqs && !force {
f.debugf("Rescanned queue with %s: has prereqs, not forced", tt)
return nil, errPreReqs
}
for _, op := range t.Ops {
dkey := op.docKey()
revnos = append(revnos, revno[dkey])
if op.isChange() {
revno[dkey] += 1
}
}
if !prereqs {
f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos)
} else {
f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos)
}
return revnos, nil
}
func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) {
found = true
NextDoc:
for _, dkey := range dkeys {
for _, dtt := range f.queue[dkey] {
if dtt == tt {
continue NextDoc
} else if dtt.id() != tt.id() {
prereqs = true
}
}
found = false
}
return
}
func (f *flusher) reload(t *transaction) error {
var newt transaction
query := f.tc.FindId(t.Id)
query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}})
if err := query.One(&newt); err != nil {
return fmt.Errorf("failed to reload transaction: %v", err)
}
t.State = newt.State
t.Nonce = newt.Nonce
t.Revnos = newt.Revnos
f.debugf("Reloaded %s: %q", t, t.State)
return nil
}
func (f *flusher) loadAndApply(id bson.ObjectId) error {
t, err := f.load(id)
if err != nil {
return err
}
return f.advance(t, nil, true)
}
// assert verifies that all assertions in t match the content that t
// will be applied upon. If an assertion fails, the transaction state
// is changed to aborted.
func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error {
f.debugf("Asserting %s with revnos %v", t, revnos)
if t.State != tprepared {
panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State))
}
qdoc := make(bson.D, 3)
revno := make(map[docKey]int64)
for i, op := range t.Ops {
dkey := op.docKey()
if _, ok := revno[dkey]; !ok {
revno[dkey] = revnos[i]
}
if op.Assert == nil {
continue
}
if op.Assert == DocMissing {
if revnos[i] >= 0 {
return f.abortOrReload(t, revnos, pull)
}
continue
}
if op.Insert != nil {
return fmt.Errorf("Insert can only Assert txn.DocMissing, got %v", op.Assert)
}
// if revnos[i] < 0 { abort }?
qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id})
if op.Assert != DocMissing {
var revnoq interface{}
if n := revno[dkey]; n == 0 {
revnoq = bson.D{{"$exists", false}}
} else {
revnoq = n
}
// XXX Add tt to the query here, once we're sure it's all working.
// Not having it increases the chances of breaking on bad logic.
qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq})
if op.Assert != DocExists {
qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}})
}
}
c := f.tc.Database.C(op.C)
if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound {
// Assertion failed or someone else started applying.
return f.abortOrReload(t, revnos, pull)
} else if err != nil {
return err
}
}
f.debugf("Asserting %s succeeded", t)
return nil
}
func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) {
f.debugf("Aborting or reloading %s (was %q)", t, t.State)
if t.State == tprepared {
qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
udoc := bson.D{{"$set", bson.D{{"s", taborting}}}}
chaos("set-aborting")
if err = f.tc.Update(qdoc, udoc); err == nil {
t.State = taborting
} else if err == mgo.ErrNotFound {
if err = f.reload(t); err != nil || t.State != taborting {
f.debugf("Won't abort %s. Reloaded state: %q", t, t.State)
return err
}
} else {
return err
}
} else if t.State != taborting {
panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State))
}
if len(revnos) > 0 {
if pull == nil {
pull = map[bson.ObjectId]*transaction{t.Id: t}
}
seen := make(map[docKey]bool)
for i, op := range t.Ops {
dkey := op.docKey()
if seen[op.docKey()] {
continue
}
seen[dkey] = true
pullAll := tokensToPull(f.queue[dkey], pull, "")
if len(pullAll) == 0 {
continue
}
udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}
chaos("")
if revnos[i] < 0 {
err = f.sc.UpdateId(dkey, udoc)
} else {
c := f.tc.Database.C(dkey.C)
err = c.UpdateId(dkey.Id, udoc)
}
if err != nil && err != mgo.ErrNotFound {
return err
}
}
}
udoc := bson.D{{"$set", bson.D{{"s", taborted}}}}
chaos("set-aborted")
if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound {
return err
}
t.State = taborted
f.debugf("Aborted %s", t)
return nil
}
func (f *flusher) checkpoint(t *transaction, revnos []int64) error {
var debugRevnos map[docKey][]int64
if debugEnabled {
debugRevnos = make(map[docKey][]int64)
for i, op := range t.Ops {
dkey := op.docKey()
debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i])
}
f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos)
}
// Save in t the txn-revno values the transaction must run on.
qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}}
chaos("set-applying")
err := f.tc.Update(qdoc, udoc)
if err == nil {
t.State = tapplying
t.Revnos = revnos
f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos)
} else if err == mgo.ErrNotFound {
f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos)
return f.reload(t)
}
return nil
}
func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error {
f.debugf("Applying transaction %s", t)
if t.State != tapplying {
panic(fmt.Errorf("applying transaction in invalid state: %q", t.State))
}
if pull == nil {
pull = map[bson.ObjectId]*transaction{t.Id: t}
}
// Compute the operation in which t's id may be pulled
// out of txn-queue. That's on its last change, or the
// first assertion.
pullOp := make(map[docKey]int)
for i := range t.Ops {
op := &t.Ops[i]
dkey := op.docKey()
if _, ok := pullOp[dkey]; !ok || op.isChange() {
pullOp[dkey] = i
}
}
logRevnos := append([]int64(nil), t.Revnos...)
logDoc := bson.D{{"_id", t.Id}}
tt := tokenFor(t)
for i := range t.Ops {
op := &t.Ops[i]
dkey := op.docKey()
dqueue := f.queue[dkey]
revno := t.Revnos[i]
var opName string
if debugEnabled {
opName = op.name()
f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno)
}
c := f.tc.Database.C(op.C)
qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}}
if op.Insert != nil {
qdoc[0].Value = dkey
if revno == -1 {
qdoc[1].Value = bson.D{{"$exists", false}}
}
} else if revno == 0 {
// There's no document with revno 0. The only way to see it is
// when an existing document participates in a transaction for the
// first time. Txn-inserted documents get revno -1 while in the
// stash for the first time, and -revno+1 == 2 when they go live.
qdoc[1].Value = bson.D{{"$exists", false}}
}
dontPull := tt
isPullOp := pullOp[dkey] == i
if isPullOp {
dontPull = ""
}
pullAll := tokensToPull(dqueue, pull, dontPull)
var d bson.D
var outcome string
var err error
switch {
case op.Update != nil:
if revno < 0 {
err = mgo.ErrNotFound
f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed")
} else {
newRevno := revno + 1
logRevnos[i] = newRevno
if d, err = objToDoc(op.Update); err != nil {
return err
}
if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil {
return err
}
if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil {
return err
}
chaos("")
err = c.Update(qdoc, d)
}
case op.Remove:
if revno < 0 {
err = mgo.ErrNotFound
} else {
newRevno := -revno - 1
logRevnos[i] = newRevno
nonce := newNonce()
stash := txnInfo{}
change := mgo.Change{
Update: bson.D{{"$push", bson.D{{"n", nonce}}}},
Upsert: true,
ReturnNew: true,
}
if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil {
return err
}
change = mgo.Change{
Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}},
ReturnNew: true,
}
var info txnInfo
if _, err = c.Find(qdoc).Apply(change, &info); err == nil {
// The document still exists so the stash previously
// observed was either out of date or necessarily
// contained the token being applied.
f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue)
updated := false
if !hasToken(stash.Queue, tt) {
var set, unset bson.D
if revno == 0 {
// Missing revno in stash means -1.
set = bson.D{{"txn-queue", info.Queue}}
unset = bson.D{{"n", 1}, {"txn-revno", 1}}
} else {
set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}}
unset = bson.D{{"n", 1}}
}
qdoc := bson.D{{"_id", dkey}, {"n", nonce}}
udoc := bson.D{{"$set", set}, {"$unset", unset}}
if err = f.sc.Update(qdoc, udoc); err == nil {
updated = true
} else if err != mgo.ErrNotFound {
return err
}
}
if updated {
f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue)
} else {
f.debugf("Stash for document %v was up-to-date", dkey)
}
err = c.Remove(qdoc)
}
}
case op.Insert != nil:
if revno >= 0 {
err = mgo.ErrNotFound
} else {
newRevno := -revno + 1
logRevnos[i] = newRevno
if d, err = objToDoc(op.Insert); err != nil {
return err
}
change := mgo.Change{
Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}},
ReturnNew: true,
}
chaos("")
var info txnInfo
if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil {
f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue)
d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}})
// Unlikely yet unfortunate race in here if this gets seriously
// delayed. If someone inserts+removes meanwhile, this will
// reinsert, and there's no way to avoid that while keeping the
// collection clean without compromising sharding. applyOps can solve
// the former, but it can't shard (SERVER-1439).
chaos("insert")
err = c.Insert(d)
if err == nil || mgo.IsDup(err) {
if err == nil {
f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue)
} else {
f.debugf("Document %v already existed", dkey)
}
chaos("")
if err = f.sc.Remove(qdoc); err == nil {
f.debugf("Stash for document %v removed", dkey)
}
}
if pullOp[dkey] == i && len(pullAll) > 0 {
_ = f.sc.UpdateId(dkey, bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}})
}
}
}
case op.Assert != nil:
// TODO pullAll if pullOp[dkey] == i
}
if err == nil {
outcome = "DONE"
} else if err == mgo.ErrNotFound || mgo.IsDup(err) {
outcome = "MISS"
err = nil
} else {
outcome = err.Error()
}
if debugEnabled {
f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome)
}
if err != nil {
return err
}
if f.lc != nil && op.isChange() {
// Add change to the log document.
var dr bson.D
for li := range logDoc {
elem := &logDoc[li]
if elem.Name == op.C {
dr = elem.Value.(bson.D)
break
}
}
if dr == nil {
logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}})
dr = logDoc[len(logDoc)-1].Value.(bson.D)
}
dr[0].Value = append(dr[0].Value.([]interface{}), op.Id)
dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i])
}
}
t.State = tapplied
if f.lc != nil {
// Insert log document into the changelog collection.
f.debugf("Inserting %s into change log", t)
err := f.lc.Insert(logDoc)
if err != nil && !mgo.IsDup(err) {
return err
}
}
// It's been applied, so errors are ignored here. It's fine for someone
// else to win the race and mark it as applied, and it's also fine for
// it to remain pending until a later point when someone will perceive
// it has been applied and mark it as such.
f.debugf("Marking %s as applied", t)
chaos("set-applied")
f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}})
return nil
}
func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token {
var result []token
for j := len(dqueue) - 1; j >= 0; j-- {
dtt := dqueue[j]
if dt, ok := pull[dtt.id()]; ok {
if dt.Nonce == dtt.nonce() {
// It's valid and is being pulled out, so everything
// preceding it must have been handled already.
if dtt == dontPull {
// Not time to pull this one out yet.
j--
}
result = append(result, dqueue[:j+1]...)
break
}
// It was handled before and this is a leftover invalid
// nonce in the queue. Cherry-pick it out.
result = append(result, dtt)
}
}
return result
}
func objToDoc(obj interface{}) (d bson.D, err error) {
data, err := bson.Marshal(obj)
if err != nil {
return nil, err
}
err = bson.Unmarshal(data, &d)
if err != nil {
return nil, err
}
return d, err
}
func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) {
for i := range doc {
elem := &doc[i]
if elem.Name != key {
continue
}
if old, ok := elem.Value.(bson.D); ok {
elem.Value = append(old, add...)
return doc, nil
} else {
return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value)
}
}
return append(doc, bson.DocElem{key, add}), nil
}
func setInDoc(doc bson.D, set bson.D) bson.D {
dlen := len(doc)
NextS:
for s := range set {
sname := set[s].Name
for d := 0; d < dlen; d++ {
if doc[d].Name == sname {
doc[d].Value = set[s].Value
continue NextS
}
}
doc = append(doc, set[s])
}
return doc
}
func hasToken(tokens []token, tt token) bool {
for _, ttt := range tokens {
if ttt == tt {
return true
}
}
return false
}
func (f *flusher) debugf(format string, args ...interface{}) {
if !debugEnabled {
return
}
debugf(f.debugId+format, args...)
}

101
vendor/labix.org/v2/mgo/txn/mgo_test.go generated vendored Normal file
View File

@ -0,0 +1,101 @@
package txn_test
import (
"bytes"
"labix.org/v2/mgo"
. "launchpad.net/gocheck"
"os/exec"
"time"
)
// ----------------------------------------------------------------------------
// The mgo test suite
type MgoSuite struct {
output bytes.Buffer
server *exec.Cmd
session *mgo.Session
}
var mgoaddr = "127.0.0.1:50017"
func (s *MgoSuite) SetUpSuite(c *C) {
//mgo.SetDebug(true)
mgo.SetStats(true)
dbdir := c.MkDir()
args := []string{
"--dbpath", dbdir,
"--bind_ip", "127.0.0.1",
"--port", "50017",
"--nssize", "1",
"--noprealloc",
"--smallfiles",
"--nojournal",
"-vvvvv",
}
s.server = exec.Command("mongod", args...)
s.server.Stdout = &s.output
s.server.Stderr = &s.output
err := s.server.Start()
if err != nil {
panic(err)
}
}
func (s *MgoSuite) TearDownSuite(c *C) {
s.server.Process.Kill()
s.server.Process.Wait()
}
func (s *MgoSuite) SetUpTest(c *C) {
err := DropAll(mgoaddr)
if err != nil {
panic(err)
}
mgo.SetLogger(c)
mgo.ResetStats()
s.session, err = mgo.Dial(mgoaddr)
c.Assert(err, IsNil)
}
func (s *MgoSuite) TearDownTest(c *C) {
if s.session != nil {
s.session.Close()
}
for i := 0; ; i++ {
stats := mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
}
func DropAll(mongourl string) (err error) {
session, err := mgo.Dial(mongourl)
if err != nil {
return err
}
defer session.Close()
names, err := session.DatabaseNames()
if err != nil {
return err
}
for _, name := range names {
switch name {
case "admin", "local", "config":
default:
err = session.DB(name).DropDatabase()
if err != nil {
return err
}
}
}
return nil
}

389
vendor/labix.org/v2/mgo/txn/sim_test.go generated vendored Normal file
View File

@ -0,0 +1,389 @@
package txn_test
import (
"flag"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"labix.org/v2/mgo/txn"
. "launchpad.net/gocheck"
"math/rand"
"time"
)
var (
duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation")
seed = flag.Int64("seed", 0, "seed for rand")
)
type params struct {
killChance float64
slowdownChance float64
slowdown time.Duration
unsafe bool
workers int
accounts int
changeHalf bool
reinsertCopy bool
reinsertZeroed bool
changelog bool
changes int
}
func (s *S) TestSim1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 4,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSim4WorkersDense(c *C) {
simulate(c, params{
workers: 4,
accounts: 2,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSim4WorkersSparse(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 4,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf4WorkersDense(c *C) {
simulate(c, params{
workers: 4,
accounts: 2,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf4WorkersSparse(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertCopy1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 10,
reinsertCopy: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertCopy4Workers(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
reinsertCopy: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertZeroed1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 10,
reinsertZeroed: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertZeroed4Workers(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
reinsertZeroed: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimChangeLog(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
changelog: true,
})
}
type balanceChange struct {
id bson.ObjectId
origin int
target int
amount int
}
func simulate(c *C, params params) {
seed := *seed
if seed == 0 {
seed = time.Now().UnixNano()
}
rand.Seed(seed)
c.Logf("Seed: %v", seed)
txn.SetChaos(txn.Chaos{
KillChance: params.killChance,
SlowdownChance: params.slowdownChance,
Slowdown: params.slowdown,
})
defer txn.SetChaos(txn.Chaos{})
session, err := mgo.Dial(mgoaddr)
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("test")
tc := db.C("tc")
runner := txn.NewRunner(tc)
tclog := db.C("tc.log")
if params.changelog {
info := mgo.CollectionInfo{
Capped: true,
MaxBytes: 1000000,
}
err := tclog.Create(&info)
c.Assert(err, IsNil)
runner.ChangeLog(tclog)
}
accounts := db.C("accounts")
for i := 0; i < params.accounts; i++ {
err := accounts.Insert(M{"_id": i, "balance": 300})
c.Assert(err, IsNil)
}
var stop time.Time
if params.changes <= 0 {
stop = time.Now().Add(*duration)
}
max := params.accounts
if params.reinsertCopy || params.reinsertZeroed {
max = int(float64(params.accounts) * 1.5)
}
changes := make(chan balanceChange, 1024)
//session.SetMode(mgo.Eventual, true)
for i := 0; i < params.workers; i++ {
go func() {
n := 0
for {
if n > 0 && n == params.changes {
break
}
if !stop.IsZero() && time.Now().After(stop) {
break
}
change := balanceChange{
id: bson.NewObjectId(),
origin: rand.Intn(max),
target: rand.Intn(max),
amount: 100,
}
var old Account
var oldExists bool
if params.reinsertCopy || params.reinsertZeroed {
if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
c.Check(err, IsNil)
change.amount = old.Balance
oldExists = true
}
}
var ops []txn.Op
switch {
case params.reinsertCopy && oldExists:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": change.amount},
Remove: true,
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocMissing,
Insert: M{"balance": change.amount},
}}
case params.reinsertZeroed && oldExists:
ops = []txn.Op{{
C: "accounts",
Id: change.target,
Assert: txn.DocMissing,
Insert: M{"balance": 0},
}, {
C: "accounts",
Id: change.origin,
Assert: M{"balance": change.amount},
Remove: true,
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount}},
}}
case params.changeHalf:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": M{"$gte": change.amount}},
Update: M{"$inc": M{"balance": -change.amount / 2}},
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount / 2}},
}, {
C: "accounts",
Id: change.origin,
Update: M{"$inc": M{"balance": -change.amount / 2}},
}, {
C: "accounts",
Id: change.target,
Update: M{"$inc": M{"balance": change.amount / 2}},
}}
default:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": M{"$gte": change.amount}},
Update: M{"$inc": M{"balance": -change.amount}},
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount}},
}}
}
// Use a fresh err here so concurrent workers don't race on the
// enclosing err variable.
if err := runner.Run(ops, change.id, nil); err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
c.Check(err, IsNil)
}
n++
changes <- change
}
changes <- balanceChange{}
}()
}
alive := params.workers
changeLog := make([]balanceChange, 0, 1024)
for alive > 0 {
change := <-changes
if change.id == "" {
alive--
} else {
changeLog = append(changeLog, change)
}
}
c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))
txn.SetChaos(txn.Chaos{})
err = runner.ResumeAll()
c.Assert(err, IsNil)
n, err := accounts.Count()
c.Check(err, IsNil)
c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))
n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
c.Check(err, IsNil)
c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))
globalBalance := 0
iter := accounts.Find(nil).Iter()
account := Account{}
for iter.Next(&account) {
globalBalance += account.Balance
}
c.Check(iter.Close(), IsNil)
c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))
// Compute and verify the exact final state of all accounts.
balance := make(map[int]int)
for i := 0; i < params.accounts; i++ {
balance[i] += 300
}
var applied, aborted int
for _, change := range changeLog {
err := runner.Resume(change.id)
if err == txn.ErrAborted {
aborted++
continue
} else if err != nil {
c.Fatalf("resuming %s failed: %v", change.id, err)
}
balance[change.origin] -= change.amount
balance[change.target] += change.amount
applied++
}
iter = accounts.Find(nil).Iter()
for iter.Next(&account) {
c.Assert(account.Balance, Equals, balance[account.Id])
}
c.Check(iter.Close(), IsNil)
c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)
if params.changelog {
n, err := tclog.Count()
c.Assert(err, IsNil)
// Check if the capped collection is full.
dummy := make([]byte, 1024)
tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
m, err := tclog.Count()
c.Assert(err, IsNil)
if m == n+1 {
// Wasn't full, so it must have seen it all.
c.Assert(err, IsNil)
c.Assert(n, Equals, applied)
}
}
}

96
vendor/labix.org/v2/mgo/txn/tarjan.go generated vendored Normal file
View File

@ -0,0 +1,96 @@
package txn
import (
"labix.org/v2/mgo/bson"
"sort"
)
func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId {
// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
data := &tarjanData{
successors: successors,
nodes: make([]tarjanNode, 0, len(successors)),
index: make(map[bson.ObjectId]int, len(successors)),
}
// Sort all nodes to stabilize the logic.
var all []string
for id := range successors {
all = append(all, string(id))
}
sort.Strings(all)
for _, strid := range all {
id := bson.ObjectId(strid)
if _, seen := data.index[id]; !seen {
data.strongConnect(id)
}
}
return data.output
}
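// Components come out in reverse topological order of the condensed
// graph: a component is emitted before any component that has an edge
// into it, as exercised by TestExample in tarjan_test.go.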
type tarjanData struct {
successors map[bson.ObjectId][]bson.ObjectId
output [][]bson.ObjectId
nodes []tarjanNode
stack []bson.ObjectId
index map[bson.ObjectId]int
}
type tarjanNode struct {
lowlink int
stacked bool
}
type idList []bson.ObjectId
func (l idList) Len() int { return len(l) }
func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l idList) Less(i, j int) bool { return l[i] < l[j] }
func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode {
index := len(data.nodes)
data.index[id] = index
data.stack = append(data.stack, id)
data.nodes = append(data.nodes, tarjanNode{index, true})
node := &data.nodes[index]
// Sort to stabilize the algorithm.
succids := idList(data.successors[id])
sort.Sort(succids)
for _, succid := range succids {
succindex, seen := data.index[succid]
if !seen {
succnode := data.strongConnect(succid)
if succnode.lowlink < node.lowlink {
node.lowlink = succnode.lowlink
}
} else if data.nodes[succindex].stacked {
// Part of the current strongly-connected component.
if succindex < node.lowlink {
node.lowlink = succindex
}
}
}
if node.lowlink == index {
// Root node; pop stack and output new
// strongly-connected component.
var scc []bson.ObjectId
i := len(data.stack) - 1
for {
stackid := data.stack[i]
stackindex := data.index[stackid]
data.nodes[stackindex].stacked = false
scc = append(scc, stackid)
if stackindex == index {
break
}
i--
}
data.stack = data.stack[:i]
data.output = append(data.output, scc)
}
return node
}

44
vendor/labix.org/v2/mgo/txn/tarjan_test.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package txn
import (
"fmt"
"labix.org/v2/mgo/bson"
. "launchpad.net/gocheck"
)
type TarjanSuite struct{}
var _ = Suite(TarjanSuite{})
func bid(n int) bson.ObjectId {
return bson.ObjectId(fmt.Sprintf("%024d", n))
}
func bids(ns ...int) (ids []bson.ObjectId) {
for _, n := range ns {
ids = append(ids, bid(n))
}
return
}
func (TarjanSuite) TestExample(c *C) {
successors := map[bson.ObjectId][]bson.ObjectId{
bid(1): bids(2),
bid(2): bids(1, 5),
bid(3): bids(4),
bid(4): bids(3, 5),
bid(5): bids(6),
bid(6): bids(7),
bid(7): bids(8),
bid(8): bids(6, 9),
bid(9): bids(),
}
c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{
bids(9),
bids(8, 7, 6),
bids(5),
bids(2, 1),
bids(4, 3),
})
}

518
vendor/labix.org/v2/mgo/txn/txn.go generated vendored Normal file
View File

@ -0,0 +1,518 @@
// The txn package implements support for multi-document transactions.
//
// For details check the following blog post:
//
// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
//
package txn
import (
"encoding/binary"
"fmt"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"reflect"
"sort"
"sync"
crand "crypto/rand"
mrand "math/rand"
)
type state int
const (
tpreparing state = 1 // One or more documents not prepared
tprepared state = 2 // Prepared but not yet ready to run
taborting state = 3 // Assertions failed, cleaning up
tapplying state = 4 // Changes are in progress
taborted state = 5 // Pre-conditions failed, nothing done
tapplied state = 6 // All changes applied
)
func (s state) String() string {
switch s {
case tpreparing:
return "preparing"
case tprepared:
return "prepared"
case taborting:
return "aborting"
case tapplying:
return "applying"
case taborted:
return "aborted"
case tapplied:
return "applied"
}
panic(fmt.Errorf("unknown state: %d", s))
}
var rand *mrand.Rand
var randmu sync.Mutex
func init() {
var seed int64
err := binary.Read(crand.Reader, binary.BigEndian, &seed)
if err != nil {
panic(err)
}
rand = mrand.New(mrand.NewSource(seed))
}
type transaction struct {
Id bson.ObjectId `bson:"_id"`
State state `bson:"s"`
Info interface{} `bson:"i,omitempty"`
Ops []Op `bson:"o"`
Nonce string `bson:"n,omitempty"`
Revnos []int64 `bson:"r,omitempty"`
docKeysCached docKeys
}
func (t *transaction) String() string {
if t.Nonce == "" {
return t.Id.Hex()
}
return string(t.token())
}
func (t *transaction) done() bool {
return t.State == tapplied || t.State == taborted
}
func (t *transaction) token() token {
if t.Nonce == "" {
panic("transaction has no nonce")
}
return tokenFor(t)
}
func (t *transaction) docKeys() docKeys {
if t.docKeysCached != nil {
return t.docKeysCached
}
dkeys := make(docKeys, 0, len(t.Ops))
NextOp:
for _, op := range t.Ops {
dkey := op.docKey()
for i := range dkeys {
if dkey == dkeys[i] {
continue NextOp
}
}
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
t.docKeysCached = dkeys
return dkeys
}
// tokenFor returns a unique transaction token that
// is composed by t's id and a nonce. If t already has
// a nonce assigned to it, it will be used, otherwise
// a new nonce will be generated.
func tokenFor(t *transaction) token {
nonce := t.Nonce
if nonce == "" {
nonce = newNonce()
}
return token(t.Id.Hex() + "_" + nonce)
}
func newNonce() string {
randmu.Lock()
r := rand.Uint32()
randmu.Unlock()
n := make([]byte, 8)
for i := uint(0); i < 8; i++ {
n[i] = "0123456789abcdef"[(r>>(4*i))&0xf]
}
return string(n)
}
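// The nonce is thus a random uint32 spelled as 8 lowercase hex digits,
// least-significant nibble first; 0x1 would become "10000000"
// (illustrative value).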
type token string
func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) }
func (tt token) nonce() string { return string(tt[25:]) }
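// A full token therefore looks like "4d0c3d4cf2a2b5b1e8000001_deadbeef"
// (hypothetical values): 24 hex characters of transaction id, an
// underscore, and the 8-character nonce; id() and nonce() slice those
// fixed offsets back out.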
// Op represents an operation to a single document that may be
// applied as part of a transaction with other operations.
type Op struct {
// C and Id identify the collection and document this operation
// refers to. Id is matched against the "_id" document field.
C string `bson:"c"`
Id interface{} `bson:"d"`
// Assert optionally holds a query document that is used to
// test the operation document at the time the transaction is
// going to be applied. The assertions for all operations in
// a transaction are tested before any changes take place,
// and the transaction is entirely aborted if any of them
// fails. This is also the only way to prevent a transaction
// from being applied (the transaction continues despite
// the outcome of Insert, Update, and Remove).
Assert interface{} `bson:"a,omitempty"`
// The Insert, Update and Remove fields describe the mutation
// intended by the operation. At most one of them may be set
// per operation. If none are set, Assert must be set and the
// operation becomes a read-only test.
//
// Insert holds the document to be inserted at the time the
// transaction is applied. The Id field will be inserted
// into the document automatically as its _id field. The
// transaction will continue even if the document already
// exists. Use Assert with txn.DocMissing if the insertion is
// required.
//
// Update holds the update document to be applied at the time
// the transaction is applied. The transaction will continue
// even if a document with Id is missing. Use Assert to
// test for the document presence or its contents.
//
// Remove indicates whether to remove the document with Id.
// The transaction continues even if the document doesn't yet
// exist at the time the transaction is applied. Use Assert
// with txn.DocExists to make sure it will be removed.
Insert interface{} `bson:"i,omitempty"`
Update interface{} `bson:"u,omitempty"`
Remove bool `bson:"r,omitempty"`
}
func (op *Op) isChange() bool {
return op.Update != nil || op.Insert != nil || op.Remove
}
func (op *Op) docKey() docKey {
return docKey{op.C, op.Id}
}
func (op *Op) name() string {
switch {
case op.Update != nil:
return "update"
case op.Insert != nil:
return "insert"
case op.Remove:
return "remove"
case op.Assert != nil:
return "assert"
}
return "none"
}
const (
// DocExists and DocMissing may be used on an operation's
// Assert value to assert that the document with the given
// Id exists or does not exist, respectively.
DocExists = "d+"
DocMissing = "d-"
)
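// For example, to insert a document only when it does not exist yet
// (a sketch with a hypothetical collection name):
//
//	op := Op{
//		C:      "accounts",
//		Id:     0,
//		Assert: DocMissing,
//		Insert: bson.M{"balance": 0},
//	}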
// A Runner applies operations as part of a transaction onto any number
// of collections within a database. See the Run method for details.
type Runner struct {
tc *mgo.Collection // txns
sc *mgo.Collection // stash
lc *mgo.Collection // log
}
// NewRunner returns a new transaction runner that uses tc to hold its
// transactions.
//
// Multiple transaction collections may exist in a single database, but
// all collections that are touched by operations in a given transaction
// collection must be handled exclusively by it.
//
// A second collection with the same name as tc but suffixed by ".stash"
// will be used for implementing the transactional behavior of insert
// and remove operations.
func NewRunner(tc *mgo.Collection) *Runner {
return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil}
}
var ErrAborted = fmt.Errorf("transaction aborted")
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it, when the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied in the order provided or
// all aborted, as long as the affected documents are only modified
// through transactions. If documents are simultaneously modified
// by transactions and out of transactions the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations can't be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Run Resume with the obtained transaction id
// to confirm whether the transaction was applied or not.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
const efmt = "error in transaction op %d: %s"
for i := range ops {
op := &ops[i]
if op.C == "" || op.Id == nil {
return fmt.Errorf(efmt, i, "C or Id missing")
}
changes := 0
if op.Insert != nil {
changes++
}
if op.Update != nil {
changes++
}
if op.Remove {
changes++
}
if changes > 1 {
return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
}
if changes == 0 && op.Assert == nil {
return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
}
}
if id == "" {
id = bson.NewObjectId()
}
// Insert transaction sooner rather than later, to stay on the safer side.
t := transaction{
Id: id,
Ops: ops,
State: tpreparing,
Info: info,
}
if err = r.tc.Insert(&t); err != nil {
return err
}
if err = flush(r, &t); err != nil {
return err
}
if t.State == taborted {
return ErrAborted
} else if t.State != tapplied {
panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
}
return nil
}
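// A minimal usage sketch (hypothetical collection and values; error
// handling elided) that debits an account only if it holds enough:
//
//	runner := NewRunner(db.C("txns"))
//	ops := []Op{{
//		C:      "accounts",
//		Id:     0,
//		Assert: bson.M{"balance": bson.M{"$gte": 100}},
//		Update: bson.M{"$inc": bson.M{"balance": -100}},
//	}}
//	err := runner.Run(ops, "", nil) // "" lets Run pick a random id
//	// err is nil on success and ErrAborted if the assertion failed.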
// ResumeAll resumes all pending transactions. All ErrAborted errors
// from individual transactions are ignored.
func (r *Runner) ResumeAll() (err error) {
debugf("Resuming all unfinished transactions")
iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
var t transaction
for iter.Next(&t) {
if t.State == tapplied || t.State == taborted {
continue
}
debugf("Resuming %s from %q", t.Id, t.State)
if err := flush(r, &t); err != nil {
return err
}
if !t.done() {
panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
}
}
return nil
}
// Resume resumes the transaction with id. It returns mgo.ErrNotFound
// if the transaction is not found. Otherwise, it has the same semantics
// of the Run method after the transaction is inserted.
func (r *Runner) Resume(id bson.ObjectId) (err error) {
t, err := r.load(id)
if err != nil {
return err
}
if !t.done() {
debugf("Resuming %s from %q", t, t.State)
if err := flush(r, t); err != nil {
return err
}
}
if t.State == taborted {
return ErrAborted
} else if t.State != tapplied {
panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
}
return nil
}
// ChangeLog enables logging of changes to the given collection
// every time a transaction that modifies content is done being
// applied.
//
// Saved documents are in the format:
//
// {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
//
// The document revision is the value of the txn-revno field after
// the change has been applied. Negative values indicate the document
// was not present in the collection. Revisions will not change when
// updates or removes are applied to missing documents or inserts are
// attempted when the document is already present.
func (r *Runner) ChangeLog(logc *mgo.Collection) {
r.lc = logc
}
// PurgeMissing removes from collections any state that refers to transaction
// documents that for whatever reason have been lost from the system (removed
// by accident or lost in a hard crash, for example).
//
// This method should very rarely be needed, if at all, and should never be
// used during the normal operation of an application. Its purpose is to put
// a system that has seen unavoidable corruption back in a working state.
func (r *Runner) PurgeMissing(collections ...string) error {
type M map[string]interface{}
type S []interface{}
pipeline := []M{
{"$project": M{"_id": 1, "txn-queue": 1}},
{"$unwind": "$txn-queue"},
{"$sort": M{"_id": 1, "txn-queue": 1}},
//{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}},
}
type TRef struct {
DocId interface{} "_id"
TxnId string "txn-queue"
}
found := make(map[bson.ObjectId]bool)
colls := make(map[string]bool)
sort.Strings(collections)
for _, collection := range collections {
c := r.tc.Database.C(collection)
iter := c.Pipe(pipeline).Iter()
var tref TRef
for iter.Next(&tref) {
txnId := bson.ObjectIdHex(tref.TxnId[:24])
if found[txnId] {
continue
}
if r.tc.FindId(txnId).One(nil) == nil {
found[txnId] = true
continue
}
logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId)
err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
if err != nil {
return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
}
}
colls[collection] = true
}
type StashTRef struct {
Id docKey "_id"
TxnId string "txn-queue"
}
iter := r.sc.Pipe(pipeline).Iter()
var stref StashTRef
for iter.Next(&stref) {
txnId := bson.ObjectIdHex(stref.TxnId[:24])
if found[txnId] {
continue
}
if r.tc.FindId(txnId).One(nil) == nil {
found[txnId] = true
continue
}
logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId)
err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
if err != nil {
return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
}
}
return nil
}
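// For example (hypothetical collection names):
//
//	err := runner.PurgeMissing("accounts", "people")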
func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
var t transaction
err := r.tc.FindId(id).One(&t)
if err == mgo.ErrNotFound {
return nil, fmt.Errorf("cannot find transaction %s", id)
} else if err != nil {
return nil, err
}
return &t, nil
}
type docKey struct {
C string
Id interface{}
}
type docKeys []docKey
func (ks docKeys) Len() int { return len(ks) }
func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
func (ks docKeys) Less(i, j int) bool {
a, b := ks[i], ks[j]
if a.C != b.C {
return a.C < b.C
}
av, an := valueNature(a.Id)
bv, bn := valueNature(b.Id)
if an != bn {
return an < bn
}
switch an {
case natureString:
return av.(string) < bv.(string)
case natureInt:
return av.(int64) < bv.(int64)
case natureFloat:
return av.(float64) < bv.(float64)
case natureBool:
return !av.(bool) && bv.(bool)
}
panic("unreachable")
}
type typeNature int
const (
// The order of these values matters. Transactions
// from applications using different ordering will
// be incompatible with each other.
_ typeNature = iota
natureString
natureInt
natureFloat
natureBool
)
func valueNature(v interface{}) (value interface{}, nature typeNature) {
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.String:
return rv.String(), natureString
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), natureInt
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return int64(rv.Uint()), natureInt
case reflect.Float32, reflect.Float64:
return rv.Float(), natureFloat
case reflect.Bool:
return rv.Bool(), natureBool
}
panic("document id type unsupported by txn: " + rv.Kind().String())
}

521
vendor/labix.org/v2/mgo/txn/txn_test.go generated vendored Normal file
View File

@ -0,0 +1,521 @@
package txn_test
import (
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"labix.org/v2/mgo/txn"
. "launchpad.net/gocheck"
"testing"
)
func TestAll(t *testing.T) {
TestingT(t)
}
type S struct {
MgoSuite
db *mgo.Database
tc, sc *mgo.Collection
accounts *mgo.Collection
runner *txn.Runner
}
var _ = Suite(&S{})
type M map[string]interface{}
func (s *S) SetUpTest(c *C) {
txn.SetChaos(txn.Chaos{})
txn.SetLogger(c)
txn.SetDebug(true)
s.MgoSuite.SetUpTest(c)
s.db = s.session.DB("test")
s.tc = s.db.C("tc")
s.sc = s.db.C("tc.stash")
s.accounts = s.db.C("accounts")
s.runner = txn.NewRunner(s.tc)
}
type Account struct {
Id int `bson:"_id"`
Balance int
}
func (s *S) TestDocExists(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
exists := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocExists,
}}
missing := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocMissing,
}}
err = s.runner.Run(exists, "", nil)
c.Assert(err, IsNil)
err = s.runner.Run(missing, "", nil)
c.Assert(err, Equals, txn.ErrAborted)
err = s.accounts.RemoveId(0)
c.Assert(err, IsNil)
err = s.runner.Run(exists, "", nil)
c.Assert(err, Equals, txn.ErrAborted)
err = s.runner.Run(missing, "", nil)
c.Assert(err, IsNil)
}
func (s *S) TestInsert(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"balance": 200},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
ops[0].Id = 1
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(1).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 200)
}
func (s *S) TestRemove(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
}
func (s *S) TestUpdate(c *C) {
var err error
err = s.accounts.Insert(M{"_id": 0, "balance": 200})
c.Assert(err, IsNil)
err = s.accounts.Insert(M{"_id": 1, "balance": 200})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
ops[0].Id = 1
err = s.accounts.FindId(1).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 200)
}
func (s *S) TestInsertUpdate(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}, {
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 400)
}
func (s *S) TestUpdateInsert(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 200)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestInsertRemoveInsert(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}, {
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 300},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestQueueStashing(c *C) {
txn.SetChaos(txn.Chaos{
KillChance: 1,
Breakpoint: "set-applying",
})
opses := [][]txn.Op{{{
C: "accounts",
Id: 0,
Insert: M{"balance": 100},
}}, {{
C: "accounts",
Id: 0,
Remove: true,
}}, {{
C: "accounts",
Id: 0,
Insert: M{"balance": 200},
}}, {{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}}
var last bson.ObjectId
for _, ops := range opses {
last = bson.NewObjectId()
err := s.runner.Run(ops, last, nil)
c.Assert(err, Equals, txn.ErrChaos)
}
txn.SetChaos(txn.Chaos{})
err := s.runner.Resume(last)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestInfo(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocMissing,
}}
id := bson.NewObjectId()
err := s.runner.Run(ops, id, M{"n": 42})
c.Assert(err, IsNil)
var t struct{ I struct{ N int } }
err = s.tc.FindId(id).One(&t)
c.Assert(err, IsNil)
c.Assert(t.I.N, Equals, 42)
}
func (s *S) TestErrors(c *C) {
doc := bson.M{"foo": 1}
tests := []txn.Op{{
C: "c",
Id: 0,
}, {
C: "c",
Id: 0,
Insert: doc,
Remove: true,
}, {
C: "c",
Id: 0,
Insert: doc,
Update: doc,
}, {
C: "c",
Id: 0,
Update: doc,
Remove: true,
}, {
C: "c",
Assert: doc,
}, {
Id: 0,
Assert: doc,
}}
txn.SetChaos(txn.Chaos{KillChance: 1.0})
for _, op := range tests {
c.Logf("op: %v", op)
err := s.runner.Run([]txn.Op{op}, "", nil)
c.Assert(err, ErrorMatches, "error in transaction op 0: .*")
}
}
func (s *S) TestAssertNestedOr(c *C) {
// Assert uses $or internally. Ensure nesting works.
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}},
Update: bson.D{{"$inc", bson.D{{"balance", 100}}}},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 400)
}
func (s *S) TestVerifyFieldOrdering(c *C) {
// Used to have a map in certain operations, which means
// the ordering of fields would be messed up.
fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}}
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: fields,
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var d bson.D
err = s.accounts.FindId(0).One(&d)
c.Assert(err, IsNil)
var filtered bson.D
for _, e := range d {
switch e.Name {
case "a", "b", "c":
filtered = append(filtered, e)
}
}
c.Assert(filtered, DeepEquals, fields)
}
func (s *S) TestChangeLog(c *C) {
chglog := s.db.C("chglog")
s.runner.ChangeLog(chglog)
ops := []txn.Op{{
C: "debts",
Id: 0,
Assert: txn.DocMissing,
}, {
C: "accounts",
Id: 0,
Insert: M{"balance": 300},
}, {
C: "accounts",
Id: 1,
Insert: M{"balance": 300},
}, {
C: "people",
Id: "joe",
Insert: M{"accounts": []int64{0, 1}},
}}
id := bson.NewObjectId()
err := s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
type IdList []interface{}
type Log struct {
Docs IdList "d"
Revnos []int64 "r"
}
var m map[string]*Log
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
c.Assert(m["debts"], IsNil)
ops = []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 100}},
}}
id = bson.NewObjectId()
err = s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
m = nil
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
c.Assert(m["people"], IsNil)
ops = []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "people",
Id: "joe",
Remove: true,
}}
id = bson.NewObjectId()
err = s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
m = nil
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
}
func (s *S) TestPurgeMissing(c *C) {
txn.SetChaos(txn.Chaos{
KillChance: 1,
Breakpoint: "set-applying",
})
err := s.accounts.Insert(M{"_id": 0, "balance": 100})
c.Assert(err, IsNil)
err = s.accounts.Insert(M{"_id": 1, "balance": 100})
c.Assert(err, IsNil)
ops1 := []txn.Op{{
C: "accounts",
Id: 3,
Insert: M{"balance": 100},
}}
ops2 := []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 2,
Insert: M{"balance": 100},
}}
err = s.runner.Run(ops1, "", nil)
c.Assert(err, Equals, txn.ErrChaos)
last := bson.NewObjectId()
err = s.runner.Run(ops2, last, nil)
c.Assert(err, Equals, txn.ErrChaos)
err = s.tc.RemoveId(last)
c.Assert(err, IsNil)
txn.SetChaos(txn.Chaos{})
err = s.runner.ResumeAll()
c.Assert(err, IsNil)
err = s.runner.Run(ops2, "", nil)
c.Assert(err, ErrorMatches, "cannot find transaction .*")
err = s.runner.PurgeMissing("accounts")
c.Assert(err, IsNil)
err = s.runner.Run(ops2, "", nil)
c.Assert(err, IsNil)
expect := []struct{ Id, Balance int }{
{0, -1},
{1, 200},
{2, 100},
{3, 100},
}
var got Account
for _, want := range expect {
err = s.accounts.FindId(want.Id).One(&got)
if want.Balance == -1 {
if err != mgo.ErrNotFound {
c.Errorf("Account %d should not exist, find got err=%#v", err)
}
} else if err != nil {
c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
} else if got.Balance != want.Balance {
c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
}
}
}

3
vendor/launchpad.net/goyaml/.bzr/README generated vendored Normal file
View File

@ -0,0 +1,3 @@
This is a Bazaar control directory.
Do not change any files in this directory.
See http://bazaar.canonical.com/ for more information about Bazaar.

1
vendor/launchpad.net/goyaml/.bzr/branch-format generated vendored Normal file
View File

@ -0,0 +1 @@
Bazaar-NG meta directory, format 1

1
vendor/launchpad.net/goyaml/.bzr/branch/branch.conf generated vendored Normal file
View File

@ -0,0 +1 @@
parent_location = http://bazaar.launchpad.net/~goyaml/goyaml/trunk/

1
vendor/launchpad.net/goyaml/.bzr/branch/format generated vendored Normal file
View File

@ -0,0 +1 @@
Bazaar Branch Format 7 (needs bzr 1.6)

1
vendor/launchpad.net/goyaml/.bzr/branch/last-revision generated vendored Normal file
View File

@ -0,0 +1 @@
51 gustavo@niemeyer.net-20140305200416-7gh64vkcckre5mob

1
vendor/launchpad.net/goyaml/.bzr/checkout/conflicts generated vendored Normal file
View File

@ -0,0 +1 @@
BZR conflict list format 1

BIN
vendor/launchpad.net/goyaml/.bzr/checkout/dirstate generated vendored Normal file

Binary file not shown.

1
vendor/launchpad.net/goyaml/.bzr/checkout/format generated vendored Normal file
View File

@ -0,0 +1 @@
Bazaar Working Tree Format 6 (bzr 1.14)

0
vendor/launchpad.net/goyaml/.bzr/checkout/views generated vendored Normal file
View File

1
vendor/launchpad.net/goyaml/.bzr/repository/format generated vendored Normal file
View File

@ -0,0 +1 @@
Bazaar repository format 2a (needs bzr 1.16 or later)

View File

@ -0,0 +1,5 @@
B+Tree Graph Index 2
node_ref_lists=0
key_elements=1
len=0
row_lengths=

6
vendor/launchpad.net/goyaml/.bzr/repository/pack-names generated vendored Normal file
View File

@ -0,0 +1,6 @@
B+Tree Graph Index 2
node_ref_lists=0
key_elements=1
len=1
row_lengths=1

14
vendor/launchpad.net/goyaml/.bzrignore generated vendored Normal file
View File

@ -0,0 +1,14 @@
[568].out
_*
*.cgo*.*
yaml-*/stamp-h1
yaml-*/Makefile
yaml-*/*/Makefile
yaml-*/libtool
yaml-*/config*
yaml-*/*/*.lo
yaml-*/*/*.la
yaml-*/*/.libs
yaml-*/*/.deps
yaml-*/tests/*

1
vendor/launchpad.net/goyaml/.lbox generated vendored Normal file
View File

@ -0,0 +1 @@
propose -cr -for=lp:goyaml

20
vendor/launchpad.net/goyaml/.lbox.check generated vendored Executable file
View File

@ -0,0 +1,20 @@
#!/bin/sh
set -e
BADFMT=`find * -name '*.go' | xargs gofmt -l`
if [ -n "$BADFMT" ]; then
BADFMT=`echo "$BADFMT" | sed "s/^/ /"`
echo -e "gofmt is sad:\n\n$BADFMT"
exit 1
fi
VERSION=`go version | awk '{print $3}'`
if [ "$VERSION" = 'devel' ]; then
go tool vet \
-methods \
-printf \
-rangeloops \
-printfuncs 'ErrorContextf:1,notFoundf:0,badReqErrorf:0,Commitf:0,Snapshotf:0,Debugf:0' \
.
fi

185
vendor/launchpad.net/goyaml/LICENSE generated vendored Normal file
View File

@ -0,0 +1,185 @@
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

19
vendor/launchpad.net/goyaml/LICENSE.libyaml generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

39
vendor/launchpad.net/goyaml/Makefile generated vendored Normal file
View File

@ -0,0 +1,39 @@
include $(GOROOT)/src/Make.inc
YAML=yaml-0.1.3
LIBYAML=$(PWD)/$(YAML)/src/.libs/libyaml.a
TARG=launchpad.net/goyaml
GOFILES=\
goyaml.go\
resolve.go\
CGOFILES=\
decode.go\
encode.go\
CGO_OFILES+=\
helpers.o\
api.o\
scanner.o\
reader.o\
parser.o\
writer.o\
emitter.o\
GOFMT=gofmt
BADFMT:=$(shell $(GOFMT) -l $(GOFILES) $(CGOFILES) $(wildcard *_test.go))
all: package
gofmt: $(BADFMT)
@for F in $(BADFMT); do $(GOFMT) -w $$F && echo $$F; done
include $(GOROOT)/src/Make.pkg
ifneq ($(BADFMT),)
ifneq ($(MAKECMDGOALS),gofmt)
$(warning WARNING: make gofmt: $(BADFMT))
endif
endif

742
vendor/launchpad.net/goyaml/apic.go generated vendored Normal file
View File

@ -0,0 +1,742 @@
package goyaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
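// For example, yaml_insert_token(parser, 0, &tok) queues tok ahead of
// every pending token, while a negative pos appends it at the tail
// (illustrative call).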
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
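// The commented-out C above validates UTF-8 by hand. A Go rendition of the
// same width/value checks, for illustration only (the checkUTF8 name is
// hypothetical; in practice unicode/utf8.Valid covers this):
//
//	func checkUTF8(p []byte) bool {
//		for i := 0; i < len(p); {
//			octet := p[i]
//			var width int
//			var value rune
//			switch {
//			case octet&0x80 == 0x00:
//				width, value = 1, rune(octet&0x7F)
//			case octet&0xE0 == 0xC0:
//				width, value = 2, rune(octet&0x1F)
//			case octet&0xF0 == 0xE0:
//				width, value = 3, rune(octet&0x0F)
//			case octet&0xF8 == 0xF0:
//				width, value = 4, rune(octet&0x07)
//			default:
//				return false // invalid leading byte
//			}
//			if i+width > len(p) {
//				return false // truncated sequence
//			}
//			for k := 1; k < width; k++ {
//				if p[i+k]&0xC0 != 0x80 {
//					return false // bad continuation byte
//				}
//				value = value<<6 | rune(p[i+k]&0x3F)
//			}
//			// Reject overlong encodings, as the C version does.
//			if (width == 2 && value < 0x80) ||
//				(width == 3 && value < 0x800) ||
//				(width == 4 && value < 0x10000) {
//				return false
//			}
//			i += width
//		}
//		return true
//	}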
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

473
vendor/launchpad.net/goyaml/decode.go generated vendored Normal file
View File

@@ -0,0 +1,473 @@
package goyaml
import (
"reflect"
"strconv"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("Failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
panic("Attempted to go past the end of stream. Corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "Unknown problem parsing YAML content"
}
panic(where + msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("Attempted to parse unknown event: " +
strconv.Itoa(int(p.event.typ)))
}
panic("Unreachable")
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("Expected end of document event but got " +
strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
return n
}
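// Illustration (not part of the original source): parsing "a: [1, 2]" yields
// a node tree shaped roughly like
//
//	documentNode
//	  mappingNode
//	    scalarNode "a"
//	    sequenceNode
//	      scalarNode "1"
//	      scalarNode "2"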
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[string]bool
}
func newDecoder() *decoder {
d := &decoder{}
d.aliases = make(map[string]bool)
return d
}
// d.setter deals with setters and pointer dereferencing and initialization.
//
// It's a slightly convoluted case to handle properly:
//
// - nil pointers should be initialized, unless being set to nil
// - we don't know at this point yet what value SetYAML() will be called with.
// - we can't separate pointer deref/init and setter checking, because
// a setter may be found while going down a pointer chain.
//
// Thus, here is how it takes care of it:
//
// - out is provided as a pointer, so that it can be replaced.
// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null
// - when a setter is found, *out=interface{}, and a set() function is
// returned to call SetYAML() with the value of *out once it's defined.
//
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
again := true
for again {
again = false
setter, _ := (*out).Interface().(Setter)
if tag != "!!null" || setter != nil {
if pv := (*out); pv.Kind() == reflect.Ptr {
if pv.IsNil() {
*out = reflect.New(pv.Type().Elem()).Elem()
pv.Set((*out).Addr())
} else {
*out = pv.Elem()
}
setter, _ = pv.Interface().(Setter)
again = true
}
}
if setter != nil {
var arg interface{}
*out = reflect.ValueOf(&arg).Elem()
return func() {
*good = setter.SetYAML(tag, arg)
}
}
}
return nil
}
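// Sketch (not part of the original source) of a type that exercises the
// pointer/setter handling above; the Flag name is hypothetical:
//
//	type Flag struct{ raw interface{} }
//
//	func (f *Flag) SetYAML(tag string, value interface{}) bool {
//		f.raw = value // return false instead to drop the value
//		return true
//	}
//
// Unmarshalling "v: 1" into struct{ V *Flag } first allocates the nil *Flag,
// then finds the Setter on the pointer and defers SetYAML("!!int", 1) until
// the value has been decoded.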
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
good = d.document(n, out)
case scalarNode:
good = d.scalar(n, out)
case aliasNode:
good = d.alias(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
panic("Unknown anchor '" + n.value + "' referenced")
}
if d.aliases[n.value] {
panic("Anchor '" + n.value + "' value contains itself")
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if set := d.setter(tag, &out, &good); set != nil {
defer set()
}
}
switch out.Kind() {
case reflect.String:
if resolved != nil {
out.SetString(n.value)
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
}
case float64:
if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 {
out.SetUint(uint64(resolved))
good = true
}
case int64:
if resolved >= 0 {
out.SetUint(uint64(resolved))
good = true
}
case float64:
if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
case int64:
out.SetFloat(float64(resolved))
good = true
case float64:
out.SetFloat(resolved)
good = true
}
case reflect.Ptr:
switch resolved.(type) {
case nil:
out.Set(reflect.Zero(out.Type()))
good = true
default:
if out.Type().Elem() == reflect.TypeOf(resolved) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
}
return good
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
if set := d.setter("!!seq", &out, &good); set != nil {
defer set()
}
var iface reflect.Value
if out.Kind() == reflect.Interface {
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, 0))
}
if out.Kind() != reflect.Slice {
return false
}
et := out.Type().Elem()
l := len(n.children)
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Set(reflect.Append(out, e))
}
}
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
if set := d.setter("!!map", &out, &good); set != nil {
defer set()
}
if out.Kind() == reflect.Struct {
return d.mappingStruct(n, out)
}
if out.Kind() == reflect.Interface {
// No type hints. Will have to use a generic map.
iface := out
out = settableValueOf(make(map[interface{}]interface{}))
iface.Set(out)
}
if out.Kind() != reflect.Map {
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
for i := 0; i < l; i += 2 {
if !d.unmarshal(n.children[i], name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
}
}
return true
}

519
vendor/launchpad.net/goyaml/decode_test.go generated vendored Normal file
View File

@@ -0,0 +1,519 @@
package goyaml_test
import (
. "launchpad.net/gocheck"
"launchpad.net/goyaml"
"math"
"reflect"
)
var unmarshalIntTest = 123
var unmarshalTests = []struct {
data string
value interface{}
}{
{
"",
&struct{}{},
}, {
"{}", &struct{}{},
}, {
"v: hi",
map[string]string{"v": "hi"},
}, {
"v: hi", map[string]interface{}{"v": "hi"},
}, {
"v: true",
map[string]string{"v": "true"},
}, {
"v: true",
map[string]interface{}{"v": true},
}, {
"v: 10",
map[string]interface{}{"v": 10},
}, {
"v: 0b10",
map[string]interface{}{"v": 2},
}, {
"v: 0xA",
map[string]interface{}{"v": 10},
}, {
"v: 4294967296",
map[string]int64{"v": 4294967296},
}, {
"v: 0.1",
map[string]interface{}{"v": 0.1},
}, {
"v: .1",
map[string]interface{}{"v": 0.1},
}, {
"v: .Inf",
map[string]interface{}{"v": math.Inf(+1)},
}, {
"v: -.Inf",
map[string]interface{}{"v": math.Inf(-1)},
}, {
"v: -10",
map[string]interface{}{"v": -10},
}, {
"v: -.1",
map[string]interface{}{"v": -0.1},
},
// Simple values.
{
"123",
&unmarshalIntTest,
},
// Floats from spec
{
"canonical: 6.8523e+5",
map[string]interface{}{"canonical": 6.8523e+5},
}, {
"expo: 685.230_15e+03",
map[string]interface{}{"expo": 685.23015e+03},
}, {
"fixed: 685_230.15",
map[string]interface{}{"fixed": 685230.15},
}, {
"neginf: -.inf",
map[string]interface{}{"neginf": math.Inf(-1)},
}, {
"fixed: 685_230.15",
map[string]float64{"fixed": 685230.15},
},
//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
// Bools from spec
{
"canonical: y",
map[string]interface{}{"canonical": true},
}, {
"answer: NO",
map[string]interface{}{"answer": false},
}, {
"logical: True",
map[string]interface{}{"logical": true},
}, {
"option: on",
map[string]interface{}{"option": true},
}, {
"option: on",
map[string]bool{"option": true},
},
// Ints from spec
{
"canonical: 685230",
map[string]interface{}{"canonical": 685230},
}, {
"decimal: +685_230",
map[string]interface{}{"decimal": 685230},
}, {
"octal: 02472256",
map[string]interface{}{"octal": 685230},
}, {
"hexa: 0x_0A_74_AE",
map[string]interface{}{"hexa": 685230},
}, {
"bin: 0b1010_0111_0100_1010_1110",
map[string]interface{}{"bin": 685230},
}, {
"bin: -0b101010",
map[string]interface{}{"bin": -42},
}, {
"decimal: +685_230",
map[string]int{"decimal": 685230},
},
//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
// Nulls from spec
{
"empty:",
map[string]interface{}{"empty": nil},
}, {
"canonical: ~",
map[string]interface{}{"canonical": nil},
}, {
"english: null",
map[string]interface{}{"english": nil},
}, {
"~: null key",
map[interface{}]string{nil: "null key"},
}, {
"empty:",
map[string]*bool{"empty": nil},
},
// Flow sequence
{
"seq: [A,B]",
map[string]interface{}{"seq": []interface{}{"A", "B"}},
}, {
"seq: [A,B,C,]",
map[string][]string{"seq": []string{"A", "B", "C"}},
}, {
"seq: [A,1,C]",
map[string][]string{"seq": []string{"A", "1", "C"}},
}, {
"seq: [A,1,C]",
map[string][]int{"seq": []int{1}},
}, {
"seq: [A,1,C]",
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
},
// Block sequence
{
"seq:\n - A\n - B",
map[string]interface{}{"seq": []interface{}{"A", "B"}},
}, {
"seq:\n - A\n - B\n - C",
map[string][]string{"seq": []string{"A", "B", "C"}},
}, {
"seq:\n - A\n - 1\n - C",
map[string][]string{"seq": []string{"A", "1", "C"}},
}, {
"seq:\n - A\n - 1\n - C",
map[string][]int{"seq": []int{1}},
}, {
"seq:\n - A\n - 1\n - C",
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
},
// Literal block scalar
{
"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
},
// Folded block scalar
{
"scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
},
// Map inside interface with no type hints.
{
"a: {b: c}",
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
},
// Structs and type conversions.
{
"hello: world",
&struct{ Hello string }{"world"},
}, {
"a: {b: c}",
&struct{ A struct{ B string } }{struct{ B string }{"c"}},
}, {
"a: {b: c}",
&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
}, {
"a: {b: c}",
&struct{ A map[string]string }{map[string]string{"b": "c"}},
}, {
"a: {b: c}",
&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
}, {
"a:",
&struct{ A map[string]string }{},
}, {
"a: 1",
&struct{ A int }{1},
}, {
"a: 1",
&struct{ A float64 }{1},
}, {
"a: 1.0",
&struct{ A int }{1},
}, {
"a: 1.0",
&struct{ A uint }{1},
}, {
"a: [1, 2]",
&struct{ A []int }{[]int{1, 2}},
}, {
"a: 1",
&struct{ B int }{0},
}, {
"a: 1",
&struct {
B int "a"
}{1},
}, {
"a: y",
&struct{ A bool }{true},
},
// Some cross type conversions
{
"v: 42",
map[string]uint{"v": 42},
}, {
"v: -42",
map[string]uint{},
}, {
"v: 4294967296",
map[string]uint64{"v": 4294967296},
}, {
"v: -4294967296",
map[string]uint64{},
},
// Overflow cases.
{
"v: 4294967297",
map[string]int32{},
}, {
"v: 128",
map[string]int8{},
},
// Quoted values.
{
"'1': '\"2\"'",
map[interface{}]interface{}{"1": "\"2\""},
}, {
"v:\n- A\n- 'B\n\n C'\n",
map[string][]string{"v": []string{"A", "B\nC"}},
},
// Explicit tags.
{
"v: !!float '1.1'",
map[string]interface{}{"v": 1.1},
}, {
"v: !!null ''",
map[string]interface{}{"v": nil},
}, {
"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
map[string]interface{}{"v": 1},
},
// Anchors and aliases.
{
"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
&struct{ A, B, C, D int }{1, 2, 1, 2},
}, {
"a: &a {c: 1}\nb: *a",
&struct {
A, B struct {
C int
}
}{struct{ C int }{1}, struct{ C int }{1}},
}, {
"a: &a [1, 2]\nb: *a",
&struct{ B []int }{[]int{1, 2}},
},
// Bug #1133337
{
"foo: ''",
map[string]*string{"foo": new(string)},
}, {
"foo: null",
map[string]string{},
},
// Ignored field
{
"a: 1\nb: 2\n",
&struct {
A int
B int "-"
}{1, 0},
},
// Bug #1191981
{
"" +
"%YAML 1.1\n" +
"--- !!str\n" +
`"Generic line break (no glyph)\n\` + "\n" +
` Generic line break (glyphed)\n\` + "\n" +
` Line separator\u2028\` + "\n" +
` Paragraph separator\u2029"` + "\n",
"" +
"Generic line break (no glyph)\n" +
"Generic line break (glyphed)\n" +
"Line separator\u2028Paragraph separator\u2029",
},
// Struct inlining
{
"a: 1\nb: 2\nc: 3\n",
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
},
}
type inlineB struct {
B int
inlineC `yaml:",inline"`
}
type inlineC struct {
C int
}
func (s *S) TestUnmarshal(c *C) {
for i, item := range unmarshalTests {
t := reflect.ValueOf(item.value).Type()
var value interface{}
switch t.Kind() {
case reflect.Map:
value = reflect.MakeMap(t).Interface()
case reflect.String:
t := reflect.ValueOf(item.value).Type()
v := reflect.New(t)
value = v.Interface()
default:
pt := reflect.ValueOf(item.value).Type()
pv := reflect.New(pt.Elem())
value = pv.Interface()
}
err := goyaml.Unmarshal([]byte(item.data), value)
c.Assert(err, IsNil, Commentf("Item #%d", i))
if t.Kind() == reflect.String {
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
} else {
c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i))
}
}
}
func (s *S) TestUnmarshalNaN(c *C) {
value := map[string]interface{}{}
err := goyaml.Unmarshal([]byte("notanum: .NaN"), &value)
c.Assert(err, IsNil)
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
}
var unmarshalErrorTests = []struct {
data, error string
}{
{"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
{"v: [A,", "YAML error: line 1: did not find expected node content"},
{"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
{"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
{"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
{"value: -", "YAML error: block sequence entries are not allowed in this context"},
}
func (s *S) TestUnmarshalErrors(c *C) {
for _, item := range unmarshalErrorTests {
var value interface{}
err := goyaml.Unmarshal([]byte(item.data), &value)
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
}
}
var setterTests = []struct {
data, tag string
value interface{}
}{
{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
{"_: 10", "!!int", 10},
{"_: null", "!!null", nil},
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
}
var setterResult = map[int]bool{}
type typeWithSetter struct {
tag string
value interface{}
}
func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
o.tag = tag
o.value = value
if i, ok := value.(int); ok {
if result, ok := setterResult[i]; ok {
return result
}
}
return true
}
type typeWithSetterField struct {
Field *typeWithSetter "_"
}
func (s *S) TestUnmarshalWithSetter(c *C) {
for _, item := range setterTests {
obj := &typeWithSetterField{}
err := goyaml.Unmarshal([]byte(item.data), obj)
c.Assert(err, IsNil)
c.Assert(obj.Field, NotNil,
Commentf("Pointer not initialized (%#v)", item.value))
c.Assert(obj.Field.tag, Equals, item.tag)
c.Assert(obj.Field.value, DeepEquals, item.value)
}
}
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
obj := &typeWithSetter{}
err := goyaml.Unmarshal([]byte(setterTests[0].data), obj)
c.Assert(err, IsNil)
c.Assert(obj.tag, Equals, setterTests[0].tag)
value, ok := obj.value.(map[interface{}]interface{})
c.Assert(ok, Equals, true)
c.Assert(value["_"], DeepEquals, setterTests[0].value)
}
func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
setterResult[2] = false
setterResult[4] = false
defer func() {
delete(setterResult, 2)
delete(setterResult, 4)
}()
m := map[string]*typeWithSetter{}
data := "{abc: 1, def: 2, ghi: 3, jkl: 4}"
err := goyaml.Unmarshal([]byte(data), m)
c.Assert(err, IsNil)
c.Assert(m["abc"], NotNil)
c.Assert(m["def"], IsNil)
c.Assert(m["ghi"], NotNil)
c.Assert(m["jkl"], IsNil)
c.Assert(m["abc"].value, Equals, 1)
c.Assert(m["ghi"].value, Equals, 3)
}
//var data []byte
//func init() {
// var err error
// data, err = ioutil.ReadFile("/tmp/file.yaml")
// if err != nil {
// panic(err)
// }
//}
//
//func (s *S) BenchmarkUnmarshal(c *C) {
// var err error
// for i := 0; i < c.N; i++ {
// var v map[string]interface{}
// err = goyaml.Unmarshal(data, &v)
// }
// if err != nil {
// panic(err)
// }
//}
//
//func (s *S) BenchmarkMarshal(c *C) {
// var v map[string]interface{}
// goyaml.Unmarshal(data, &v)
// c.ResetTimer()
// for i := 0; i < c.N; i++ {
// goyaml.Marshal(&v)
// }
//}

1682
vendor/launchpad.net/goyaml/emitterc.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

221
vendor/launchpad.net/goyaml/encode.go generated vendored Normal file
View File

@@ -0,0 +1,221 @@
package goyaml
import (
"reflect"
"sort"
"strconv"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}
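// Illustration (not part of the original source): marshalling a document such
// as "a: 1" drives the emitter with the event sequence
//
//	STREAM-START, DOCUMENT-START,
//	MAPPING-START, SCALAR("a"), SCALAR("1"), MAPPING-END,
//	DOCUMENT-END, STREAM-END
//
// newEncoder emits the first two events and finish emits the last two.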
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "Unknown problem generating YAML content"
}
panic(msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
var value interface{}
if getter, ok := in.Interface().(Getter); ok {
tag, value = getter.GetYAML()
if value == nil {
e.nilv()
return
}
in = reflect.ValueOf(value)
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
e.slicev(tag, in)
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.intv(tag, in)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("Can't marshal type yet: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
if rtag, _ := resolve("", s); rtag != "!!str" {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
if !implicit {
style = yaml_PLAIN_SCALAR_STYLE
}
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}

379
vendor/launchpad.net/goyaml/encode_test.go generated vendored Normal file
View File

@@ -0,0 +1,379 @@
package goyaml_test
import (
"fmt"
. "launchpad.net/gocheck"
"launchpad.net/goyaml"
"math"
"strconv"
"strings"
)
var marshalIntTest = 123
var marshalTests = []struct {
value interface{}
data string
}{
{
&struct{}{},
"{}\n",
}, {
map[string]string{"v": "hi"},
"v: hi\n",
}, {
map[string]interface{}{"v": "hi"},
"v: hi\n",
}, {
map[string]string{"v": "true"},
"v: \"true\"\n",
}, {
map[string]string{"v": "false"},
"v: \"false\"\n",
}, {
map[string]interface{}{"v": true},
"v: true\n",
}, {
map[string]interface{}{"v": false},
"v: false\n",
}, {
map[string]interface{}{"v": 10},
"v: 10\n",
}, {
map[string]interface{}{"v": -10},
"v: -10\n",
}, {
map[string]uint{"v": 42},
"v: 42\n",
}, {
map[string]interface{}{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]int64{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]uint64{"v": 4294967296},
"v: 4294967296\n",
}, {
map[string]interface{}{"v": "10"},
"v: \"10\"\n",
}, {
map[string]interface{}{"v": 0.1},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float64(0.1)},
"v: 0.1\n",
}, {
map[string]interface{}{"v": -0.1},
"v: -0.1\n",
}, {
map[string]interface{}{"v": math.Inf(+1)},
"v: .inf\n",
}, {
map[string]interface{}{"v": math.Inf(-1)},
"v: -.inf\n",
}, {
map[string]interface{}{"v": math.NaN()},
"v: .nan\n",
}, {
map[string]interface{}{"v": nil},
"v: null\n",
}, {
map[string]interface{}{"v": ""},
"v: \"\"\n",
}, {
map[string][]string{"v": []string{"A", "B"}},
"v:\n- A\n- B\n",
}, {
map[string][]string{"v": []string{"A", "B\nC"}},
"v:\n- A\n- 'B\n\n C'\n",
}, {
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
}, {
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
"a:\n b: c\n",
}, {
map[string]interface{}{"a": "-"},
"a: '-'\n",
},
// Simple values.
{
&marshalIntTest,
"123\n",
},
// Structures
{
&struct{ Hello string }{"world"},
"hello: world\n",
}, {
&struct {
A struct {
B string
}
}{struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{&struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{},
"a: null\n",
}, {
&struct{ A int }{1},
"a: 1\n",
}, {
&struct{ A []int }{[]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct {
B int "a"
}{1},
"a: 1\n",
}, {
&struct{ A bool }{true},
"a: true\n",
},
// Conditional flag
{
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{1, 0},
"a: 1\n",
}, {
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{0, 0},
"{}\n",
}, {
&struct {
A *struct{ X int } "a,omitempty"
B int "b,omitempty"
}{nil, 0},
"{}\n",
},
// Flow flag
{
&struct {
A []int "a,flow"
}{[]int{1, 2}},
"a: [1, 2]\n",
}, {
&struct {
A map[string]string "a,flow"
}{map[string]string{"b": "c", "d": "e"}},
"a: {b: c, d: e}\n",
}, {
&struct {
A struct {
B, D string
} "a,flow"
}{struct{ B, D string }{"c", "e"}},
"a: {b: c, d: e}\n",
},
// Unexported field
{
&struct {
u int
A int
}{0, 1},
"a: 1\n",
},
// Ignored field
{
&struct {
A int
B int "-"
}{1, 2},
"a: 1\n",
},
// Struct inlining
{
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
"a: 1\nb: 2\nc: 3\n",
},
}
func (s *S) TestMarshal(c *C) {
for _, item := range marshalTests {
data, err := goyaml.Marshal(item.value)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, item.data)
}
}
var marshalErrorTests = []struct {
value interface{}
error string
}{
{
&struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
`Duplicated key 'b' in struct struct \{ B int; .*`,
},
}
func (s *S) TestMarshalErrors(c *C) {
for _, item := range marshalErrorTests {
_, err := goyaml.Marshal(item.value)
c.Assert(err, ErrorMatches, item.error)
}
}
var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"}
var getterTests = []struct {
data, tag string
value interface{}
}{
{"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}},
{"_:\n- 1\n- A\n", "", []interface{}{1, "A"}},
{"_: 10\n", "", 10},
{"_: null\n", "", nil},
{"_: !foo BAR!\n", "!foo", "BAR!"},
{"_: !foo 1\n", "!foo", "1"},
{"_: !foo '\"1\"'\n", "!foo", "\"1\""},
{"_: !foo 1.1\n", "!foo", 1.1},
{"_: !foo 1\n", "!foo", 1},
{"_: !foo 1\n", "!foo", uint(1)},
{"_: !foo true\n", "!foo", true},
{"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}},
{"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}},
{"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest},
}
func (s *S) TestMarshalTypeCache(c *C) {
var data []byte
var err error
func() {
type T struct{ A int }
data, err = goyaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
func() {
type T struct{ B int }
data, err = goyaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
c.Assert(string(data), Equals, "b: 0\n")
}
type typeWithGetter struct {
tag string
value interface{}
}
func (o typeWithGetter) GetYAML() (tag string, value interface{}) {
return o.tag, o.value
}
type typeWithGetterField struct {
Field typeWithGetter "_"
}
func (s *S) TestMarshalWithGetter(c *C) {
for _, item := range getterTests {
obj := &typeWithGetterField{}
obj.Field.tag = item.tag
obj.Field.value = item.value
data, err := goyaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, string(item.data))
}
}
func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
obj := &typeWithGetter{}
obj.tag = ""
obj.value = map[string]string{"hello": "world!"}
data, err := goyaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hello: world!\n")
}
func (s *S) TestSortedOutput(c *C) {
order := []interface{}{
false,
true,
1,
uint(1),
1.0,
1.1,
1.2,
2,
uint(2),
2.0,
2.1,
"",
".1",
".2",
".a",
"1",
"2",
"a!10",
"a/2",
"a/10",
"a~10",
"ab/1",
"b/1",
"b/01",
"b/2",
"b/02",
"b/3",
"b/03",
"b1",
"b01",
"b3",
"c2.10",
"c10.2",
"d1",
"d12",
"d12a",
}
m := make(map[interface{}]int)
for _, k := range order {
m[k] = 1
}
data, err := goyaml.Marshal(m)
c.Assert(err, IsNil)
out := "\n" + string(data)
last := 0
for i, k := range order {
repr := fmt.Sprint(k)
if s, ok := k.(string); ok {
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
repr = `"` + repr + `"`
}
}
index := strings.Index(out, "\n"+repr+":")
if index == -1 {
c.Fatalf("%#v is not in the output: %#v", k, out)
}
if index < last {
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
}
last = index
}
}

307
vendor/launchpad.net/goyaml/goyaml.go generated vendored Normal file
View File

@@ -0,0 +1,307 @@
// Package goyaml implements YAML support for the Go language.
//
// WARNING: You are using an out-of-date import path. Please update your code and import the following instead:
//
// gonuts.org/v1/yaml
//
// The package name has changed from "goyaml" to "yaml", but the package API has not changed.
//
package goyaml
import (
"errors"
"fmt"
"reflect"
"runtime"
"strings"
"sync"
)
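// handleErr is deferred by Unmarshal and Marshal: it turns the panics used
// internally into error returns. String panics come back prefixed with
// "YAML error: " and error values are returned as-is, while runtime errors,
// *reflect.ValueError, and externalPanic values keep propagating.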
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(*reflect.ValueError); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New("YAML error: " + s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Objects implementing the goyaml.Setter interface will receive the YAML
// tag and value via the SetYAML method during unmarshaling, rather than
// being implicitly assigned by the goyaml machinery. If setting the value
// works, the method should return true. If it returns false, the given
// value will be omitted from maps and slices.
type Setter interface {
SetYAML(tag string, value interface{}) bool
}
// Objects implementing the goyaml.Getter interface will get the GetYAML()
// method called when goyaml is requested to marshal the given value, and
// the result of this method will be marshaled in place of the actual object.
type Getter interface {
GetYAML() (tag string, value interface{})
}
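// A minimal sketch (not part of the original source) of a type implementing
// both interfaces; the Celsius name is hypothetical:
//
//	type Celsius float64
//
//	func (c Celsius) GetYAML() (tag string, value interface{}) {
//		return "", float64(c) // marshal as a plain scalar
//	}
//
//	func (c *Celsius) SetYAML(tag string, value interface{}) bool {
//		f, ok := value.(float64)
//		if ok {
//			*c = Celsius(f)
//		}
//		return ok
//	}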
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the object pointed by out.
//
// Maps, pointers to structs and ints, etc, may all be used as out values.
// If an internal pointer within a struct is not initialized, goyaml
// will initialize it if necessary for unmarshalling the provided data,
// but the struct provided as out must not be a nil pointer.
//
// The type of the decoded values and the type of out will be considered,
// and Unmarshal() will do the best possible job to unmarshal values
// appropriately. It is NOT considered an error, though, to skip values
// because they are not available in the decoded YAML, or if they are not
// compatible with the out value. To ensure something was properly
// unmarshaled use a map or compare against the previous value for the
// field (usually the zero value).
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and will be unmarshalled using the field
// name lowercased by default. When custom field names are desired, the
// tag value may be used to tweak the name. Everything before the first
// comma in the field tag will be used as the name. The values following
// the comma are used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// goyaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
d.unmarshal(node, reflect.ValueOf(out))
}
return nil
}
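// Usage sketch (not part of the original source; the field names are
// illustrative):
//
//	var cfg struct {
//		Name  string
//		Ports []int "ports,flow"
//	}
//	err := goyaml.Unmarshal([]byte("name: web\nports: [80, 443]"), &cfg)
//	// err == nil, cfg.Name == "web", cfg.Ports == []int{80, 443}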
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps, pointers to structs and ints, etc, may all be used as the in value.
//
// In the case of struct values, only exported fields will be serialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. Conflicting names result in a runtime error. The tag format
// accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the struct it's applied to, so its fields
// are processed as if they were part of the outer
// struct.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int "a,omitempty"
// B int
// }
// goyaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// goyaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from gobson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
//case reflect.Map:
// if inlineMap >= 0 {
// return nil, errors.New("Multiple ,inline maps in struct " + st.String())
// }
// if field.Type.Key() != reflect.TypeOf("") {
// return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
// }
// inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//panic("Option ,inline needs a struct value or map field")
panic("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
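// Illustration (not part of the original source): given
//
//	type T struct {
//		A int "a,omitempty"
//		B int "-"
//	}
//
// getStructInfo records a single fieldInfo{Key: "a", Num: 0, OmitEmpty: true};
// B is skipped entirely because its tag key is "-".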
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
}
return false
}

1096
vendor/launchpad.net/goyaml/parserc.go generated vendored Normal file

File diff suppressed because it is too large

391
vendor/launchpad.net/goyaml/readerc.go generated vendored Normal file
View File

@ -0,0 +1,391 @@
package goyaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we have enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
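// A standalone sketch of the same BOM sniffing (hypothetical function, not in
// the original source), operating on a plain byte slice. Unlike the parser
// version above, it only detects the encoding and does not consume the BOM.
func sniffEncoding(b []byte) yaml_encoding_t {
	switch {
	case len(b) >= 2 && b[0] == bom_UTF16LE[0] && b[1] == bom_UTF16LE[1]:
		return yaml_UTF16LE_ENCODING
	case len(b) >= 2 && b[0] == bom_UTF16BE[0] && b[1] == bom_UTF16BE[1]:
		return yaml_UTF16BE_ENCODING
	default:
		return yaml_UTF8_ENCODING // no BOM found: UTF-8 is assumed
	}
}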
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
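// Note added for clarity: read_handler follows the usual io.Reader contract,
// so bytes returned alongside io.EOF are still kept. size_read is appended to
// the raw buffer before the error is inspected, and the eof flag merely stops
// future refills.
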
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F -> 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
} else if value <= 0x7FF {
// 0000 0080-0000 07FF -> 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF -> 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
} else {
// 0001 0000-0010 FFFF -> 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
}
buffer_len += width
parser.unread++
}
// On EOF, put NUL into the buffer and stop filling.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
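// Worked example (a hedged sketch, not part of the original readerc.go): the
// surrogate-pair formula quoted in the comments above, applied to two UTF-16
// code units. decodeSurrogatePair(0xD83D, 0xDE00) yields 0x1F600.
func decodeSurrogatePair(w1, w2 rune) rune {
	// w1 carries the high ten bits, w2 the low ten; adding 0x10000 restores
	// the offset subtracted when the pair was encoded.
	return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
}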

Some files were not shown because too many files have changed in this diff