diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..ca702dc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,8 @@ +FROM fedora +RUN dnf install -y golang +RUN mkdir -p /usr/local/src/github.com/vbatts/ +ENV GOPATH=/usr/local +ADD ./ /usr/local/src/github.com/vbatts/imgsrv/ +RUN go install github.com/vbatts/imgsrv +EXPOSE 7777 +ENTRYPOINT ["/usr/local/bin/imgsrv"] diff --git a/glide.lock b/glide.lock new file mode 100644 index 0000000..e468e90 --- /dev/null +++ b/glide.lock @@ -0,0 +1,18 @@ +hash: e99d6e208d9806cd3133aaf20912a1e3ece517fc36c3ad1e67631bc2ba7b4009 +updated: 2017-02-06T12:03:37.357927455-05:00 +imports: +- name: github.com/gorilla/context + version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 +- name: github.com/gorilla/mux + version: 392c28fe23e1c45ddba891b0320b3b5df220beea +- name: github.com/vbatts/go-httplog + version: becd5526f5dcce33a513c4c0ef2c70c761ca66e0 +- name: labix.org/v2/mgo + version: "287" + subpackages: + - bson +- name: labix.org/v2/mgo/sasl + version: "" +- name: launchpad.net/goyaml + version: "51" +testImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 0000000..b130ad5 --- /dev/null +++ b/glide.yaml @@ -0,0 +1,8 @@ +package: github.com/vbatts/imgsrv +import: +- package: github.com/gorilla/mux +- package: github.com/vbatts/go-httplog +- package: labix.org/v2/mgo + subpackages: + - bson +- package: launchpad.net/goyaml diff --git a/vendor/github.com/gorilla/context b/vendor/github.com/gorilla/context new file mode 160000 index 0000000..08b5f42 --- /dev/null +++ b/vendor/github.com/gorilla/context @@ -0,0 +1 @@ +Subproject commit 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 diff --git a/vendor/github.com/gorilla/mux b/vendor/github.com/gorilla/mux new file mode 160000 index 0000000..392c28f --- /dev/null +++ b/vendor/github.com/gorilla/mux @@ -0,0 +1 @@ +Subproject commit 392c28fe23e1c45ddba891b0320b3b5df220beea diff --git a/vendor/github.com/vbatts/go-httplog b/vendor/github.com/vbatts/go-httplog new file mode 160000 index 0000000..becd552 --- /dev/null +++ b/vendor/github.com/vbatts/go-httplog @@ -0,0 +1 @@ +Subproject commit becd5526f5dcce33a513c4c0ef2c70c761ca66e0 diff --git a/vendor/labix.org/v2/mgo/.bzr/README b/vendor/labix.org/v2/mgo/.bzr/README new file mode 100644 index 0000000..f82dc1c --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/README @@ -0,0 +1,3 @@ +This is a Bazaar control directory. +Do not change any files in this directory. +See http://bazaar.canonical.com/ for more information about Bazaar. 
diff --git a/vendor/labix.org/v2/mgo/.bzr/branch-format b/vendor/labix.org/v2/mgo/.bzr/branch-format new file mode 100644 index 0000000..9eb09b7 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/branch-format @@ -0,0 +1 @@ +Bazaar-NG meta directory, format 1 diff --git a/vendor/labix.org/v2/mgo/.bzr/branch/branch.conf b/vendor/labix.org/v2/mgo/.bzr/branch/branch.conf new file mode 100644 index 0000000..6e3d0e2 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/branch/branch.conf @@ -0,0 +1 @@ +parent_location = http://bazaar.launchpad.net/~niemeyer/mgo/v2/ diff --git a/vendor/labix.org/v2/mgo/.bzr/branch/format b/vendor/labix.org/v2/mgo/.bzr/branch/format new file mode 100644 index 0000000..dc392f4 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/branch/format @@ -0,0 +1 @@ +Bazaar Branch Format 7 (needs bzr 1.6) diff --git a/vendor/labix.org/v2/mgo/.bzr/branch/last-revision b/vendor/labix.org/v2/mgo/.bzr/branch/last-revision new file mode 100644 index 0000000..b04b872 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/branch/last-revision @@ -0,0 +1 @@ +287 gustavo@niemeyer.net-20140701140051-90be2tvk93pcczzi diff --git a/vendor/labix.org/v2/mgo/.bzr/checkout/conflicts b/vendor/labix.org/v2/mgo/.bzr/checkout/conflicts new file mode 100644 index 0000000..0dc2d3a --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/checkout/conflicts @@ -0,0 +1 @@ +BZR conflict list format 1 diff --git a/vendor/labix.org/v2/mgo/.bzr/checkout/dirstate b/vendor/labix.org/v2/mgo/.bzr/checkout/dirstate new file mode 100644 index 0000000..ce83042 Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/checkout/dirstate differ diff --git a/vendor/labix.org/v2/mgo/.bzr/checkout/format b/vendor/labix.org/v2/mgo/.bzr/checkout/format new file mode 100644 index 0000000..e0261c7 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/checkout/format @@ -0,0 +1 @@ +Bazaar Working Tree Format 6 (bzr 1.14) diff --git a/vendor/labix.org/v2/mgo/.bzr/checkout/views b/vendor/labix.org/v2/mgo/.bzr/checkout/views new file mode 100644 index 0000000..e69de29 diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/format b/vendor/labix.org/v2/mgo/.bzr/repository/format new file mode 100644 index 0000000..b200528 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/repository/format @@ -0,0 +1 @@ +Bazaar repository format 2a (needs bzr 1.16 or later) diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.cix b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.cix new file mode 100644 index 0000000..426a58d Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.cix differ diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.iix b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.iix new file mode 100644 index 0000000..42ad105 Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.iix differ diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.rix b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.rix new file mode 100644 index 0000000..68c5fdd Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.rix differ diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.six 
b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.six new file mode 100644 index 0000000..a2afde6 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.six @@ -0,0 +1,5 @@ +B+Tree Graph Index 2 +node_ref_lists=0 +key_elements=1 +len=0 +row_lengths= diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.tix b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.tix new file mode 100644 index 0000000..48d39de Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/repository/indices/3e6c1cf43ea6d1aa3348e4f5d16312f2.tix differ diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/pack-names b/vendor/labix.org/v2/mgo/.bzr/repository/pack-names new file mode 100644 index 0000000..68b65e7 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzr/repository/pack-names @@ -0,0 +1,6 @@ +B+Tree Graph Index 2 +node_ref_lists=0 +key_elements=1 +len=1 +row_lengths=1 +x ı0ԙ`R8 /{;+<-T w7Lɥ@AMC 6Ox \ No newline at end of file diff --git a/vendor/labix.org/v2/mgo/.bzr/repository/packs/3e6c1cf43ea6d1aa3348e4f5d16312f2.pack b/vendor/labix.org/v2/mgo/.bzr/repository/packs/3e6c1cf43ea6d1aa3348e4f5d16312f2.pack new file mode 100644 index 0000000..3421040 Binary files /dev/null and b/vendor/labix.org/v2/mgo/.bzr/repository/packs/3e6c1cf43ea6d1aa3348e4f5d16312f2.pack differ diff --git a/vendor/labix.org/v2/mgo/.bzrignore b/vendor/labix.org/v2/mgo/.bzrignore new file mode 100644 index 0000000..340cde7 --- /dev/null +++ b/vendor/labix.org/v2/mgo/.bzrignore @@ -0,0 +1,2 @@ +_* +[856].out diff --git a/vendor/labix.org/v2/mgo/LICENSE b/vendor/labix.org/v2/mgo/LICENSE new file mode 100644 index 0000000..770c767 --- /dev/null +++ b/vendor/labix.org/v2/mgo/LICENSE @@ -0,0 +1,25 @@ +mgo - MongoDB driver for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/labix.org/v2/mgo/Makefile b/vendor/labix.org/v2/mgo/Makefile new file mode 100644 index 0000000..51bee73 --- /dev/null +++ b/vendor/labix.org/v2/mgo/Makefile @@ -0,0 +1,5 @@ +startdb: + @testdb/setup.sh start + +stopdb: + @testdb/setup.sh stop diff --git a/vendor/labix.org/v2/mgo/auth.go b/vendor/labix.org/v2/mgo/auth.go new file mode 100644 index 0000000..7f3ba8c --- /dev/null +++ b/vendor/labix.org/v2/mgo/auth.go @@ -0,0 +1,412 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + "labix.org/v2/mgo/bson" + "sync" +) + +type authCmd struct { + Authenticate int + + Nonce string + User string + Key string +} + +type startSaslCmd struct { + StartSASL int `bson:"startSasl"` +} + +type authResult struct { + ErrMsg string + Ok bool +} + +type getNonceCmd struct { + GetNonce int +} + +type getNonceResult struct { + Nonce string + Err string "$err" + Code int +} + +type logoutCmd struct { + Logout int +} + +type saslCmd struct { + Start int `bson:"saslStart,omitempty"` + Continue int `bson:"saslContinue,omitempty"` + ConversationId int `bson:"conversationId,omitempty"` + Mechanism string `bson:"mechanism,omitempty"` + Payload []byte +} + +type saslResult struct { + Ok bool `bson:"ok"` + NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
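+	// Done mirrors the "done" field the server reports once the SASL conversation has finished.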
+ Done bool + + ConversationId int `bson:"conversationId"` + Payload []byte + ErrMsg string +} + +type saslStepper interface { + Step(serverData []byte) (clientData []byte, done bool, err error) + Close() +} + +func (socket *mongoSocket) getNonce() (nonce string, err error) { + socket.Lock() + for socket.cachedNonce == "" && socket.dead == nil { + debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) + socket.gotNonce.Wait() + } + if socket.cachedNonce == "mongos" { + socket.Unlock() + return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") + } + debugf("Socket %p to %s: got nonce", socket, socket.addr) + nonce, err = socket.cachedNonce, socket.dead + socket.cachedNonce = "" + socket.Unlock() + if err != nil { + nonce = "" + } + return +} + +func (socket *mongoSocket) resetNonce() { + debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) + op := &queryOp{} + op.query = &getNonceCmd{GetNonce: 1} + op.collection = "admin.$cmd" + op.limit = -1 + op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + if err != nil { + socket.kill(errors.New("getNonce: "+err.Error()), true) + return + } + result := &getNonceResult{} + err = bson.Unmarshal(docData, &result) + if err != nil { + socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) + return + } + debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) + if result.Code == 13390 { + // mongos doesn't yet support auth (see http://j.mp/mongos-auth) + result.Nonce = "mongos" + } else if result.Nonce == "" { + var msg string + if result.Err != "" { + msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) + } else { + msg = "Got an empty nonce" + } + socket.kill(errors.New(msg), true) + return + } + socket.Lock() + if socket.cachedNonce != "" { + socket.Unlock() + panic("resetNonce: nonce already cached") + } + socket.cachedNonce = result.Nonce + socket.gotNonce.Signal() + socket.Unlock() + } + err := socket.Query(op) + if err != nil { + socket.kill(errors.New("resetNonce: "+err.Error()), true) + } +} + +func (socket *mongoSocket) Login(cred Credential) error { + socket.Lock() + for _, sockCred := range socket.creds { + if sockCred == cred { + debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) + socket.Unlock() + return nil + } + } + if socket.dropLogout(cred) { + debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + } + socket.Unlock() + + debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) + + var err error + switch cred.Mechanism { + case "", "MONGO-CR": + err = socket.loginClassic(cred) + case "PLAIN": + err = socket.loginPlain(cred) + case "MONGO-X509": + err = fmt.Errorf("unsupported authentication mechanism: %s", cred.Mechanism) + default: + // Try SASL for everything else, if it is available. 
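+		// (Mechanisms such as GSSAPI go through saslNew below, which is backed by the vendored labix.org/v2/mgo/sasl package.)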
+ err = socket.loginSASL(cred) + } + + if err != nil { + debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) + } else { + debugf("Socket %p to %s: login successful", socket, socket.addr) + } + return err +} + +func (socket *mongoSocket) loginClassic(cred Credential) error { + // Note that this only works properly because this function is + // synchronous, which means the nonce won't get reset while we're + // using it and any other login requests will block waiting for a + // new nonce provided in the defer call below. + nonce, err := socket.getNonce() + if err != nil { + return err + } + defer socket.resetNonce() + + psum := md5.New() + psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) + + ksum := md5.New() + ksum.Write([]byte(nonce + cred.Username)) + ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) + + key := hex.EncodeToString(ksum.Sum(nil)) + + cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + +func (socket *mongoSocket) loginPlain(cred Credential) error { + cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + +func (socket *mongoSocket) loginSASL(cred Credential) error { + sasl, err := saslNew(cred, socket.Server().Addr) + if err != nil { + return err + } + defer sasl.Close() + + // The goal of this logic is to carry a locked socket until the + // local SASL step confirms the auth is valid; the socket needs to be + // locked so that concurrent action doesn't leave the socket in an + // auth state that doesn't reflect the operations that took place. + // As a simple case, imagine inverting login=>logout to logout=>login. + // + // The logic below works because the lock func isn't called concurrently. + locked := false + lock := func(b bool) { + if locked != b { + locked = b + if b { + socket.Lock() + } else { + socket.Unlock() + } + } + } + + lock(true) + defer lock(false) + + start := 1 + cmd := saslCmd{} + res := saslResult{} + for { + payload, done, err := sasl.Step(res.Payload) + if err != nil { + return err + } + if done && res.Done { + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + break + } + lock(false) + + cmd = saslCmd{ + Start: start, + Continue: 1 - start, + ConversationId: res.ConversationId, + Mechanism: cred.Mechanism, + Payload: payload, + } + start = 0 + err = socket.loginRun(cred.Source, &cmd, &res, func() error { + // See the comment on lock for why this is necessary. 
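+			// (loginRun runs this callback on the socket's read loop; the lock taken here is released either by the next lock(false) in the loop or by the deferred lock(false) above.)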
+ lock(true) + if !res.Ok || res.NotOk { + return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) + } + return nil + }) + if err != nil { + return err + } + if done && res.Done { + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + break + } + } + + return nil +} + +func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { + var mutex sync.Mutex + var replyErr error + mutex.Lock() + + op := queryOp{} + op.query = query + op.collection = db + ".$cmd" + op.limit = -1 + op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + defer mutex.Unlock() + + if err != nil { + replyErr = err + return + } + + err = bson.Unmarshal(docData, result) + if err != nil { + replyErr = err + } else { + // Must handle this within the read loop for the socket, so + // that concurrent login requests are properly ordered. + replyErr = f() + } + } + + err := socket.Query(&op) + if err != nil { + return err + } + mutex.Lock() // Wait. + return replyErr +} + +func (socket *mongoSocket) Logout(db string) { + socket.Lock() + cred, found := socket.dropAuth(db) + if found { + debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) + socket.logout = append(socket.logout, cred) + } + socket.Unlock() +} + +func (socket *mongoSocket) LogoutAll() { + socket.Lock() + if l := len(socket.creds); l > 0 { + debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) + socket.logout = append(socket.logout, socket.creds...) + socket.creds = socket.creds[0:0] + } + socket.Unlock() +} + +func (socket *mongoSocket) flushLogout() (ops []interface{}) { + socket.Lock() + if l := len(socket.logout); l > 0 { + debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) + for i := 0; i != l; i++ { + op := queryOp{} + op.query = &logoutCmd{1} + op.collection = socket.logout[i].Source + ".$cmd" + op.limit = -1 + ops = append(ops, &op) + } + socket.logout = socket.logout[0:0] + } + socket.Unlock() + return +} + +func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { + for i, sockCred := range socket.creds { + if sockCred.Source == db { + copy(socket.creds[i:], socket.creds[i+1:]) + socket.creds = socket.creds[:len(socket.creds)-1] + return sockCred, true + } + } + return cred, false +} + +func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { + for i, sockCred := range socket.logout { + if sockCred == cred { + copy(socket.logout[i:], socket.logout[i+1:]) + socket.logout = socket.logout[:len(socket.logout)-1] + return true + } + } + return false +} diff --git a/vendor/labix.org/v2/mgo/auth_test.go b/vendor/labix.org/v2/mgo/auth_test.go new file mode 100644 index 0000000..07080ca --- /dev/null +++ b/vendor/labix.org/v2/mgo/auth_test.go @@ -0,0 +1,935 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "flag" + "fmt" + "labix.org/v2/mgo" + . "launchpad.net/gocheck" + "net/url" + "sync" + "time" +) + +func (s *S) TestAuthLoginDatabase(c *C) { + // Test both with a normal database and with an authenticated shard. + for _, addr := range []string{"localhost:40002", "localhost:40203"} { + session, err := mgo.Dial(addr) + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") + + admindb := session.DB("admin") + + err = admindb.Login("root", "wrong") + c.Assert(err, ErrorMatches, "auth fail(s|ed)") + + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + } +} + +func (s *S) TestAuthLoginSession(c *C) { + // Test both with a normal database and with an authenticated shard. + for _, addr := range []string{"localhost:40002", "localhost:40203"} { + session, err := mgo.Dial(addr) + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") + + cred := mgo.Credential{ + Username: "root", + Password: "wrong", + } + err = session.Login(&cred) + c.Assert(err, ErrorMatches, "auth fail(s|ed)") + + cred.Password = "rapadura" + + err = session.Login(&cred) + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + } +} + +func (s *S) TestAuthLoginLogout(c *C) { + // Test both with a normal database and with an authenticated shard. + for _, addr := range []string{"localhost:40002", "localhost:40203"} { + session, err := mgo.Dial(addr) + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + admindb.Logout() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") + + // Must have dropped auth from the session too. 
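+		// (The copied session acquires its own socket, so this verifies the credentials were dropped from the session state itself, not merely from a cached socket.)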
+ session = session.Copy() + defer session.Close() + + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") + } +} + +func (s *S) TestAuthLoginLogoutAll(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + session.LogoutAll() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") + + // Must have dropped auth from the session too. + session = session.Copy() + defer session.Close() + + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") +} + +func (s *S) TestAuthUpsertUserErrors(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + + err = mydb.UpsertUser(&mgo.User{}) + c.Assert(err, ErrorMatches, "user has no Username") + + err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"}) + c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") + + err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) + c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in admin database") +} + +func (s *S) TestAuthUpsertUser(c *C) { + if !s.versionAtLeast(2, 4) { + c.Skip("UpsertUser only works on 2.4+") + } + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + + ruser := &mgo.User{ + Username: "myruser", + Password: "mypass", + Roles: []mgo.Role{mgo.RoleRead}, + } + rwuser := &mgo.User{ + Username: "myrwuser", + Password: "mypass", + Roles: []mgo.Role{mgo.RoleReadWrite}, + } + + err = mydb.UpsertUser(ruser) + c.Assert(err, IsNil) + err = mydb.UpsertUser(rwuser) + c.Assert(err, IsNil) + + err = mydb.Login("myruser", "mypass") + c.Assert(err, IsNil) + + admindb.Logout() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + err = mydb.Login("myrwuser", "mypass") + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + myotherdb := session.DB("myotherdb") + + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + // Test UserSource. + rwuserother := &mgo.User{ + Username: "myrwuser", + UserSource: "mydb", + Roles: []mgo.Role{mgo.RoleRead}, + } + + err = myotherdb.UpsertUser(rwuserother) + if s.versionAtLeast(2, 6) { + c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`) + return + } + c.Assert(err, IsNil) + + admindb.Logout() + + // Test indirection via UserSource: we can't write to it, because + // the roles for myrwuser are different there. + othercoll := myotherdb.C("myothercoll") + err = othercoll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + // Reading works, though. 
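+	// (ErrNotFound rather than an authorization error shows the read was permitted; the collection is simply empty.)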
+ err = othercoll.Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + // Can't login directly into the database using UserSource, though. + err = myotherdb.Login("myrwuser", "mypass") + c.Assert(err, ErrorMatches, "auth fail(s|ed)") +} + +func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { + if !s.versionAtLeast(2, 4) { + c.Skip("UpsertUser only works on 2.4+") + } + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + ruser := &mgo.User{ + Username: "myruser", + Password: "mypass", + OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}}, + } + + err = admindb.UpsertUser(ruser) + c.Assert(err, IsNil) + defer admindb.RemoveUser("myruser") + + admindb.Logout() + err = admindb.Login("myruser", "mypass") + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + err = coll.Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestAuthUpsertUserUpdates(c *C) { + if !s.versionAtLeast(2, 4) { + c.Skip("UpsertUser only works on 2.4+") + } + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + + // Insert a user that can read. + user := &mgo.User{ + Username: "myruser", + Password: "mypass", + Roles: []mgo.Role{mgo.RoleRead}, + } + err = mydb.UpsertUser(user) + c.Assert(err, IsNil) + + // Now update the user password. + user = &mgo.User{ + Username: "myruser", + Password: "mynewpass", + } + err = mydb.UpsertUser(user) + c.Assert(err, IsNil) + + // Login with the new user. + usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb") + c.Assert(err, IsNil) + defer usession.Close() + + // Can read, but not write. + err = usession.DB("mydb").C("mycoll").Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + // Update the user role. + user = &mgo.User{ + Username: "myruser", + Roles: []mgo.Role{mgo.RoleReadWrite}, + } + err = mydb.UpsertUser(user) + c.Assert(err, IsNil) + + // Dial again to ensure the password hasn't changed. + usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb") + c.Assert(err, IsNil) + defer usession.Close() + + // Now it can write. 
+ err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthAddUser(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + err = mydb.AddUser("myruser", "mypass", true) + c.Assert(err, IsNil) + err = mydb.AddUser("mywuser", "mypass", false) + c.Assert(err, IsNil) + + err = mydb.Login("myruser", "mypass") + c.Assert(err, IsNil) + + admindb.Logout() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + err = mydb.Login("mywuser", "mypass") + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthAddUserReplaces(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + err = mydb.AddUser("myuser", "myoldpass", false) + c.Assert(err, IsNil) + err = mydb.AddUser("myuser", "mynewpass", true) + c.Assert(err, IsNil) + + admindb.Logout() + + err = mydb.Login("myuser", "myoldpass") + c.Assert(err, ErrorMatches, "auth fail(s|ed)") + err = mydb.Login("myuser", "mynewpass") + c.Assert(err, IsNil) + + // ReadOnly flag was changed too. + err = mydb.C("mycoll").Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") +} + +func (s *S) TestAuthRemoveUser(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + err = mydb.AddUser("myuser", "mypass", true) + c.Assert(err, IsNil) + err = mydb.RemoveUser("myuser") + c.Assert(err, IsNil) + + err = mydb.Login("myuser", "mypass") + c.Assert(err, ErrorMatches, "auth fail(s|ed)") +} + +func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + oldStats := mgo.GetStats() + + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + newStats := mgo.GetStats() + c.Assert(newStats.SentOps, Equals, oldStats.SentOps) +} + +func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + oldStats := mgo.GetStats() + + admindb.Logout() + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + newStats := mgo.GetStats() + c.Assert(newStats.SentOps, Equals, oldStats.SentOps) +} + +func (s *S) TestAuthLoginSwitchUser(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + err = admindb.Login("reader", "rapadura") + c.Assert(err, IsNil) + + // Can't write. + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + // But can read. 
+ result := struct{ N int }{} + err = coll.Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) +} + +func (s *S) TestAuthLoginChangePassword(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + mydb := session.DB("mydb") + err = mydb.AddUser("myuser", "myoldpass", false) + c.Assert(err, IsNil) + + err = mydb.Login("myuser", "myoldpass") + c.Assert(err, IsNil) + + err = mydb.AddUser("myuser", "mynewpass", true) + c.Assert(err, IsNil) + + err = mydb.Login("myuser", "mynewpass") + c.Assert(err, IsNil) + + admindb.Logout() + + // The second login must be in effect, which means read-only. + err = mydb.C("mycoll").Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") +} + +func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + session.Refresh() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + session = session.Copy() + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthLoginCachingWithSessionClone(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + session = session.Clone() + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthLoginCachingWithNewSession(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + session = session.New() + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized for .*") +} + +func (s *S) TestAuthLoginCachingAcrossPool(c *C) { + // Logins are cached even when the connection goes back + // into the pool. + + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + // Add another user to test the logout case at the same time. + mydb := session.DB("mydb") + err = mydb.AddUser("myuser", "mypass", false) + c.Assert(err, IsNil) + + err = mydb.Login("myuser", "mypass") + c.Assert(err, IsNil) + + // Logout root explicitly, to test both cases. + admindb.Logout() + + // Give socket back to pool. + session.Refresh() + + // Brand new session, should use socket from the pool. + other := session.New() + defer other.Close() + + oldStats := mgo.GetStats() + + err = other.DB("admin").Login("root", "rapadura") + c.Assert(err, IsNil) + err = other.DB("mydb").Login("myuser", "mypass") + c.Assert(err, IsNil) + + // Both logins were cached, so no ops.
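+	// (An unchanged SentOps count means the cached logins were reused without sending new authenticate commands.)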
+ newStats := mgo.GetStats() + c.Assert(newStats.SentOps, Equals, oldStats.SentOps) + + // And they actually worked. + err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) + + other.DB("admin").Logout() + + err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) { + // Now verify that logouts are properly flushed if they + // are not revalidated after leaving the pool. + + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + // Add another user to test the logout case at the same time. + mydb := session.DB("mydb") + err = mydb.AddUser("myuser", "mypass", true) + c.Assert(err, IsNil) + + err = mydb.Login("myuser", "mypass") + c.Assert(err, IsNil) + + // Just some data to query later. + err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) + + // Give socket back to pool. + session.Refresh() + + // Brand new session, should use socket from the pool. + other := session.New() + defer other.Close() + + oldStats := mgo.GetStats() + + err = other.DB("mydb").Login("myuser", "mypass") + c.Assert(err, IsNil) + + // Login was cached, so no ops. + newStats := mgo.GetStats() + c.Assert(newStats.SentOps, Equals, oldStats.SentOps) + + // Can't write, since root has been implicitly logged out + // when the socket went into the pool, and not revalidated. + err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + // But can read due to the revalidated myuser login. + result := struct{ N int }{} + err = other.DB("mydb").C("mycoll").Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) +} + +func (s *S) TestAuthEventual(c *C) { + // Eventual sessions don't keep sockets around, so they are + // an interesting test case. + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + admindb := session.DB("admin") + err = admindb.Login("root", "rapadura") + c.Assert(err, IsNil) + + err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) + + var wg sync.WaitGroup + wg.Add(20) + + for i := 0; i != 10; i++ { + go func() { + defer wg.Done() + var result struct{ N int } + err := session.DB("mydb").C("mycoll").Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) + }() + } + + for i := 0; i != 10; i++ { + go func() { + defer wg.Done() + err := session.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) + }() + } + + wg.Wait() +} + +func (s *S) TestAuthURL(c *C) { + session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") + c.Assert(err, IsNil) + defer session.Close() + + err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthURLWrongCredentials(c *C) { + session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/") + if session != nil { + session.Close() + } + c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(session, IsNil) +} + +func (s *S) TestAuthURLWithNewSession(c *C) { + // When authentication is in the URL, the new session will + // actually carry it on as well, even if logged out explicitly.
+ session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") + c.Assert(err, IsNil) + defer session.Close() + + session.DB("admin").Logout() + + // Do it twice to ensure it passes the needed data on. + session = session.New() + defer session.Close() + session = session.New() + defer session.Close() + + err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestAuthURLWithDatabase(c *C) { + session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + mydb := session.DB("mydb") + err = mydb.AddUser("myruser", "mypass", true) + c.Assert(err, IsNil) + + // Test once with database, and once with source. + for i := 0; i < 2; i++ { + var url string + if i == 0 { + url = "mongodb://myruser:mypass@localhost:40002/mydb" + } else { + url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb" + } + usession, err := mgo.Dial(url) + c.Assert(err, IsNil) + defer usession.Close() + + ucoll := usession.DB("mydb").C("mycoll") + err = ucoll.FindId(0).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + err = ucoll.Insert(M{"n": 1}) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + } +} + +func (s *S) TestDefaultDatabase(c *C) { + tests := []struct{ url, db string }{ + {"mongodb://root:rapadura@localhost:40002", "test"}, + {"mongodb://root:rapadura@localhost:40002/admin", "admin"}, + {"mongodb://localhost:40001", "test"}, + {"mongodb://localhost:40001/", "test"}, + {"mongodb://localhost:40001/mydb", "mydb"}, + } + + for _, test := range tests { + session, err := mgo.Dial(test.url) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("test: %#v", test) + c.Assert(session.DB("").Name, Equals, test.db) + + scopy := session.Copy() + c.Check(scopy.DB("").Name, Equals, test.db) + scopy.Close() + } +} + +func (s *S) TestAuthDirect(c *C) { + // Direct connections must work to the master and slaves. + for _, port := range []string{"40031", "40032", "40033"} { + url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port) + session, err := mgo.Dial(url) + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, true) + + var result struct{} + err = session.DB("mydb").C("mycoll").Find(nil).One(&result) + c.Assert(err, Equals, mgo.ErrNotFound) + } +} + +func (s *S) TestAuthDirectWithLogin(c *C) { + // Direct connections must work to the master and slaves. + for _, port := range []string{"40031", "40032", "40033"} { + url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port) + session, err := mgo.Dial(url) + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, true) + session.SetSyncTimeout(3 * time.Second) + + err = session.DB("admin").Login("root", "rapadura") + c.Assert(err, IsNil) + + var result struct{} + err = session.DB("mydb").C("mycoll").Find(nil).One(&result) + c.Assert(err, Equals, mgo.ErrNotFound) + } +} + +var ( + plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") + plainUser = "einstein" + plainPass = "password" +) + +func (s *S) TestAuthPlainCred(c *C) { + if *plainFlag == "" { + c.Skip("no -plain") + } + cred := &mgo.Credential{ + Username: plainUser, + Password: plainPass, + Source: "$external", + Mechanism: "PLAIN", + } + c.Logf("Connecting to %s...", *plainFlag) + session, err := mgo.Dial(*plainFlag) + c.Assert(err, IsNil) + defer session.Close() + + records := session.DB("records").C("records") + + c.Logf("Connected! 
Testing the need for authentication...") + err = records.Find(nil).One(nil) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + c.Logf("Connected! Testing the need for authentication...") + err = records.Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestAuthPlainURL(c *C) { + if *plainFlag == "" { + c.Skip("no -plain") + } + c.Logf("Connecting to %s...", *plainFlag) + session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag)) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Connected! Testing the need for authentication...") + err = session.DB("records").C("records").Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +var ( + kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") + kerberosHost = "mmscustmongo.10gen.me" + kerberosUser = "mmsagent/mmscustagent.10gen.me@10GEN.ME" +) + +func (s *S) TestAuthKerberosCred(c *C) { + if !*kerberosFlag { + c.Skip("no -kerberos") + } + cred := &mgo.Credential{ + Username: kerberosUser, + Mechanism: "GSSAPI", + } + c.Logf("Connecting to %s...", kerberosHost) + session, err := mgo.Dial(kerberosHost) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Connected! Testing the need for authentication...") + names, err := session.DatabaseNames() + c.Assert(err, ErrorMatches, "unauthorized") + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err = session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} + +func (s *S) TestAuthKerberosURL(c *C) { + if !*kerberosFlag { + c.Skip("no -kerberos") + } + c.Logf("Connecting to %s...", kerberosHost) + session, err := mgo.Dial(url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI") + c.Assert(err, IsNil) + defer session.Close() + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} diff --git a/vendor/labix.org/v2/mgo/bson/LICENSE b/vendor/labix.org/v2/mgo/bson/LICENSE new file mode 100644 index 0000000..8903260 --- /dev/null +++ b/vendor/labix.org/v2/mgo/bson/LICENSE @@ -0,0 +1,25 @@ +BSON library for Go + +Copyright (c) 2010-2012 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/labix.org/v2/mgo/bson/bson.go b/vendor/labix.org/v2/mgo/bson/bson.go new file mode 100644 index 0000000..3ebfd84 --- /dev/null +++ b/vendor/labix.org/v2/mgo/bson/bson.go @@ -0,0 +1,682 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package bson is an implementation of the BSON specification for Go: +// +// http://bsonspec.org +// +// It was created as part of the mgo MongoDB driver for Go, but is standalone +// and may be used on its own without the driver. +package bson + +import ( + "crypto/md5" + "crypto/rand" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" +) + +// -------------------------------------------------------------------------- +// The public API. + +// A value implementing the bson.Getter interface will have its GetBSON +// method called when the given value has to be marshalled, and the result +// of this method will be marshaled in place of the actual object. +// +// If GetBSON returns a non-nil error, the marshalling procedure +// will stop and error out with the provided value. +type Getter interface { + GetBSON() (interface{}, error) +} + +// A value implementing the bson.Setter interface will receive the BSON +// value via the SetBSON method during unmarshaling, and the object +// itself will not be changed as usual. +// +// If setting the value works, the method should return nil or alternatively +// bson.SetZero to set the respective field to its zero value (nil for +// pointer types).
If SetBSON returns a value of type bson.TypeError, the +// BSON value will be omitted from a map or slice being decoded and the +// unmarshalling will continue. If it returns any other non-nil error, the +// unmarshalling procedure will stop and error out with the provided value. +// +// This interface is generally useful in pointer receivers, since the method +// will want to change the receiver. A type field that implements the Setter +// interface doesn't have to be a pointer, though. +// +// Unlike the usual behavior, unmarshalling onto a value that implements a +// Setter interface will NOT reset the value to its zero state. This allows +// the value to decide by itself how to be unmarshalled. +// +// For example: +// +// type MyString string +// +// func (s *MyString) SetBSON(raw bson.Raw) error { +// return raw.Unmarshal(s) +// } +// +type Setter interface { + SetBSON(raw Raw) error +} + +// SetZero may be returned from a SetBSON method to have the value set to +// its respective zero value. When used in pointer values, this will set the +// field to nil rather than to the pre-allocated value. +var SetZero = errors.New("set to zero") + +// M is a convenient alias for a map[string]interface{} map, useful for +// dealing with BSON in a native way. For instance: +// +// bson.M{"a": 1, "b": true} +// +// There's no special handling for this type in addition to what's done anyway +// for an equivalent map type. Elements in the map will be dumped in an +// undefined order. See also the bson.D type for an ordered alternative. +type M map[string]interface{} + +// D represents a BSON document containing ordered elements. For example: +// +// bson.D{{"a", 1}, {"b", true}} +// +// In some situations, such as when creating indexes for MongoDB, the order in +// which the elements are defined is important. If the order is not important, +// using a map is generally more comfortable. See bson.M and bson.RawD. +type D []DocElem + +// See the D type. +type DocElem struct { + Name string + Value interface{} +} + +// Map returns a map out of the ordered element name/value pairs in d. +func (d D) Map() (m M) { + m = make(M, len(d)) + for _, item := range d { + m[item.Name] = item.Value + } + return m +} + +// The Raw type represents raw unprocessed BSON documents and elements. +// Kind is the kind of element as defined per the BSON specification, and +// Data is the raw unprocessed data for the respective element. +// Using this type it is possible to unmarshal or marshal values partially. +// +// Relevant documentation: +// +// http://bsonspec.org/#/specification +// +type Raw struct { + Kind byte + Data []byte +} + +// RawD represents a BSON document containing raw unprocessed elements. +// This low-level representation may be useful when lazily processing +// documents of uncertain content, or when manipulating the raw content +// documents in general. +type RawD []RawDocElem + +// See the RawD type. +type RawDocElem struct { + Name string + Value Raw +} + +// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes +// long. MongoDB objects by default have such a property set in their "_id" +// property. +// +// http://www.mongodb.org/display/DOCS/Object+IDs +type ObjectId string + +// ObjectIdHex returns an ObjectId from the provided hex representation. +// Calling this function with an invalid hex representation will +// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId { + d, err := hex.DecodeString(s) + if err != nil || len(d) != 12 { + panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s)) + } + return ObjectId(d) +} + +// IsObjectIdHex returns whether s is a valid hex representation of +// an ObjectId. See the ObjectIdHex function. +func IsObjectIdHex(s string) bool { + if len(s) != 24 { + return false + } + _, err := hex.DecodeString(s) + return err == nil +} + +// objectIdCounter is atomically incremented when generating a new ObjectId +// using NewObjectId() function. It's used as a counter part of an id. +var objectIdCounter uint32 = 0 + +// machineId stores machine id generated once and used in subsequent calls +// to NewObjectId function. +var machineId = readMachineId() + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. +func readMachineId() []byte { + var sum [3]byte + id := sum[:] + hostname, err1 := os.Hostname() + if err1 != nil { + _, err2 := io.ReadFull(rand.Reader, id) + if err2 != nil { + panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) + } + return id + } + hw := md5.New() + hw.Write([]byte(hostname)) + copy(id, hw.Sum(nil)) + return id +} + +// NewObjectId returns a new unique ObjectId. +func NewObjectId() ObjectId { + var b [12]byte + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) + // Machine, first 3 bytes of md5(hostname) + b[4] = machineId[0] + b[5] = machineId[1] + b[6] = machineId[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + pid := os.Getpid() + b[7] = byte(pid >> 8) + b[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIdCounter, 1) + b[9] = byte(i >> 16) + b[10] = byte(i >> 8) + b[11] = byte(i) + return ObjectId(b[:]) +} + +// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled +// with the provided number of seconds from epoch UTC, and all other parts +// filled with zeroes. It's not safe to insert a document with an id generated +// by this method, it is useful only for queries to find documents with ids +// generated before or after the specified timestamp. +func NewObjectIdWithTime(t time.Time) ObjectId { + var b [12]byte + binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) + return ObjectId(string(b[:])) +} + +// String returns a hex string representation of the id. +// Example: ObjectIdHex("4d88e15b60f486e428412dc9"). +func (id ObjectId) String() string { + return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) +} + +// Hex returns a hex representation of the ObjectId. +func (id ObjectId) Hex() string { + return hex.EncodeToString([]byte(id)) +} + +// MarshalJSON turns a bson.ObjectId into a json.Marshaller. +func (id ObjectId) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%x"`, string(id))), nil +} + +// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. +func (id *ObjectId) UnmarshalJSON(data []byte) error { + if len(data) != 26 || data[0] != '"' || data[25] != '"' { + return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data))) + } + var buf [12]byte + _, err := hex.Decode(buf[:], data[1:25]) + if err != nil { + return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err)) + } + *id = ObjectId(string(buf[:])) + return nil +} + +// Valid returns true if id is valid. A valid id must contain exactly 12 bytes. 
+func (id ObjectId) Valid() bool { + return len(id) == 12 +} + +// byteSlice returns byte slice of id from start to end. +// Calling this function with an invalid id will cause a runtime panic. +func (id ObjectId) byteSlice(start, end int) []byte { + if len(id) != 12 { + panic(fmt.Sprintf("Invalid ObjectId: %q", string(id))) + } + return []byte(string(id)[start:end]) +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Machine() []byte { + return id.byteSlice(4, 7) +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Pid() uint16 { + return binary.BigEndian.Uint16(id.byteSlice(7, 9)) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Counter() int32 { + b := id.byteSlice(9, 12) + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// The Symbol type is similar to a string and is used in languages with a +// distinct symbol type. +type Symbol string + +// Now returns the current time with millisecond precision. MongoDB stores +// timestamps with the same precision, so a Time returned from this method +// will not change after a roundtrip to the database. That's the only reason +// why this function exists. Using the time.Now function also works fine +// otherwise. +func Now() time.Time { + return time.Unix(0, time.Now().UnixNano()/1e6*1e6) +} + +// MongoTimestamp is a special internal type used by MongoDB that for some +// strange reason has its own datatype defined in BSON. +type MongoTimestamp int64 + +type orderKey int64 + +// MaxKey is a special value that compares higher than all other possible BSON +// values in a MongoDB database. +var MaxKey = orderKey(1<<63 - 1) + +// MinKey is a special value that compares lower than all other possible BSON +// values in a MongoDB database. +var MinKey = orderKey(-1 << 63) + +type undefined struct{} + +// Undefined represents the undefined BSON value. +var Undefined undefined + +// Binary is a representation for non-standard binary values. Any kind should +// work, but the following are known as of this writing: +// +// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}. +// 0x01 - Function (!?) +// 0x02 - Obsolete generic. +// 0x03 - UUID +// 0x05 - MD5 +// 0x80 - User defined. +// +type Binary struct { + Kind byte + Data []byte +} + +// RegEx represents a regular expression. The Options field may contain +// individual characters defining the way in which the pattern should be +// applied, and must be sorted. Valid options as of this writing are 'i' for +// case insensitive matching, 'm' for multi-line matching, 'x' for verbose +// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all +// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match +// unicode. The value of the Options parameter is not verified before being +// marshaled into the BSON format. 
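+// For example, a case-insensitive multi-line match: +// +//     bson.RegEx{Pattern: "^foo", Options: "im"} +//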
+type RegEx struct { + Pattern string + Options string +} + +// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it +// will be marshaled as a mapping from identifiers to values that may be +// used when evaluating the provided Code. +type JavaScript struct { + Code string + Scope interface{} +} + +const initialBufferSize = 64 + +func handleErr(err *error) { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } else if _, ok := r.(externalPanic); ok { + panic(r) + } else if s, ok := r.(string); ok { + *err = errors.New(s) + } else if e, ok := r.(error); ok { + *err = e + } else { + panic(r) + } + } +} + +// Marshal serializes the in value, which may be a map or a struct value. +// In the case of struct values, only exported fields will be serialized. +// The lowercased field name is used as the key for each exported field, +// but this behavior may be changed using the respective field tag. +// The tag may also contain flags to tweak the marshalling behavior for +// the field. The tag formats accepted are: +// +// "[<key>][,<flag1>[,<flag2>]]" +// +// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// +// minsize Marshal an int64 value as an int32, if that's feasible +// while preserving the numeric value. +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the bson keys of other struct fields. +// +// Some examples: +// +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := &encoder{make([]byte, 0, initialBufferSize)} + e.addDoc(reflect.ValueOf(in)) + return e.out, nil +} + +// Unmarshal deserializes data from in into the out value. The out value +// must be a map, a pointer to a struct, or a pointer to a bson.D value. +// The lowercased field name is used as the key for each exported field, +// but this behavior may be changed using the respective field tag. +// The tag may also contain flags to tweak the marshalling behavior for +// the field. The tag formats accepted are: +// +// "[<key>][,<flag1>[,<flag2>]]" +// +// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported during unmarshal (see the +// Marshal method for other flags): +// +// inline Inline the field, which must be a struct or a map. +// Inlined structs are handled as if their fields were part +// of the outer struct. An inlined map causes keys that do +// not match any other struct field to be inserted in the +// map rather than being discarded as usual. +// +// The target field or element types of out may not necessarily match +// the BSON values of the provided data. The following conversions are +// made automatically: +// +// - Numeric types are converted if at least the integer part of the +// value would be preserved correctly +// - Bools are converted to numeric types as 1 or 0 +// - Numeric types are converted to bools as true if not 0 or false otherwise +// - Binary and string BSON data is converted to a string, array or byte slice +// +// If the value would not fit the type and cannot be converted, it's +// silently skipped.
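+// +// For example, under the rules above an int marshalled via bson.M may be +// loaded back into a float64 field: +// +//     data, _ := bson.Marshal(bson.M{"n": 1}) +//     var v struct{ N float64 } +//     _ = bson.Unmarshal(data, &v) // v.N == 1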
+// +// Pointer values are initialized when necessary. +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + v := reflect.ValueOf(out) + switch v.Kind() { + case reflect.Map, reflect.Ptr: + d := newDecoder(in) + d.readDocTo(v) + case reflect.Struct: + return errors.New("Unmarshal can't deal with struct values. Use a pointer.") + default: + return errors.New("Unmarshal needs a map or a pointer to a struct.") + } + return nil +} + +// Unmarshal deserializes raw into the out value. If the out value type +// is not compatible with raw, a *bson.TypeError is returned. +// +// See the Unmarshal function documentation for more details on the +// unmarshalling process. +func (raw Raw) Unmarshal(out interface{}) (err error) { + defer handleErr(&err) + v := reflect.ValueOf(out) + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + fallthrough + case reflect.Map: + d := newDecoder(raw.Data) + good := d.readElemTo(v, raw.Kind) + if !good { + return &TypeError{v.Type(), raw.Kind} + } + case reflect.Struct: + return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.") + default: + return errors.New("Raw Unmarshal needs a map or a valid pointer.") + } + return nil +} + +type TypeError struct { + Type reflect.Type + Kind byte +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + InlineMap int + Zero reflect.Value +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + MinSize bool + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var structMapMutex sync.RWMutex + +type externalPanic string + +func (e externalPanic) String() string { + return string(e) +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + structMapMutex.RLock() + sinfo, found := structMap[st] + structMapMutex.RUnlock() + if found { + return sinfo, nil + } + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("bson") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + // XXX Drop this after a few releases. 
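+ // Obsolete "/"-separated tags ("name/c" for omitempty, "name/s" for + // minsize) are not accepted: the code below builds the equivalent + // ","-based tag and panics telling the caller to use it instead.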
+ if s := strings.Index(tag, "/"); s >= 0 { + recommend := tag[:s] + for _, c := range tag[s+1:] { + switch c { + case 'c': + recommend += ",omitempty" + case 's': + recommend += ",minsize" + default: + msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st) + panic(externalPanic(msg)) + } + } + msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend) + panic(externalPanic(msg)) + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "minsize": + info.MinSize = true + case "inline": + inline = true + default: + msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) + panic(externalPanic(msg)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + panic("Option ,inline needs a struct value or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + sinfo = &structInfo{ + fieldsMap, + fieldsList, + inlineMap, + reflect.New(st).Elem(), + } + structMapMutex.Lock() + structMap[st] = sinfo + structMapMutex.Unlock() + return sinfo, nil +} diff --git a/vendor/labix.org/v2/mgo/bson/bson_test.go b/vendor/labix.org/v2/mgo/bson/bson_test.go new file mode 100644 index 0000000..1263e97 --- /dev/null +++ b/vendor/labix.org/v2/mgo/bson/bson_test.go @@ -0,0 +1,1466 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// gobson - BSON library for Go. + +package bson_test + +import ( + "encoding/binary" + "encoding/json" + "errors" + "labix.org/v2/mgo/bson" + . "launchpad.net/gocheck" + "net/url" + "reflect" + "testing" + "time" +) + +func TestAll(t *testing.T) { + TestingT(t) +} + +type S struct{} + +var _ = Suite(&S{}) + +// Wrap up the document elements contained in data, prepending the int32 +// length of the data, and appending the '\x00' value closing the document. +func wrapInDoc(data string) string { + result := make([]byte, len(data)+5) + binary.LittleEndian.PutUint32(result, uint32(len(result))) + copy(result[4:], []byte(data)) + return string(result) +} + +func makeZeroDoc(value interface{}) (zero interface{}) { + v := reflect.ValueOf(value) + t := v.Type() + switch t.Kind() { + case reflect.Map: + mv := reflect.MakeMap(t) + zero = mv.Interface() + case reflect.Ptr: + pv := reflect.New(v.Type().Elem()) + zero = pv.Interface() + case reflect.Slice: + zero = reflect.New(t).Interface() + default: + panic("unsupported doc type") + } + return zero +} + +func testUnmarshal(c *C, data string, obj interface{}) { + zero := makeZeroDoc(obj) + err := bson.Unmarshal([]byte(data), zero) + c.Assert(err, IsNil) + c.Assert(zero, DeepEquals, obj) +} + +type testItemType struct { + obj interface{} + data string +} + +// -------------------------------------------------------------------------- +// Samples from bsonspec.org: + +var sampleItems = []testItemType{ + {bson.M{"hello": "world"}, + "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, + {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, + "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + + "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, +} + +func (s *S) TestMarshalSampleItems(c *C) { + for i, item := range sampleItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) + } +} + +func (s *S) TestUnmarshalSampleItems(c *C) { + for i, item := range sampleItems { + value := bson.M{} + err := bson.Unmarshal([]byte(item.data), value) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) + } +} + +// -------------------------------------------------------------------------- +// Every type, ordered by the type flag. These are not wrapped with the +// length and last \x00 from the document. wrapInDoc() computes them. +// Note that all of them should be supported as two-way conversions. 
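+// Each non-empty data string below is a single element encoding: one kind +// byte, the null-terminated key "_", and then the value bytes for that kind.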
+ +var allItems = []testItemType{ + {bson.M{}, + ""}, + {bson.M{"_": float64(5.05)}, + "\x01_\x00333333\x14@"}, + {bson.M{"_": "yo"}, + "\x02_\x00\x03\x00\x00\x00yo\x00"}, + {bson.M{"_": bson.M{"a": true}}, + "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, + {bson.M{"_": []interface{}{true, false}}, + "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, + {bson.M{"_": []byte("yo")}, + "\x05_\x00\x02\x00\x00\x00\x00yo"}, + {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, + "\x05_\x00\x04\x00\x00\x00\x80udef"}, + {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. + "\x06_\x00"}, + {bson.M{"_": bson.ObjectId("0123456789ab")}, + "\x07_\x000123456789ab"}, + {bson.M{"_": false}, + "\x08_\x00\x00"}, + {bson.M{"_": true}, + "\x08_\x00\x01"}, + {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. + "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, + {bson.M{"_": nil}, + "\x0A_\x00"}, + {bson.M{"_": bson.RegEx{"ab", "cd"}}, + "\x0B_\x00ab\x00cd\x00"}, + {bson.M{"_": bson.JavaScript{"code", nil}}, + "\x0D_\x00\x05\x00\x00\x00code\x00"}, + {bson.M{"_": bson.Symbol("sym")}, + "\x0E_\x00\x04\x00\x00\x00sym\x00"}, + {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, + "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + + "\x07\x00\x00\x00\x0A\x00\x00"}, + {bson.M{"_": 258}, + "\x10_\x00\x02\x01\x00\x00"}, + {bson.M{"_": bson.MongoTimestamp(258)}, + "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, + {bson.M{"_": int64(258)}, + "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, + {bson.M{"_": int64(258 << 32)}, + "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, + {bson.M{"_": bson.MaxKey}, + "\x7F_\x00"}, + {bson.M{"_": bson.MinKey}, + "\xFF_\x00"}, +} + +func (s *S) TestMarshalAllItems(c *C) { + for i, item := range allItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item)) + } +} + +func (s *S) TestUnmarshalAllItems(c *C) { + for i, item := range allItems { + value := bson.M{} + err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) + } +} + +func (s *S) TestUnmarshalRawAllItems(c *C) { + for i, item := range allItems { + if len(item.data) == 0 { + continue + } + value := item.obj.(bson.M)["_"] + if value == nil { + continue + } + pv := reflect.New(reflect.ValueOf(value).Type()) + raw := bson.Raw{item.data[0], []byte(item.data[3:])} + c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) + err := raw.Unmarshal(pv.Interface()) + c.Assert(err, IsNil) + c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) + } +} + +func (s *S) TestUnmarshalRawIncompatible(c *C) { + raw := bson.Raw{0x08, []byte{0x01}} // true + err := raw.Unmarshal(&struct{}{}) + c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") +} + +func (s *S) TestUnmarshalZeroesStruct(c *C) { + data, err := bson.Marshal(bson.M{"b": 2}) + c.Assert(err, IsNil) + type T struct{ A, B int } + v := T{A: 1} + err = bson.Unmarshal(data, &v) + c.Assert(err, IsNil) + c.Assert(v.A, Equals, 0) + c.Assert(v.B, Equals, 2) +} + +func (s *S) TestUnmarshalZeroesMap(c *C) { + data, err := bson.Marshal(bson.M{"b": 2}) + c.Assert(err, IsNil) + m := bson.M{"a": 1} + err = bson.Unmarshal(data, &m) + c.Assert(err, IsNil) + c.Assert(m, DeepEquals, bson.M{"b": 2}) +} + +func (s *S) TestUnmarshalNonNilInterface(c *C) { + data, err := 
bson.Marshal(bson.M{"b": 2}) + c.Assert(err, IsNil) + m := bson.M{"a": 1} + var i interface{} + i = m + err = bson.Unmarshal(data, &i) + c.Assert(err, IsNil) + c.Assert(i, DeepEquals, bson.M{"b": 2}) + c.Assert(m, DeepEquals, bson.M{"a": 1}) +} + +// -------------------------------------------------------------------------- +// Some one way marshaling operations which would unmarshal differently. + +var oneWayMarshalItems = []testItemType{ + // These are being passed as pointers, and will unmarshal as values. + {bson.M{"": &bson.Binary{0x02, []byte("old")}}, + "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, + {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, + "\x05\x00\x04\x00\x00\x00\x80udef"}, + {bson.M{"": &bson.RegEx{"ab", "cd"}}, + "\x0B\x00ab\x00cd\x00"}, + {bson.M{"": &bson.JavaScript{"code", nil}}, + "\x0D\x00\x05\x00\x00\x00code\x00"}, + {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, + "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + + "\x07\x00\x00\x00\x0A\x00\x00"}, + + // There's no float32 type in BSON. Will encode as a float64. + {bson.M{"": float32(5.05)}, + "\x01\x00\x00\x00\x00@33\x14@"}, + + // The array will be unmarshaled as a slice instead. + {bson.M{"": [2]bool{true, false}}, + "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, + + // The typed slice will be unmarshaled as []interface{}. + {bson.M{"": []bool{true, false}}, + "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, + + // Will unmarshal as a []byte. + {bson.M{"": bson.Binary{0x00, []byte("yo")}}, + "\x05\x00\x02\x00\x00\x00\x00yo"}, + {bson.M{"": bson.Binary{0x02, []byte("old")}}, + "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, + + // No way to preserve the type information here. We might encode as a zero + // value, but this would mean that pointer values in structs wouldn't be + // able to correctly distinguish between unset and set to the zero value. + {bson.M{"": (*byte)(nil)}, + "\x0A\x00"}, + + // No int types smaller than int32 in BSON. Could encode this as a char, + // but it would still be ambiguous, take more, and be awkward in Go when + // loaded without typing information. + {bson.M{"": byte(8)}, + "\x10\x00\x08\x00\x00\x00"}, + + // There are no unsigned types in BSON. Will unmarshal as int32 or int64. + {bson.M{"": uint32(258)}, + "\x10\x00\x02\x01\x00\x00"}, + {bson.M{"": uint64(258)}, + "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, + {bson.M{"": uint64(258 << 32)}, + "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, + + // This will unmarshal as int. + {bson.M{"": int32(258)}, + "\x10\x00\x02\x01\x00\x00"}, + + // That's a special case. The unsigned value is too large for an int32, + // so an int64 is used instead. + {bson.M{"": uint32(1<<32 - 1)}, + "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, + {bson.M{"": uint(1<<32 - 1)}, + "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, +} + +func (s *S) TestOneWayMarshalItems(c *C) { + for i, item := range oneWayMarshalItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc(item.data), + Commentf("Failed on item %d", i)) + } +} + +// -------------------------------------------------------------------------- +// Two-way tests for user-defined structures using the samples +// from bsonspec.org. 
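+// Note that the bare field tag on specSample2 ("BSON") overrides the +// default lowercased field name as the document key.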
+ +type specSample1 struct { + Hello string +} + +type specSample2 struct { + BSON []interface{} "BSON" +} + +var structSampleItems = []testItemType{ + {&specSample1{"world"}, + "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, + {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}}, + "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + + "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, +} + +func (s *S) TestMarshalStructSampleItems(c *C) { + for i, item := range structSampleItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data, + Commentf("Failed on item %d", i)) + } +} + +func (s *S) TestUnmarshalStructSampleItems(c *C) { + for _, item := range structSampleItems { + testUnmarshal(c, item.data, item.obj) + } +} + +func (s *S) Test64bitInt(c *C) { + var i int64 = (1 << 31) + if int(i) > 0 { + data, err := bson.Marshal(bson.M{"i": int(i)}) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00")) + + var result struct{ I int } + err = bson.Unmarshal(data, &result) + c.Assert(err, IsNil) + c.Assert(int64(result.I), Equals, i) + } +} + +// -------------------------------------------------------------------------- +// Generic two-way struct marshaling tests. + +var bytevar = byte(8) +var byteptr = &bytevar + +var structItems = []testItemType{ + {&struct{ Ptr *byte }{nil}, + "\x0Aptr\x00"}, + {&struct{ Ptr *byte }{&bytevar}, + "\x10ptr\x00\x08\x00\x00\x00"}, + {&struct{ Ptr **byte }{&byteptr}, + "\x10ptr\x00\x08\x00\x00\x00"}, + {&struct{ Byte byte }{8}, + "\x10byte\x00\x08\x00\x00\x00"}, + {&struct{ Byte byte }{0}, + "\x10byte\x00\x00\x00\x00\x00"}, + {&struct { + V byte "Tag" + }{8}, + "\x10Tag\x00\x08\x00\x00\x00"}, + {&struct { + V *struct { + Byte byte + } + }{&struct{ Byte byte }{8}}, + "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, + {&struct{ priv byte }{}, ""}, + + // The order of the dumped fields should be the same in the struct. + {&struct{ A, C, B, D, F, E *byte }{}, + "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, + + {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, + "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, + {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, + "\x10v\x00" + "\x00\x00\x00\x00"}, + + // Byte arrays. + {&struct{ V [2]byte }{[2]byte{'y', 'o'}}, + "\x05v\x00\x02\x00\x00\x00\x00yo"}, +} + +func (s *S) TestMarshalStructItems(c *C) { + for i, item := range structItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc(item.data), + Commentf("Failed on item %d", i)) + } +} + +func (s *S) TestUnmarshalStructItems(c *C) { + for _, item := range structItems { + testUnmarshal(c, wrapInDoc(item.data), item.obj) + } +} + +func (s *S) TestUnmarshalRawStructItems(c *C) { + for i, item := range structItems { + raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} + zero := makeZeroDoc(item.obj) + err := raw.Unmarshal(zero) + c.Assert(err, IsNil) + c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) + } +} + +func (s *S) TestUnmarshalRawNil(c *C) { + // Regression test: shouldn't try to nil out the pointer itself, + // as it's not settable. 
+ raw := bson.Raw{0x0A, []byte{}} + err := raw.Unmarshal(&struct{}{}) + c.Assert(err, IsNil) +} + +// -------------------------------------------------------------------------- +// One-way marshaling tests. + +type dOnIface struct { + D interface{} +} + +type ignoreField struct { + Before string + Ignore string `bson:"-"` + After string +} + +var marshalItems = []testItemType{ + // Ordered document dump. Will unmarshal as a dictionary by default. + {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, + "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, + {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, + "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, + {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, + "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, + + {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, + "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, + {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, + "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, + {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, + "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, + + {&ignoreField{"before", "ignore", "after"}, + "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, + + // Marshalling a Raw document does nothing. + {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, + "anything"}, + {bson.Raw{Data: []byte(wrapInDoc("anything"))}, + "anything"}, +} + +func (s *S) TestMarshalOneWayItems(c *C) { + for _, item := range marshalItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc(item.data)) + } +} + +// -------------------------------------------------------------------------- +// One-way unmarshaling tests. + +var unmarshalItems = []testItemType{ + // Field is private. Should not attempt to unmarshal it. + {&struct{ priv byte }{}, + "\x10priv\x00\x08\x00\x00\x00"}, + + // Wrong casing. Field names are lowercased. + {&struct{ Byte byte }{}, + "\x10Byte\x00\x08\x00\x00\x00"}, + + // Ignore non-existing field. + {&struct{ Byte byte }{9}, + "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"}, + + // Do not unmarshal on ignored field. + {&ignoreField{"before", "", "after"}, + "\x02before\x00\a\x00\x00\x00before\x00" + + "\x02-\x00\a\x00\x00\x00ignore\x00" + + "\x02after\x00\x06\x00\x00\x00after\x00"}, + + // Ignore unsuitable types silently. + {map[string]string{"str": "s"}, + "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"}, + {map[string][]int{"array": []int{5, 9}}, + "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")}, + + // Wrong type. Shouldn't init pointer. + {&struct{ Str *byte }{}, + "\x02str\x00\x02\x00\x00\x00s\x00"}, + {&struct{ Str *struct{ Str string } }{}, + "\x02str\x00\x02\x00\x00\x00s\x00"}, + + // Ordered document. + {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, + "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, + + // Raw document. + {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, + "\x10byte\x00\x08\x00\x00\x00"}, + + // RawD document. 
+ {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, + "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, + + // Decode old binary. + {bson.M{"_": []byte("old")}, + "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, + + // Decode old binary without length. According to the spec, this shouldn't happen. + {bson.M{"_": []byte("old")}, + "\x05_\x00\x03\x00\x00\x00\x02old"}, +} + +func (s *S) TestUnmarshalOneWayItems(c *C) { + for _, item := range unmarshalItems { + testUnmarshal(c, wrapInDoc(item.data), item.obj) + } +} + +func (s *S) TestUnmarshalNilInStruct(c *C) { + // Nil is the default value, so we need to ensure it's indeed being set. + b := byte(1) + v := &struct{ Ptr *byte }{&b} + err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil}) +} + +// -------------------------------------------------------------------------- +// Marshalling error cases. + +type structWithDupKeys struct { + Name byte + Other byte "name" // Tag should precede. +} + +var marshalErrorItems = []testItemType{ + {bson.M{"": uint64(1 << 63)}, + "BSON has no uint64 type, and value is too large to fit correctly in an int64"}, + {bson.M{"": bson.ObjectId("tooshort")}, + "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"}, + {int64(123), + "Can't marshal int64 as a BSON document"}, + {bson.M{"": 1i}, + "Can't marshal complex128 in a BSON document"}, + {&structWithDupKeys{}, + "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, + {bson.Raw{0x0A, []byte{}}, + "Attempted to unmarshal Raw kind 10 as a document"}, + {&inlineCantPtr{&struct{ A, B int }{1, 2}}, + "Option ,inline needs a struct value or map field"}, + {&inlineDupName{1, struct{ A, B int }{2, 3}}, + "Duplicated key 'a' in struct bson_test.inlineDupName"}, + {&inlineDupMap{}, + "Multiple ,inline maps in struct bson_test.inlineDupMap"}, + {&inlineBadKeyMap{}, + "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"}, + {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}}, + `Can't have key "a" in inlined map; conflicts with struct field`}, +} + +func (s *S) TestMarshalErrorItems(c *C) { + for _, item := range marshalErrorItems { + data, err := bson.Marshal(item.obj) + c.Assert(err, ErrorMatches, item.data) + c.Assert(data, IsNil) + } +} + +// -------------------------------------------------------------------------- +// Unmarshalling error cases. + +type unmarshalErrorType struct { + obj interface{} + data string + error string +} + +var unmarshalErrorItems = []unmarshalErrorType{ + // Tag name conflicts with existing parameter. + {&structWithDupKeys{}, + "\x10name\x00\x08\x00\x00\x00", + "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, + + // Non-string map key. + {map[int]interface{}{}, + "\x10name\x00\x08\x00\x00\x00", + "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"}, + + {nil, + "\xEEname\x00", + "Unknown element kind \\(0xEE\\)"}, + + {struct{ Name bool }{}, + "\x10name\x00\x08\x00\x00\x00", + "Unmarshal can't deal with struct values. 
Use a pointer."}, + + {123, + "\x10name\x00\x08\x00\x00\x00", + "Unmarshal needs a map or a pointer to a struct."}, +} + +func (s *S) TestUnmarshalErrorItems(c *C) { + for _, item := range unmarshalErrorItems { + data := []byte(wrapInDoc(item.data)) + var value interface{} + switch reflect.ValueOf(item.obj).Kind() { + case reflect.Map, reflect.Ptr: + value = makeZeroDoc(item.obj) + case reflect.Invalid: + value = bson.M{} + default: + value = item.obj + } + err := bson.Unmarshal(data, value) + c.Assert(err, ErrorMatches, item.error) + } +} + +type unmarshalRawErrorType struct { + obj interface{} + raw bson.Raw + error string +} + +var unmarshalRawErrorItems = []unmarshalRawErrorType{ + // Tag name conflicts with existing parameter. + {&structWithDupKeys{}, + bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, + "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, + + {&struct{}{}, + bson.Raw{0xEE, []byte{}}, + "Unknown element kind \\(0xEE\\)"}, + + {struct{ Name bool }{}, + bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, + "Raw Unmarshal can't deal with struct values. Use a pointer."}, + + {123, + bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, + "Raw Unmarshal needs a map or a valid pointer."}, +} + +func (s *S) TestUnmarshalRawErrorItems(c *C) { + for i, item := range unmarshalRawErrorItems { + err := item.raw.Unmarshal(item.obj) + c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item)) + } +} + +var corruptedData = []string{ + "\x04\x00\x00\x00\x00", // Shorter than minimum + "\x06\x00\x00\x00\x00", // Not enough data + "\x05\x00\x00", // Broken length + "\x05\x00\x00\x00\xff", // Corrupted termination + "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string + + // Array end past end of string (s[2]=0x07 is correct) + wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"), + + // Array end within string, but past acceptable. + wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"), + + // Document end within string, but past acceptable. + wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"), + + // String with corrupted end. + wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), +} + +func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { + for _, data := range corruptedData { + err := bson.Unmarshal([]byte(data), bson.M{}) + c.Assert(err, ErrorMatches, "Document is corrupted") + + err = bson.Unmarshal([]byte(data), &struct{}{}) + c.Assert(err, ErrorMatches, "Document is corrupted") + } +} + +// -------------------------------------------------------------------------- +// Setter test cases. + +var setterResult = map[string]error{} + +type setterType struct { + received interface{} +} + +func (o *setterType) SetBSON(raw bson.Raw) error { + err := raw.Unmarshal(&o.received) + if err != nil { + panic("The panic:" + err.Error()) + } + if s, ok := o.received.(string); ok { + if result, ok := setterResult[s]; ok { + return result + } + } + return nil +} + +type ptrSetterDoc struct { + Field *setterType "_" +} + +type valSetterDoc struct { + Field setterType "_" +} + +func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { + for _, item := range allItems { + for i := 0; i != 2; i++ { + var field *setterType + if i == 0 { + obj := &ptrSetterDoc{} + err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) + c.Assert(err, IsNil) + field = obj.Field + } else { + obj := &valSetterDoc{} + err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) + c.Assert(err, IsNil) + field = &obj.Field + } + if item.data == "" { + // Nothing to unmarshal. Should be untouched. 
+ if i == 0 { + c.Assert(field, IsNil) + } else { + c.Assert(field.received, IsNil) + } + } else { + expected := item.obj.(bson.M)["_"] + c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) + c.Assert(field.received, DeepEquals, expected) + } + } + } +} + +func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { + obj := &setterType{} + err := bson.Unmarshal([]byte(sampleItems[0].data), obj) + c.Assert(err, IsNil) + c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) +} + +func (s *S) TestUnmarshalSetterOmits(c *C) { + setterResult["2"] = &bson.TypeError{} + setterResult["4"] = &bson.TypeError{} + defer func() { + delete(setterResult, "2") + delete(setterResult, "4") + }() + + m := map[string]*setterType{} + data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + + "\x02def\x00\x02\x00\x00\x002\x00" + + "\x02ghi\x00\x02\x00\x00\x003\x00" + + "\x02jkl\x00\x02\x00\x00\x004\x00") + err := bson.Unmarshal([]byte(data), m) + c.Assert(err, IsNil) + c.Assert(m["abc"], NotNil) + c.Assert(m["def"], IsNil) + c.Assert(m["ghi"], NotNil) + c.Assert(m["jkl"], IsNil) + + c.Assert(m["abc"].received, Equals, "1") + c.Assert(m["ghi"].received, Equals, "3") +} + +func (s *S) TestUnmarshalSetterErrors(c *C) { + boom := errors.New("BOOM") + setterResult["2"] = boom + defer delete(setterResult, "2") + + m := map[string]*setterType{} + data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + + "\x02def\x00\x02\x00\x00\x002\x00" + + "\x02ghi\x00\x02\x00\x00\x003\x00") + err := bson.Unmarshal([]byte(data), m) + c.Assert(err, Equals, boom) + c.Assert(m["abc"], NotNil) + c.Assert(m["def"], IsNil) + c.Assert(m["ghi"], IsNil) + + c.Assert(m["abc"].received, Equals, "1") +} + +func (s *S) TestDMap(c *C) { + d := bson.D{{"a", 1}, {"b", 2}} + c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) +} + +func (s *S) TestUnmarshalSetterSetZero(c *C) { + setterResult["foo"] = bson.SetZero + defer delete(setterResult, "foo") + + data, err := bson.Marshal(bson.M{"field": "foo"}) + c.Assert(err, IsNil) + + m := map[string]*setterType{} + err = bson.Unmarshal([]byte(data), m) + c.Assert(err, IsNil) + + value, ok := m["field"] + c.Assert(ok, Equals, true) + c.Assert(value, IsNil) +} + +// -------------------------------------------------------------------------- +// Getter test cases.
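+// A value whose type provides a GetBSON method is marshalled as whatever +// that method returns, mirroring how SetBSON hooks into unmarshalling.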
+ +type typeWithGetter struct { + result interface{} + err error +} + +func (t *typeWithGetter) GetBSON() (interface{}, error) { + if t == nil { + return "", nil + } + return t.result, t.err +} + +type docWithGetterField struct { + Field *typeWithGetter "_" +} + +func (s *S) TestMarshalAllItemsWithGetter(c *C) { + for i, item := range allItems { + if item.data == "" { + continue + } + obj := &docWithGetterField{} + obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} + data, err := bson.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, wrapInDoc(item.data), + Commentf("Failed on item #%d", i)) + } +} + +func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { + obj := &typeWithGetter{result: sampleItems[0].obj} + data, err := bson.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, sampleItems[0].data) +} + +func (s *S) TestGetterErrors(c *C) { + e := errors.New("oops") + + obj1 := &docWithGetterField{} + obj1.Field = &typeWithGetter{sampleItems[0].obj, e} + data, err := bson.Marshal(obj1) + c.Assert(err, ErrorMatches, "oops") + c.Assert(data, IsNil) + + obj2 := &typeWithGetter{sampleItems[0].obj, e} + data, err = bson.Marshal(obj2) + c.Assert(err, ErrorMatches, "oops") + c.Assert(data, IsNil) +} + +type intGetter int64 + +func (t intGetter) GetBSON() (interface{}, error) { + return int64(t), nil +} + +type typeWithIntGetter struct { + V intGetter ",minsize" +} + +func (s *S) TestMarshalShortWithGetter(c *C) { + obj := typeWithIntGetter{42} + data, err := bson.Marshal(obj) + c.Assert(err, IsNil) + m := bson.M{} + err = bson.Unmarshal(data, m) + c.Assert(err, IsNil) + c.Assert(m["v"], Equals, 42) +} + +func (s *S) TestMarshalWithGetterNil(c *C) { + obj := docWithGetterField{} + data, err := bson.Marshal(obj) + c.Assert(err, IsNil) + m := bson.M{} + err = bson.Unmarshal(data, m) + c.Assert(err, IsNil) + c.Assert(m, DeepEquals, bson.M{"_": ""}) +} + +// -------------------------------------------------------------------------- +// Cross-type conversion tests. 
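+// The cond* and short* helper types below exercise the ",omitempty" and +// ",minsize" field flags, while the inline* types exercise ",inline".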
+ +type crossTypeItem struct { + obj1 interface{} + obj2 interface{} +} + +type condStr struct { + V string ",omitempty" +} +type condStrNS struct { + V string `a:"A" bson:",omitempty" b:"B"` +} +type condBool struct { + V bool ",omitempty" +} +type condInt struct { + V int ",omitempty" +} +type condUInt struct { + V uint ",omitempty" +} +type condFloat struct { + V float64 ",omitempty" +} +type condIface struct { + V interface{} ",omitempty" +} +type condPtr struct { + V *bool ",omitempty" +} +type condSlice struct { + V []string ",omitempty" +} +type condMap struct { + V map[string]int ",omitempty" +} +type namedCondStr struct { + V string "myv,omitempty" +} +type condTime struct { + V time.Time ",omitempty" +} +type condStruct struct { + V struct{ A []int } ",omitempty" +} + +type shortInt struct { + V int64 ",minsize" +} +type shortUint struct { + V uint64 ",minsize" +} +type shortIface struct { + V interface{} ",minsize" +} +type shortPtr struct { + V *int64 ",minsize" +} +type shortNonEmptyInt struct { + V int64 ",minsize,omitempty" +} + +type inlineInt struct { + V struct{ A, B int } ",inline" +} +type inlineCantPtr struct { + V *struct{ A, B int } ",inline" +} +type inlineDupName struct { + A int + V struct{ A, B int } ",inline" +} +type inlineMap struct { + A int + M map[string]interface{} ",inline" +} +type inlineMapInt struct { + A int + M map[string]int ",inline" +} +type inlineMapMyM struct { + A int + M MyM ",inline" +} +type inlineDupMap struct { + M1 map[string]interface{} ",inline" + M2 map[string]interface{} ",inline" +} +type inlineBadKeyMap struct { + M map[int]int ",inline" +} + +type ( + MyString string + MyBytes []byte + MyBool bool + MyD []bson.DocElem + MyRawD []bson.RawDocElem + MyM map[string]interface{} +) + +var ( + truevar = true + falsevar = false + + int64var = int64(42) + int64ptr = &int64var + intvar = int(42) + intptr = &intvar +) + +func parseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + return u +} + +// That's a pretty fun test. It will dump the first item, generate a zero +// value equivalent to the second one, load the dumped data onto it, and then +// verify that the resulting value is deep-equal to the untouched second value. +// Then, it will do the same in the *opposite* direction! 
+var twoWayCrossItems = []crossTypeItem{ + // int<=>int + {&struct{ I int }{42}, &struct{ I int8 }{42}}, + {&struct{ I int }{42}, &struct{ I int32 }{42}}, + {&struct{ I int }{42}, &struct{ I int64 }{42}}, + {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, + {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, + {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, + + // uint<=>uint + {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, + {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, + {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, + {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, + {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, + {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, + + // float32<=>float64 + {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, + + // int<=>uint + {&struct{ I uint }{42}, &struct{ I int }{42}}, + {&struct{ I uint }{42}, &struct{ I int8 }{42}}, + {&struct{ I uint }{42}, &struct{ I int32 }{42}}, + {&struct{ I uint }{42}, &struct{ I int64 }{42}}, + {&struct{ I uint8 }{42}, &struct{ I int }{42}}, + {&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, + {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, + {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, + {&struct{ I uint32 }{42}, &struct{ I int }{42}}, + {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, + {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, + {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, + {&struct{ I uint64 }{42}, &struct{ I int }{42}}, + {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, + {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, + {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, + + // int <=> float + {&struct{ I int }{42}, &struct{ I float64 }{42}}, + + // int <=> bool + {&struct{ I int }{1}, &struct{ I bool }{true}}, + {&struct{ I int }{0}, &struct{ I bool }{false}}, + + // uint <=> float64 + {&struct{ I uint }{42}, &struct{ I float64 }{42}}, + + // uint <=> bool + {&struct{ I uint }{1}, &struct{ I bool }{true}}, + {&struct{ I uint }{0}, &struct{ I bool }{false}}, + + // float64 <=> bool + {&struct{ I float64 }{1}, &struct{ I bool }{true}}, + {&struct{ I float64 }{0}, &struct{ I bool }{false}}, + + // string <=> string and string <=> []byte + {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, + {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, + {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, + + // map <=> struct + {&struct { + A struct { + B, C int + } + }{struct{ B, C int }{1, 2}}, + map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, + + {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, + {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, + {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, + {&struct{ A uint }{42}, map[string]int{"a": 42}}, + {&struct{ A uint }{42}, map[string]float64{"a": 42}}, + {&struct{ A uint }{1}, map[string]bool{"a": true}}, + {&struct{ A int }{42}, map[string]uint{"a": 42}}, + {&struct{ A int }{42}, map[string]float64{"a": 42}}, + {&struct{ A int }{1}, map[string]bool{"a": true}}, + {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, + {&struct{ A float64 }{42}, map[string]int{"a": 42}}, + {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, + {&struct{ A float64 }{1}, map[string]bool{"a": true}}, + {&struct{ A bool }{true}, map[string]int{"a": 1}}, + {&struct{ A bool }{true}, map[string]uint{"a": 1}}, + {&struct{ A bool }{true}, map[string]float64{"a": 1}}, + {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, + + // 
url.URL <=> string + {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, + {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, + + // Slices + {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, + {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, + + // Conditionals + {&condBool{true}, map[string]bool{"v": true}}, + {&condBool{}, map[string]bool{}}, + {&condInt{1}, map[string]int{"v": 1}}, + {&condInt{}, map[string]int{}}, + {&condUInt{1}, map[string]uint{"v": 1}}, + {&condUInt{}, map[string]uint{}}, + {&condFloat{}, map[string]int{}}, + {&condStr{"yo"}, map[string]string{"v": "yo"}}, + {&condStr{}, map[string]string{}}, + {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, + {&condStrNS{}, map[string]string{}}, + {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, + {&condSlice{}, map[string][]string{}}, + {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, + {&condMap{}, map[string][]string{}}, + {&condIface{"yo"}, map[string]string{"v": "yo"}}, + {&condIface{""}, map[string]string{"v": ""}}, + {&condIface{}, map[string]string{}}, + {&condPtr{&truevar}, map[string]bool{"v": true}}, + {&condPtr{&falsevar}, map[string]bool{"v": false}}, + {&condPtr{}, map[string]string{}}, + + {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, + {&condTime{}, map[string]string{}}, + + {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, + {&condStruct{struct{ A []int }{}}, bson.M{}}, + + {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, + {&namedCondStr{}, map[string]string{}}, + + {&shortInt{1}, map[string]interface{}{"v": 1}}, + {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, + {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, + {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, + + {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, + {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, + {&shortNonEmptyInt{}, map[string]interface{}{}}, + + {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, + {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, + {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, + {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, + {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, + {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + + // []byte <=> MyBytes + {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, + {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, + {&struct{ B MyBytes }{}, map[string]bool{}}, + {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, + + // bool <=> MyBool + {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, + {&struct{ B MyBool }{}, map[string]bool{"b": false}}, + {&struct{ B MyBool }{}, map[string]string{}}, + {&struct{ B bool }{}, map[string]MyBool{"b": false}}, + + // arrays + {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, + + // zero time + {&struct{ V time.Time }{}, 
map[string]interface{}{"v": time.Time{}}}, + + // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds + {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, + map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, + + // bson.D <=> []DocElem + {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, + {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, + + // bson.RawD <=> []RawDocElem + {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, + {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, + + // bson.M <=> map + {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, + {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, + + // bson.M <=> map[MyString] + {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, +} + +// Same thing, but only one way (obj1 => obj2). +var oneWayCrossItems = []crossTypeItem{ + // map <=> struct + {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}}, + + // inline map elides badly typed values + {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}}, + + // Can't decode int into struct. + {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}}, + + // Would get decoded into a int32 too in the opposite direction. + {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, +} + +func testCrossPair(c *C, dump interface{}, load interface{}) { + c.Logf("Dump: %#v", dump) + c.Logf("Load: %#v", load) + zero := makeZeroDoc(load) + data, err := bson.Marshal(dump) + c.Assert(err, IsNil) + c.Logf("Dumped: %#v", string(data)) + err = bson.Unmarshal(data, zero) + c.Assert(err, IsNil) + c.Logf("Loaded: %#v", zero) + c.Assert(zero, DeepEquals, load) +} + +func (s *S) TestTwoWayCrossPairs(c *C) { + for _, item := range twoWayCrossItems { + testCrossPair(c, item.obj1, item.obj2) + testCrossPair(c, item.obj2, item.obj1) + } +} + +func (s *S) TestOneWayCrossPairs(c *C) { + for _, item := range oneWayCrossItems { + testCrossPair(c, item.obj1, item.obj2) + } +} + +// -------------------------------------------------------------------------- +// ObjectId hex representation test. + +func (s *S) TestObjectIdHex(c *C) { + id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") + c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`) + c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9") +} + +func (s *S) TestIsObjectIdHex(c *C) { + test := []struct { + id string + valid bool + }{ + {"4d88e15b60f486e428412dc9", true}, + {"4d88e15b60f486e428412dc", false}, + {"4d88e15b60f486e428412dc9e", false}, + {"4d88e15b60f486e428412dcx", false}, + } + for _, t := range test { + c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid) + } +} + +// -------------------------------------------------------------------------- +// ObjectId parts extraction tests. 
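+// An ObjectId packs, in order: a 4-byte big-endian timestamp, a 3-byte +// machine id, a 2-byte pid, and a 3-byte big-endian counter.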
+ +type objectIdParts struct { + id bson.ObjectId + timestamp int64 + machine []byte + pid uint16 + counter int32 +} + +var objectIds = []objectIdParts{ + objectIdParts{ + bson.ObjectIdHex("4d88e15b60f486e428412dc9"), + 1300816219, + []byte{0x60, 0xf4, 0x86}, + 0xe428, + 4271561, + }, + objectIdParts{ + bson.ObjectIdHex("000000000000000000000000"), + 0, + []byte{0x00, 0x00, 0x00}, + 0x0000, + 0, + }, + objectIdParts{ + bson.ObjectIdHex("00000000aabbccddee000001"), + 0, + []byte{0xaa, 0xbb, 0xcc}, + 0xddee, + 1, + }, +} + +func (s *S) TestObjectIdPartsExtraction(c *C) { + for i, v := range objectIds { + t := time.Unix(v.timestamp, 0) + c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) + c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) + c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) + c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) + } +} + +func (s *S) TestNow(c *C) { + before := time.Now() + time.Sleep(1e6) + now := bson.Now() + time.Sleep(1e6) + after := time.Now() + c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) +} + +// -------------------------------------------------------------------------- +// ObjectId generation tests. + +func (s *S) TestNewObjectId(c *C) { + // Generate 10 ids + ids := make([]bson.ObjectId, 10) + for i := 0; i < 10; i++ { + ids[i] = bson.NewObjectId() + } + for i := 1; i < 10; i++ { + prevId := ids[i-1] + id := ids[i] + // Test for uniqueness among all other 9 generated ids + for j, tid := range ids { + if j != i { + c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) + } + } + // Check that timestamp was incremented and is within 30 seconds of the previous one + secs := id.Time().Sub(prevId.Time()).Seconds() + c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) + // Check that machine ids are the same + c.Assert(id.Machine(), DeepEquals, prevId.Machine()) + // Check that pids are the same + c.Assert(id.Pid(), Equals, prevId.Pid()) + // Test for proper increment + delta := int(id.Counter() - prevId.Counter()) + c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) + } +} + +func (s *S) TestNewObjectIdWithTime(c *C) { + t := time.Unix(12345678, 0) + id := bson.NewObjectIdWithTime(t) + c.Assert(id.Time(), Equals, t) + c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) + c.Assert(int(id.Pid()), Equals, 0) + c.Assert(int(id.Counter()), Equals, 0) +} + +// -------------------------------------------------------------------------- +// ObjectId JSON marshalling. 
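+// In JSON an ObjectId round-trips through its 24-character hex form, such +// as "4d88e15b60f486e428412dc9".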
+ +type jsonType struct { + Id *bson.ObjectId +} + +func (s *S) TestObjectIdJSONMarshaling(c *C) { + id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") + v := jsonType{Id: &id} + data, err := json.Marshal(&v) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, `{"Id":"4d88e15b60f486e428412dc9"}`) +} + +func (s *S) TestObjectIdJSONUnmarshaling(c *C) { + data := []byte(`{"Id":"4d88e15b60f486e428412dc9"}`) + v := jsonType{} + err := json.Unmarshal(data, &v) + c.Assert(err, IsNil) + c.Assert(*v.Id, Equals, bson.ObjectIdHex("4d88e15b60f486e428412dc9")) +} + +func (s *S) TestObjectIdJSONUnmarshalingError(c *C) { + v := jsonType{} + err := json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dc9A"}`), &v) + c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`) + err = json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dcZ"}`), &v) + c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`) +} + +// -------------------------------------------------------------------------- +// Some simple benchmarks. + +type BenchT struct { + A, B, C, D, E, F string +} + +func BenchmarkUnmarshalStruct(b *testing.B) { + v := BenchT{A: "A", D: "D", E: "E"} + data, err := bson.Marshal(&v) + if err != nil { + panic(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = bson.Unmarshal(data, &v) + } + if err != nil { + panic(err) + } +} + +func BenchmarkUnmarshalMap(b *testing.B) { + m := bson.M{"a": "a", "d": "d", "e": "e"} + data, err := bson.Marshal(&m) + if err != nil { + panic(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = bson.Unmarshal(data, &m) + } + if err != nil { + panic(err) + } +} diff --git a/vendor/labix.org/v2/mgo/bson/decode.go b/vendor/labix.org/v2/mgo/bson/decode.go new file mode 100644 index 0000000..1ec034e --- /dev/null +++ b/vendor/labix.org/v2/mgo/bson/decode.go @@ -0,0 +1,795 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// gobson - BSON library for Go.
+ +package bson + +import ( + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" +) + +type decoder struct { + in []byte + i int + docType reflect.Type +} + +var typeM = reflect.TypeOf(M{}) + +func newDecoder(in []byte) *decoder { + return &decoder{in, 0, typeM} +} + +// -------------------------------------------------------------------------- +// Some helper functions. + +func corrupted() { + panic("Document is corrupted") +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +// -------------------------------------------------------------------------- +// Unmarshaling of documents. + +const ( + setterUnknown = iota + setterNone + setterType + setterAddr +) + +var setterStyle map[reflect.Type]int +var setterIface reflect.Type +var setterMutex sync.RWMutex + +func init() { + var iface Setter + setterIface = reflect.TypeOf(&iface).Elem() + setterStyle = make(map[reflect.Type]int) +} + +func getSetter(outt reflect.Type, out reflect.Value) Setter { + setterMutex.RLock() + style := setterStyle[outt] + setterMutex.RUnlock() + if style == setterNone { + return nil + } + if style == setterUnknown { + setterMutex.Lock() + defer setterMutex.Unlock() + if outt.Implements(setterIface) { + setterStyle[outt] = setterType + } else if reflect.PtrTo(outt).Implements(setterIface) { + setterStyle[outt] = setterAddr + } else { + setterStyle[outt] = setterNone + return nil + } + style = setterStyle[outt] + } + if style == setterAddr { + if !out.CanAddr() { + return nil + } + out = out.Addr() + } else if outt.Kind() == reflect.Ptr && out.IsNil() { + out.Set(reflect.New(outt.Elem())) + } + return out.Interface().(Setter) +} + +func clearMap(m reflect.Value) { + var none reflect.Value + for _, k := range m.MapKeys() { + m.SetMapIndex(k, none) + } +} + +func (d *decoder) readDocTo(out reflect.Value) { + var elemType reflect.Type + outt := out.Type() + outk := outt.Kind() + + for { + if outk == reflect.Ptr && out.IsNil() { + out.Set(reflect.New(outt.Elem())) + } + if setter := getSetter(outt, out); setter != nil { + var raw Raw + d.readDocTo(reflect.ValueOf(&raw)) + err := setter.SetBSON(raw) + if _, ok := err.(*TypeError); err != nil && !ok { + panic(err) + } + return + } + if outk == reflect.Ptr { + out = out.Elem() + outt = out.Type() + outk = out.Kind() + continue + } + break + } + + var fieldsMap map[string]fieldInfo + var inlineMap reflect.Value + start := d.i + + origout := out + if outk == reflect.Interface { + if d.docType.Kind() == reflect.Map { + mv := reflect.MakeMap(d.docType) + out.Set(mv) + out = mv + } else { + dv := reflect.New(d.docType).Elem() + out.Set(dv) + out = dv + } + outt = out.Type() + outk = outt.Kind() + } + + docType := d.docType + keyType := typeString + convertKey := false + switch outk { + case reflect.Map: + keyType = outt.Key() + if keyType.Kind() != reflect.String { + panic("BSON map must have string keys. 
Got: " + outt.String()) + } + if keyType != typeString { + convertKey = true + } + elemType = outt.Elem() + if elemType == typeIface { + d.docType = outt + } + if out.IsNil() { + out.Set(reflect.MakeMap(out.Type())) + } else if out.Len() > 0 { + clearMap(out) + } + case reflect.Struct: + if outt != typeRaw { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + fieldsMap = sinfo.FieldsMap + out.Set(sinfo.Zero) + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + if !inlineMap.IsNil() && inlineMap.Len() > 0 { + clearMap(inlineMap) + } + elemType = inlineMap.Type().Elem() + if elemType == typeIface { + d.docType = inlineMap.Type() + } + } + } + case reflect.Slice: + switch outt.Elem() { + case typeDocElem: + origout.Set(d.readDocElems(outt)) + return + case typeRawDocElem: + origout.Set(d.readRawDocElems(outt)) + return + } + fallthrough + default: + panic("Unsupported document type for unmarshalling: " + out.Type().String()) + } + + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + name := d.readCStr() + if d.i >= end { + corrupted() + } + + switch outk { + case reflect.Map: + e := reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + k := reflect.ValueOf(name) + if convertKey { + k = k.Convert(keyType) + } + out.SetMapIndex(k, e) + } + case reflect.Struct: + if outt == typeRaw { + d.dropElem(kind) + } else { + if info, ok := fieldsMap[name]; ok { + if info.Inline == nil { + d.readElemTo(out.Field(info.Num), kind) + } else { + d.readElemTo(out.FieldByIndex(info.Inline), kind) + } + } else if inlineMap.IsValid() { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + e := reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + inlineMap.SetMapIndex(reflect.ValueOf(name), e) + } + } else { + d.dropElem(kind) + } + } + case reflect.Slice: + } + + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } + d.docType = docType + + if outt == typeRaw { + out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]})) + } +} + +func (d *decoder) readArrayDocTo(out reflect.Value) { + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + i := 0 + l := out.Len() + for d.in[d.i] != '\x00' { + if i >= l { + panic("Length mismatch on array field") + } + kind := d.readByte() + for d.i < end && d.in[d.i] != '\x00' { + d.i++ + } + if d.i >= end { + corrupted() + } + d.i++ + d.readElemTo(out.Index(i), kind) + if d.i >= end { + corrupted() + } + i++ + } + if i != l { + panic("Length mismatch on array field") + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } +} + +func (d *decoder) readSliceDoc(t reflect.Type) interface{} { + tmp := make([]reflect.Value, 0, 8) + elemType := t.Elem() + + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + for d.i < end && d.in[d.i] != '\x00' { + d.i++ + } + if d.i >= end { + corrupted() + } + d.i++ + e := reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + tmp = append(tmp, e) + } + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } + + n := len(tmp) + slice := reflect.MakeSlice(t, n, n) + for i := 0; i != n; i++ { + slice.Index(i).Set(tmp[i]) + } + return slice.Interface() +} + +var typeSlice = 
reflect.TypeOf([]interface{}{}) +var typeIface = typeSlice.Elem() + +func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { + docType := d.docType + d.docType = typ + slice := make([]DocElem, 0, 8) + d.readDocWith(func(kind byte, name string) { + e := DocElem{Name: name} + v := reflect.ValueOf(&e.Value) + if d.readElemTo(v.Elem(), kind) { + slice = append(slice, e) + } + }) + slicev := reflect.New(typ).Elem() + slicev.Set(reflect.ValueOf(slice)) + d.docType = docType + return slicev +} + +func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { + docType := d.docType + d.docType = typ + slice := make([]RawDocElem, 0, 8) + d.readDocWith(func(kind byte, name string) { + e := RawDocElem{Name: name} + v := reflect.ValueOf(&e.Value) + if d.readElemTo(v.Elem(), kind) { + slice = append(slice, e) + } + }) + slicev := reflect.New(typ).Elem() + slicev.Set(reflect.ValueOf(slice)) + d.docType = docType + return slicev +} + +func (d *decoder) readDocWith(f func(kind byte, name string)) { + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + name := d.readCStr() + if d.i >= end { + corrupted() + } + f(kind, name) + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } +} + +// -------------------------------------------------------------------------- +// Unmarshaling of individual elements within a document. + +var blackHole = settableValueOf(struct{}{}) + +func (d *decoder) dropElem(kind byte) { + d.readElemTo(blackHole, kind) +} + +// Attempt to decode an element from the document and put it into out. +// If the types are not compatible, the returned ok value will be +// false and out will be unchanged. +func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { + + start := d.i + + if kind == '\x03' { + // Special case for documents. Delegate to readDocTo(). + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: + d.readDocTo(out) + default: + switch out.Interface().(type) { + case D: + out.Set(d.readDocElems(out.Type())) + case RawD: + out.Set(d.readRawDocElems(out.Type())) + default: + d.readDocTo(blackHole) + } + } + return true + } + + var in interface{} + + switch kind { + case 0x01: // Float64 + in = d.readFloat64() + case 0x02: // UTF-8 string + in = d.readStr() + case 0x03: // Document + panic("Can't happen. Handled above.") + case 0x04: // Array + outt := out.Type() + for outt.Kind() == reflect.Ptr { + outt = outt.Elem() + } + switch outt.Kind() { + case reflect.Array: + d.readArrayDocTo(out) + return true + case reflect.Slice: + in = d.readSliceDoc(outt) + default: + in = d.readSliceDoc(typeSlice) + } + case 0x05: // Binary + b := d.readBinary() + if b.Kind == 0x00 || b.Kind == 0x02 { + in = b.Data + } else { + in = b + } + case 0x06: // Undefined (obsolete, but still seen in the wild) + in = Undefined + case 0x07: // ObjectId + in = ObjectId(d.readBytes(12)) + case 0x08: // Bool + in = d.readBool() + case 0x09: // Timestamp + // MongoDB handles timestamps as milliseconds. + i := d.readInt64() + if i == -62135596800000 { + in = time.Time{} // In UTC for convenience. 
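+ // Note: -62135596800000 is time.Time{}.Unix()*1000, the zero time (January 1, year 1 UTC) expressed in milliseconds.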
+ } else { + in = time.Unix(i/1e3, i%1e3*1e6) + } + case 0x0A: // Nil + in = nil + case 0x0B: // RegEx + in = d.readRegEx() + case 0x0D: // JavaScript without scope + in = JavaScript{Code: d.readStr()} + case 0x0E: // Symbol + in = Symbol(d.readStr()) + case 0x0F: // JavaScript with scope + d.i += 4 // Skip length + js := JavaScript{d.readStr(), make(M)} + d.readDocTo(reflect.ValueOf(js.Scope)) + in = js + case 0x10: // Int32 + in = int(d.readInt32()) + case 0x11: // Mongo-specific timestamp + in = MongoTimestamp(d.readInt64()) + case 0x12: // Int64 + in = d.readInt64() + case 0x7F: // Max key + in = MaxKey + case 0xFF: // Min key + in = MinKey + default: + panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) + } + + outt := out.Type() + + if outt == typeRaw { + out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]})) + return true + } + + if setter := getSetter(outt, out); setter != nil { + err := setter.SetBSON(Raw{kind, d.in[start:d.i]}) + if err == SetZero { + out.Set(reflect.Zero(outt)) + return true + } + if err == nil { + return true + } + if _, ok := err.(*TypeError); !ok { + panic(err) + } + return false + } + + if in == nil { + out.Set(reflect.Zero(outt)) + return true + } + + outk := outt.Kind() + + // Dereference and initialize pointer if necessary. + first := true + for outk == reflect.Ptr { + if !out.IsNil() { + out = out.Elem() + } else { + elem := reflect.New(outt.Elem()) + if first { + // Only set if value is compatible. + first = false + defer func(out, elem reflect.Value) { + if good { + out.Set(elem) + } + }(out, elem) + } else { + out.Set(elem) + } + out = elem + } + outt = out.Type() + outk = outt.Kind() + } + + inv := reflect.ValueOf(in) + if outt == inv.Type() { + out.Set(inv) + return true + } + + switch outk { + case reflect.Interface: + out.Set(inv) + return true + case reflect.String: + switch inv.Kind() { + case reflect.String: + out.SetString(inv.String()) + return true + case reflect.Slice: + if b, ok := in.([]byte); ok { + out.SetString(string(b)) + return true + } + } + case reflect.Slice, reflect.Array: + // Remember, array (0x04) slices are built with the correct + // element type. If we are here, must be a cross BSON kind + // conversion (e.g. 0x05 unmarshalling on string). + if outt.Elem().Kind() != reflect.Uint8 { + break + } + switch inv.Kind() { + case reflect.String: + slice := []byte(inv.String()) + out.Set(reflect.ValueOf(slice)) + return true + case reflect.Slice: + switch outt.Kind() { + case reflect.Array: + reflect.Copy(out, inv) + case reflect.Slice: + out.SetBytes(inv.Bytes()) + } + return true + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch inv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetInt(inv.Int()) + return true + case reflect.Float32, reflect.Float64: + out.SetInt(int64(inv.Float())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetInt(1) + } else { + out.SetInt(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. 
No uint types in BSON?") + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch inv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetUint(uint64(inv.Int())) + return true + case reflect.Float32, reflect.Float64: + out.SetUint(uint64(inv.Float())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetUint(1) + } else { + out.SetUint(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON.") + } + case reflect.Float32, reflect.Float64: + switch inv.Kind() { + case reflect.Float32, reflect.Float64: + out.SetFloat(inv.Float()) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetFloat(float64(inv.Int())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetFloat(1) + } else { + out.SetFloat(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON?") + } + case reflect.Bool: + switch inv.Kind() { + case reflect.Bool: + out.SetBool(inv.Bool()) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetBool(inv.Int() != 0) + return true + case reflect.Float32, reflect.Float64: + out.SetBool(inv.Float() != 0) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON?") + } + case reflect.Struct: + if outt == typeURL && inv.Kind() == reflect.String { + u, err := url.Parse(inv.String()) + if err != nil { + panic(err) + } + out.Set(reflect.ValueOf(u).Elem()) + return true + } + } + + return false +} + +// -------------------------------------------------------------------------- +// Parsers of basic types. + +func (d *decoder) readRegEx() RegEx { + re := RegEx{} + re.Pattern = d.readCStr() + re.Options = d.readCStr() + return re +} + +func (d *decoder) readBinary() Binary { + l := d.readInt32() + b := Binary{} + b.Kind = d.readByte() + b.Data = d.readBytes(l) + if b.Kind == 0x02 && len(b.Data) >= 4 { + // Weird obsolete format with redundant length. 
+ b.Data = b.Data[4:] + } + return b +} + +func (d *decoder) readStr() string { + l := d.readInt32() + b := d.readBytes(l - 1) + if d.readByte() != '\x00' { + corrupted() + } + return string(b) +} + +func (d *decoder) readCStr() string { + start := d.i + end := start + l := len(d.in) + for ; end != l; end++ { + if d.in[end] == '\x00' { + break + } + } + d.i = end + 1 + if d.i > l { + corrupted() + } + return string(d.in[start:end]) +} + +func (d *decoder) readBool() bool { + if d.readByte() == 1 { + return true + } + return false +} + +func (d *decoder) readFloat64() float64 { + return math.Float64frombits(uint64(d.readInt64())) +} + +func (d *decoder) readInt32() int32 { + b := d.readBytes(4) + return int32((uint32(b[0]) << 0) | + (uint32(b[1]) << 8) | + (uint32(b[2]) << 16) | + (uint32(b[3]) << 24)) +} + +func (d *decoder) readInt64() int64 { + b := d.readBytes(8) + return int64((uint64(b[0]) << 0) | + (uint64(b[1]) << 8) | + (uint64(b[2]) << 16) | + (uint64(b[3]) << 24) | + (uint64(b[4]) << 32) | + (uint64(b[5]) << 40) | + (uint64(b[6]) << 48) | + (uint64(b[7]) << 56)) +} + +func (d *decoder) readByte() byte { + i := d.i + d.i++ + if d.i > len(d.in) { + corrupted() + } + return d.in[i] +} + +func (d *decoder) readBytes(length int32) []byte { + start := d.i + d.i += int(length) + if d.i > len(d.in) { + corrupted() + } + return d.in[start : start+int(length)] +} diff --git a/vendor/labix.org/v2/mgo/bson/encode.go b/vendor/labix.org/v2/mgo/bson/encode.go new file mode 100644 index 0000000..6ba383a --- /dev/null +++ b/vendor/labix.org/v2/mgo/bson/encode.go @@ -0,0 +1,462 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// gobson - BSON library for Go. + +package bson + +import ( + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// -------------------------------------------------------------------------- +// Some internal infrastructure. 
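+// These reflect.Type values are computed once up front; the encoder compares concrete types against them (e.g. in the v.Type() switches of addElem below).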
+ +var ( + typeBinary = reflect.TypeOf(Binary{}) + typeObjectId = reflect.TypeOf(ObjectId("")) + typeSymbol = reflect.TypeOf(Symbol("")) + typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) + typeOrderKey = reflect.TypeOf(MinKey) + typeDocElem = reflect.TypeOf(DocElem{}) + typeRawDocElem = reflect.TypeOf(RawDocElem{}) + typeRaw = reflect.TypeOf(Raw{}) + typeURL = reflect.TypeOf(url.URL{}) + typeTime = reflect.TypeOf(time.Time{}) + typeString = reflect.TypeOf("") +) + +const itoaCacheSize = 32 + +var itoaCache []string + +func init() { + itoaCache = make([]string, itoaCacheSize) + for i := 0; i != itoaCacheSize; i++ { + itoaCache[i] = strconv.Itoa(i) + } +} + +func itoa(i int) string { + if i < itoaCacheSize { + return itoaCache[i] + } + return strconv.Itoa(i) +} + +// -------------------------------------------------------------------------- +// Marshaling of the document value itself. + +type encoder struct { + out []byte +} + +func (e *encoder) addDoc(v reflect.Value) { + for { + if vi, ok := v.Interface().(Getter); ok { + getv, err := vi.GetBSON() + if err != nil { + panic(err) + } + v = reflect.ValueOf(getv) + continue + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + continue + } + break + } + + if v.Type() == typeRaw { + raw := v.Interface().(Raw) + if raw.Kind != 0x03 && raw.Kind != 0x00 { + panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + } + e.addBytes(raw.Data...) + return + } + + start := e.reserveInt32() + + switch v.Kind() { + case reflect.Map: + e.addMap(v) + case reflect.Struct: + e.addStruct(v) + case reflect.Array, reflect.Slice: + e.addSlice(v) + default: + panic("Can't marshal " + v.Type().String() + " as a BSON document") + } + + e.addBytes(0) + e.setInt32(start, int32(len(e.out)-start)) +} + +func (e *encoder) addMap(v reflect.Value) { + for _, k := range v.MapKeys() { + e.addElem(k.String(), v.MapIndex(k), false) + } +} + +func (e *encoder) addStruct(v reflect.Value) { + sinfo, err := getStructInfo(v.Type()) + if err != nil { + panic(err) + } + var value reflect.Value + if sinfo.InlineMap >= 0 { + m := v.Field(sinfo.InlineMap) + if m.Len() > 0 { + for _, k := range m.MapKeys() { + ks := k.String() + if _, found := sinfo.FieldsMap[ks]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks)) + } + e.addElem(ks, m.MapIndex(k), false) + } + } + } + for _, info := range sinfo.FieldsList { + if info.Inline == nil { + value = v.Field(info.Num) + } else { + value = v.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.addElem(info.Key, value, info.MinSize) + } +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + if v.Type() == typeTime { + return v.Interface().(time.Time).IsZero() + } + for i := v.NumField()-1; i >= 0; i-- { + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +func (e *encoder) addSlice(v reflect.Value) { + vi := v.Interface() + if d, ok := vi.(D); 
ok { + for _, elem := range d { + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + if d, ok := vi.(RawD); ok { + for _, elem := range d { + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + l := v.Len() + et := v.Type().Elem() + if et == typeDocElem { + for i := 0; i < l; i++ { + elem := v.Index(i).Interface().(DocElem) + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + if et == typeRawDocElem { + for i := 0; i < l; i++ { + elem := v.Index(i).Interface().(RawDocElem) + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + for i := 0; i < l; i++ { + e.addElem(itoa(i), v.Index(i), false) + } +} + +// -------------------------------------------------------------------------- +// Marshaling of elements in a document. + +func (e *encoder) addElemName(kind byte, name string) { + e.addBytes(kind) + e.addBytes([]byte(name)...) + e.addBytes(0) +} + +func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { + + if !v.IsValid() { + e.addElemName('\x0A', name) + return + } + + if getter, ok := v.Interface().(Getter); ok { + getv, err := getter.GetBSON() + if err != nil { + panic(err) + } + e.addElem(name, reflect.ValueOf(getv), minSize) + return + } + + switch v.Kind() { + + case reflect.Interface: + e.addElem(name, v.Elem(), minSize) + + case reflect.Ptr: + e.addElem(name, v.Elem(), minSize) + + case reflect.String: + s := v.String() + switch v.Type() { + case typeObjectId: + if len(s) != 12 { + panic("ObjectIDs must be exactly 12 bytes long (got " + + strconv.Itoa(len(s)) + ")") + } + e.addElemName('\x07', name) + e.addBytes([]byte(s)...) + case typeSymbol: + e.addElemName('\x0E', name) + e.addStr(s) + default: + e.addElemName('\x02', name) + e.addStr(s) + } + + case reflect.Float32, reflect.Float64: + e.addElemName('\x01', name) + e.addInt64(int64(math.Float64bits(v.Float()))) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + u := v.Uint() + if int64(u) < 0 { + panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") + } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { + e.addElemName('\x10', name) + e.addInt32(int32(u)) + } else { + e.addElemName('\x12', name) + e.addInt64(int64(u)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Type() { + case typeMongoTimestamp: + e.addElemName('\x11', name) + e.addInt64(v.Int()) + + case typeOrderKey: + if v.Int() == int64(MaxKey) { + e.addElemName('\x7F', name) + } else { + e.addElemName('\xFF', name) + } + + default: + i := v.Int() + if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { + // It fits into an int32, encode as such. 
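+ // ('\x10' is the BSON int32 element kind; '\x12' below is int64.)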
+ e.addElemName('\x10', name) + e.addInt32(int32(i)) + } else { + e.addElemName('\x12', name) + e.addInt64(i) + } + } + + case reflect.Bool: + e.addElemName('\x08', name) + if v.Bool() { + e.addBytes(1) + } else { + e.addBytes(0) + } + + case reflect.Map: + e.addElemName('\x03', name) + e.addDoc(v) + + case reflect.Slice: + vt := v.Type() + et := vt.Elem() + if et.Kind() == reflect.Uint8 { + e.addElemName('\x05', name) + e.addBinary('\x00', v.Bytes()) + } else if et == typeDocElem || et == typeRawDocElem { + e.addElemName('\x03', name) + e.addDoc(v) + } else { + e.addElemName('\x04', name) + e.addDoc(v) + } + + case reflect.Array: + et := v.Type().Elem() + if et.Kind() == reflect.Uint8 { + e.addElemName('\x05', name) + e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte)) + } else { + e.addElemName('\x04', name) + e.addDoc(v) + } + + case reflect.Struct: + switch s := v.Interface().(type) { + + case Raw: + kind := s.Kind + if kind == 0x00 { + kind = 0x03 + } + e.addElemName(kind, name) + e.addBytes(s.Data...) + + case Binary: + e.addElemName('\x05', name) + e.addBinary(s.Kind, s.Data) + + case RegEx: + e.addElemName('\x0B', name) + e.addCStr(s.Pattern) + e.addCStr(s.Options) + + case JavaScript: + if s.Scope == nil { + e.addElemName('\x0D', name) + e.addStr(s.Code) + } else { + e.addElemName('\x0F', name) + start := e.reserveInt32() + e.addStr(s.Code) + e.addDoc(reflect.ValueOf(s.Scope)) + e.setInt32(start, int32(len(e.out)-start)) + } + + case time.Time: + // MongoDB handles timestamps as milliseconds. + e.addElemName('\x09', name) + e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6)) + + case url.URL: + e.addElemName('\x02', name) + e.addStr(s.String()) + + case undefined: + e.addElemName('\x06', name) + + default: + e.addElemName('\x03', name) + e.addDoc(v) + } + + default: + panic("Can't marshal " + v.Type().String() + " in a BSON document") + } +} + +// -------------------------------------------------------------------------- +// Marshaling of base types. + +func (e *encoder) addBinary(subtype byte, v []byte) { + if subtype == 0x02 { + // Wonder how that brilliant idea came to life. Obsolete, luckily. + e.addInt32(int32(len(v) + 4)) + e.addBytes(subtype) + e.addInt32(int32(len(v))) + } else { + e.addInt32(int32(len(v))) + e.addBytes(subtype) + } + e.addBytes(v...) +} + +func (e *encoder) addStr(v string) { + e.addInt32(int32(len(v) + 1)) + e.addCStr(v) +} + +func (e *encoder) addCStr(v string) { + e.addBytes([]byte(v)...) + e.addBytes(0) +} + +func (e *encoder) reserveInt32() (pos int) { + pos = len(e.out) + e.addBytes(0, 0, 0, 0) + return pos +} + +func (e *encoder) setInt32(pos int, v int32) { + e.out[pos+0] = byte(v) + e.out[pos+1] = byte(v >> 8) + e.out[pos+2] = byte(v >> 16) + e.out[pos+3] = byte(v >> 24) +} + +func (e *encoder) addInt32(v int32) { + u := uint32(v) + e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) +} + +func (e *encoder) addInt64(v int64) { + u := uint64(v) + e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24), + byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) +} + +func (e *encoder) addBytes(v ...byte) { + e.out = append(e.out, v...) +} diff --git a/vendor/labix.org/v2/mgo/bulk.go b/vendor/labix.org/v2/mgo/bulk.go new file mode 100644 index 0000000..5a9d37b --- /dev/null +++ b/vendor/labix.org/v2/mgo/bulk.go @@ -0,0 +1,71 @@ +package mgo + +// Bulk represents an operation that can be prepared with several +// orthogonal changes before being delivered to the server. +// +// WARNING: This API is still experimental. 
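+// +// A sketch of typical use, going by the methods defined below ("collection", +// "doc1" and "doc2" are placeholders): +// +//     bulk := collection.Bulk() +//     bulk.Insert(doc1, doc2) +//     result, err := bulk.Run()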
+// +// Relevant documentation: +// +// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api +// +type Bulk struct { + c *Collection + ordered bool + inserts []interface{} +} + +// BulkError holds an error returned from running a Bulk operation. +// +// TODO: This is private for the moment, until we understand exactly how +// to report these multi-errors in a useful and convenient way. +type bulkError struct { + err error +} + +// BulkResult holds the results for a bulk operation. +type BulkResult struct { + // Be conservative while we understand exactly how to report these + // results in a useful and convenient way, and also how to emulate + // them with prior servers. + private bool +} + +func (e *bulkError) Error() string { + return e.err.Error() +} + +// Bulk returns a value to prepare the execution of a bulk operation. +// +// WARNING: This API is still experimental. +// +func (c *Collection) Bulk() *Bulk { + return &Bulk{c: c, ordered: true} +} + +// Unordered puts the bulk operation in unordered mode. +// +// In unordered mode the individual operations may be sent +// out of order, which means later operations may proceed +// even if prior ones have failed. +func (b *Bulk) Unordered() { + b.ordered = false +} + +// Insert queues up the provided documents for insertion. +func (b *Bulk) Insert(docs ...interface{}) { + b.inserts = append(b.inserts, docs...) +} + +// Run runs all the operations queued up. +func (b *Bulk) Run() (*BulkResult, error) { + op := &insertOp{b.c.FullName, b.inserts, 0} + if !b.ordered { + op.flags = 1 // ContinueOnError + } + _, err := b.c.writeQuery(op) + if err != nil { + return nil, &bulkError{err} + } + return &BulkResult{}, nil +} diff --git a/vendor/labix.org/v2/mgo/bulk_test.go b/vendor/labix.org/v2/mgo/bulk_test.go new file mode 100644 index 0000000..f8abca8 --- /dev/null +++ b/vendor/labix.org/v2/mgo/bulk_test.go @@ -0,0 +1,89 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "labix.org/v2/mgo" + . 
"launchpad.net/gocheck" +) + +func (s *S) TestBulkInsert(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + bulk := coll.Bulk() + bulk.Insert(M{"n": 1}) + bulk.Insert(M{"n": 2}, M{"n": 3}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) +} + +func (s *S) TestBulkInsertError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + bulk := coll.Bulk() + bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, ".*duplicate key.*") + + type doc struct{ N int `_id` } + var res []doc + err = coll.Find(nil).Sort("_id").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{1}, {2}}) +} + +func (s *S) TestBulkInsertErrorUnordered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + bulk := coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, ".*duplicate key.*") + + type doc struct{ N int `_id` } + var res []doc + err = coll.Find(nil).Sort("_id").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) +} diff --git a/vendor/labix.org/v2/mgo/cluster.go b/vendor/labix.org/v2/mgo/cluster.go new file mode 100644 index 0000000..b4ed5bf --- /dev/null +++ b/vendor/labix.org/v2/mgo/cluster.go @@ -0,0 +1,616 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "errors" + "labix.org/v2/mgo/bson" + "net" + "sync" + "time" +) + +// --------------------------------------------------------------------------- +// Mongo cluster encapsulation. +// +// A cluster enables the communication with one or more servers participating +// in a mongo cluster. 
This works with individual servers, a replica set, +// a replica pair, one or multiple mongos routers, etc. + +type mongoCluster struct { + sync.RWMutex + serverSynced sync.Cond + userSeeds []string + dynaSeeds []string + servers mongoServers + masters mongoServers + references int + syncing bool + direct bool + failFast bool + syncCount uint + cachedIndex map[string]bool + sync chan bool + dial dialer +} + +func newCluster(userSeeds []string, direct, failFast bool, dial dialer) *mongoCluster { + cluster := &mongoCluster{ + userSeeds: userSeeds, + references: 1, + direct: direct, + failFast: failFast, + dial: dial, + } + cluster.serverSynced.L = cluster.RWMutex.RLocker() + cluster.sync = make(chan bool, 1) + stats.cluster(+1) + go cluster.syncServersLoop() + return cluster +} + +// Acquire increases the reference count for the cluster. +func (cluster *mongoCluster) Acquire() { + cluster.Lock() + cluster.references++ + debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references) + cluster.Unlock() +} + +// Release decreases the reference count for the cluster. Once +// it reaches zero, all servers will be closed. +func (cluster *mongoCluster) Release() { + cluster.Lock() + if cluster.references == 0 { + panic("cluster.Release() with references == 0") + } + cluster.references-- + debugf("Cluster %p released (refs=%d)", cluster, cluster.references) + if cluster.references == 0 { + for _, server := range cluster.servers.Slice() { + server.Close() + } + // Wake up the sync loop so it can die. + cluster.syncServers() + stats.cluster(-1) + } + cluster.Unlock() +} + +func (cluster *mongoCluster) LiveServers() (servers []string) { + cluster.RLock() + for _, serv := range cluster.servers.Slice() { + servers = append(servers, serv.Addr) + } + cluster.RUnlock() + return servers +} + +func (cluster *mongoCluster) removeServer(server *mongoServer) { + cluster.Lock() + cluster.masters.Remove(server) + other := cluster.servers.Remove(server) + cluster.Unlock() + if other != nil { + other.Close() + log("Removed server ", server.Addr, " from cluster.") + } + server.Close() +} + +type isMasterResult struct { + IsMaster bool + Secondary bool + Primary string + Hosts []string + Passives []string + Tags bson.D + Msg string +} + +func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { + // Monotonic lets it talk to a slave and still hold the socket. + session := newSession(Monotonic, cluster, 10*time.Second) + session.setSocket(socket) + err := session.Run("ismaster", result) + session.Close() + return err +} + +type possibleTimeout interface { + Timeout() bool +} + +var syncSocketTimeout = 5 * time.Second + +func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { + var syncTimeout time.Duration + if raceDetector { + // This variable is only ever touched by tests. + globalMutex.Lock() + syncTimeout = syncSocketTimeout + globalMutex.Unlock() + } else { + syncTimeout = syncSocketTimeout + } + + addr := server.Addr + log("SYNC Processing ", addr, "...") + + // Retry a few times to avoid knocking a server down for a hiccup. + var result isMasterResult + var tryerr error + for retry := 0; ; retry++ { + if retry == 3 || retry == 1 && cluster.failFast { + return nil, nil, tryerr + } + if retry > 0 { + // Don't abuse the server needlessly if there's something actually wrong. + if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { + // Give a chance for waiters to timeout as well. 
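+ // (Broadcast wakes goroutines blocked in serverSynced.Wait, notably AcquireSocket.)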
+ cluster.serverSynced.Broadcast() + } + time.Sleep(syncShortDelay) + } + + // It's not clear what would be a good timeout here. Is it + // better to wait longer or to retry? + socket, _, err := server.AcquireSocket(0, syncTimeout) + if err != nil { + tryerr = err + logf("SYNC Failed to get socket to %s: %v", addr, err) + continue + } + err = cluster.isMaster(socket, &result) + socket.Release() + if err != nil { + tryerr = err + logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) + continue + } + debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) + break + } + + if result.IsMaster { + debugf("SYNC %s is a master.", addr) + // Made an incorrect assumption above, so fix stats. + stats.conn(-1, false) + stats.conn(+1, true) + } else if result.Secondary { + debugf("SYNC %s is a slave.", addr) + } else if cluster.direct { + logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) + } else { + logf("SYNC %s is neither a master nor a slave.", addr) + // Made an incorrect assumption above, so fix stats. + stats.conn(-1, false) + return nil, nil, errors.New(addr + " is neither a master nor a slave") + } + + info = &mongoServerInfo{ + Master: result.IsMaster, + Mongos: result.Msg == "isdbgrid", + Tags: result.Tags, + } + + hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) + if result.Primary != "" { + // First in the list to speed up master discovery. + hosts = append(hosts, result.Primary) + } + hosts = append(hosts, result.Hosts...) + hosts = append(hosts, result.Passives...) + + debugf("SYNC %s knows about the following peers: %#v", addr, hosts) + return info, hosts, nil +} + +type syncKind bool + +const ( + completeSync syncKind = true + partialSync syncKind = false +) + +func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { + cluster.Lock() + current := cluster.servers.Search(server.ResolvedAddr) + if current == nil { + if syncKind == partialSync { + cluster.Unlock() + server.Close() + log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") + return + } + cluster.servers.Add(server) + if info.Master { + cluster.masters.Add(server) + log("SYNC Adding ", server.Addr, " to cluster as a master.") + } else { + log("SYNC Adding ", server.Addr, " to cluster as a slave.") + } + } else { + if server != current { + panic("addServer attempting to add duplicated server") + } + if server.Info().Master != info.Master { + if info.Master { + log("SYNC Server ", server.Addr, " is now a master.") + cluster.masters.Add(server) + } else { + log("SYNC Server ", server.Addr, " is now a slave.") + cluster.masters.Remove(server) + } + } + } + server.SetInfo(info) + debugf("SYNC Broadcasting availability of server %s", server.Addr) + cluster.serverSynced.Broadcast() + cluster.Unlock() +} + +func (cluster *mongoCluster) getKnownAddrs() []string { + cluster.RLock() + max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() + seen := make(map[string]bool, max) + known := make([]string, 0, max) + + add := func(addr string) { + if _, found := seen[addr]; !found { + seen[addr] = true + known = append(known, addr) + } + } + + for _, addr := range cluster.userSeeds { + add(addr) + } + for _, addr := range cluster.dynaSeeds { + add(addr) + } + for _, serv := range cluster.servers.Slice() { + add(serv.Addr) + } + cluster.RUnlock() + + return known +} + +// syncServers injects a value into the cluster.sync channel to force +// an iteration of the syncServersLoop function. 
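+// The sync channel is buffered with capacity one and the send below is +// non-blocking, so concurrent requests collapse into a single pending sync.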
+func (cluster *mongoCluster) syncServers() { + select { + case cluster.sync <- true: + default: + } +} + +// How long to wait for a checkup of the cluster topology if nothing +// else kicks a synchronization before that. +const syncServersDelay = 30 * time.Second +const syncShortDelay = 500 * time.Millisecond + +// syncServersLoop loops while the cluster is alive to keep its idea of +// the server topology up-to-date. It must be called just once from +// newCluster. The loop iterates once syncServersDelay has passed, or +// if somebody injects a value into the cluster.sync channel to force a +// synchronization. A loop iteration will contact all servers in +// parallel, ask them about known peers and their own role within the +// cluster, and then attempt to do the same with all the peers +// retrieved. +func (cluster *mongoCluster) syncServersLoop() { + for { + debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) + + cluster.Lock() + if cluster.references == 0 { + cluster.Unlock() + break + } + cluster.references++ // Keep alive while syncing. + direct := cluster.direct + cluster.Unlock() + + cluster.syncServersIteration(direct) + + // We just synchronized, so consume any outstanding requests. + select { + case <-cluster.sync: + default: + } + + cluster.Release() + + // Hold off before allowing another sync. No point in + // burning CPU looking for down servers. + if !cluster.failFast { + time.Sleep(syncShortDelay) + } + + cluster.Lock() + if cluster.references == 0 { + cluster.Unlock() + break + } + cluster.syncCount++ + // Poke all waiters so they have a chance to timeout or + // restart syncing if they wish to. + cluster.serverSynced.Broadcast() + // Check if we have to restart immediately either way. + restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() + cluster.Unlock() + + if restart { + log("SYNC No masters found. Will synchronize again.") + time.Sleep(syncShortDelay) + continue + } + + debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) + + // Hold off until somebody explicitly requests a synchronization + // or it's time to check for a cluster topology change again. 
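+ // (syncServersDelay is 30s; the timer below is re-created on each iteration, while a send on cluster.sync short-circuits the wait.)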
+ select { + case <-cluster.sync: + case <-time.After(syncServersDelay): + } + } + debugf("SYNC Cluster %p is stopping its sync loop.", cluster) +} + +func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { + cluster.RLock() + server := cluster.servers.Search(tcpaddr.String()) + cluster.RUnlock() + if server != nil { + return server + } + return newServer(addr, tcpaddr, cluster.sync, cluster.dial) +} + +func resolveAddr(addr string) (*net.TCPAddr, error) { + tcpaddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + log("SYNC Failed to resolve ", addr, ": ", err.Error()) + return nil, err + } + if tcpaddr.String() != addr { + debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) + } + return tcpaddr, nil +} + +type pendingAdd struct { + server *mongoServer + info *mongoServerInfo +} + +func (cluster *mongoCluster) syncServersIteration(direct bool) { + log("SYNC Starting full topology synchronization...") + + var wg sync.WaitGroup + var m sync.Mutex + notYetAdded := make(map[string]pendingAdd) + addIfFound := make(map[string]bool) + seen := make(map[string]bool) + syncKind := partialSync + + var spawnSync func(addr string, byMaster bool) + spawnSync = func(addr string, byMaster bool) { + wg.Add(1) + go func() { + defer wg.Done() + + tcpaddr, err := resolveAddr(addr) + if err != nil { + log("SYNC Failed to start sync of ", addr, ": ", err.Error()) + return + } + resolvedAddr := tcpaddr.String() + + m.Lock() + if byMaster { + if pending, ok := notYetAdded[resolvedAddr]; ok { + delete(notYetAdded, resolvedAddr) + m.Unlock() + cluster.addServer(pending.server, pending.info, completeSync) + return + } + addIfFound[resolvedAddr] = true + } + if seen[resolvedAddr] { + m.Unlock() + return + } + seen[resolvedAddr] = true + m.Unlock() + + server := cluster.server(addr, tcpaddr) + info, hosts, err := cluster.syncServer(server) + if err != nil { + cluster.removeServer(server) + return + } + + m.Lock() + add := direct || info.Master || addIfFound[resolvedAddr] + if add { + syncKind = completeSync + } else { + notYetAdded[resolvedAddr] = pendingAdd{server, info} + } + m.Unlock() + if add { + cluster.addServer(server, info, completeSync) + } + if !direct { + for _, addr := range hosts { + spawnSync(addr, info.Master) + } + } + }() + } + + knownAddrs := cluster.getKnownAddrs() + for _, addr := range knownAddrs { + spawnSync(addr, false) + } + wg.Wait() + + if syncKind == completeSync { + logf("SYNC Synchronization was complete (got data from primary).") + for _, pending := range notYetAdded { + cluster.removeServer(pending.server) + } + } else { + logf("SYNC Synchronization was partial (cannot talk to primary).") + for _, pending := range notYetAdded { + cluster.addServer(pending.server, pending.info, partialSync) + } + } + + cluster.Lock() + ml := cluster.masters.Len() + logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml) + + // Update dynamic seeds, but only if we have any good servers. Otherwise, + // leave them alone for better chances of a successful sync in the future. + if syncKind == completeSync { + dynaSeeds := make([]string, cluster.servers.Len()) + for i, server := range cluster.servers.Slice() { + dynaSeeds[i] = server.Addr + } + cluster.dynaSeeds = dynaSeeds + debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) + } + cluster.Unlock() +} + +var socketsPerServer = 4096 + +// AcquireSocket returns a socket to a server in the cluster. 
If slaveOk is +// true, it will attempt to return a socket to a slave server. If it is +// false, the socket will necessarily be to a master server. +func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D) (s *mongoSocket, err error) { + var started time.Time + var syncCount uint + warnedLimit := false + for { + cluster.RLock() + for { + ml := cluster.masters.Len() + sl := cluster.servers.Len() + debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml) + if ml > 0 || slaveOk && sl > 0 { + break + } + if started.IsZero() { + // Initialize after fast path above. + started = time.Now() + syncCount = cluster.syncCount + } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { + cluster.RUnlock() + return nil, errors.New("no reachable servers") + } + log("Waiting for servers to synchronize...") + cluster.syncServers() + + // Remember: this will release and reacquire the lock. + cluster.serverSynced.Wait() + } + + var server *mongoServer + if slaveOk { + server = cluster.servers.BestFit(serverTags) + } else { + server = cluster.masters.BestFit(nil) + } + cluster.RUnlock() + + if server == nil { + // Must have failed the requested tags. Sleep to avoid spinning. + time.Sleep(1e8) + continue + } + + s, abended, err := server.AcquireSocket(socketsPerServer, socketTimeout) + if err == errSocketLimit { + if !warnedLimit { + warnedLimit = true + log("WARNING: Per-server connection limit reached.") + } + time.Sleep(1e8) + continue + } + if err != nil { + cluster.removeServer(server) + cluster.syncServers() + continue + } + if abended && !slaveOk { + var result isMasterResult + err := cluster.isMaster(s, &result) + if err != nil || !result.IsMaster { + logf("Cannot confirm server %s as master (%v)", server.Addr, err) + s.Release() + cluster.syncServers() + time.Sleep(1e8) + continue + } + } + return s, nil + } + panic("unreached") +} + +func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { + cluster.Lock() + if cluster.cachedIndex == nil { + cluster.cachedIndex = make(map[string]bool) + } + if exists { + cluster.cachedIndex[cacheKey] = true + } else { + delete(cluster.cachedIndex, cacheKey) + } + cluster.Unlock() +} + +func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { + cluster.RLock() + if cluster.cachedIndex != nil { + result = cluster.cachedIndex[cacheKey] + } + cluster.RUnlock() + return +} + +func (cluster *mongoCluster) ResetIndexCache() { + cluster.Lock() + cluster.cachedIndex = make(map[string]bool) + cluster.Unlock() +} diff --git a/vendor/labix.org/v2/mgo/cluster_test.go b/vendor/labix.org/v2/mgo/cluster_test.go new file mode 100644 index 0000000..d6d2810 --- /dev/null +++ b/vendor/labix.org/v2/mgo/cluster_test.go @@ -0,0 +1,1559 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "fmt" + "io" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + . "launchpad.net/gocheck" + "net" + "strings" + "sync" + "time" +) + +func (s *S) TestNewSession(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Do a dummy operation to wait for connection. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Tweak safety and query settings to ensure other has copied those. + session.SetSafe(nil) + session.SetBatch(-1) + other := session.New() + defer other.Close() + session.SetSafe(&mgo.Safe{}) + + // Clone was copied while session was unsafe, so no errors. + otherColl := other.DB("mydb").C("mycoll") + err = otherColl.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Original session was made safe again. + err = coll.Insert(M{"_id": 1}) + c.Assert(err, NotNil) + + // With New(), each session has its own socket now. + stats := mgo.GetStats() + c.Assert(stats.MasterConns, Equals, 2) + c.Assert(stats.SocketsInUse, Equals, 2) + + // Ensure query parameters were cloned. + err = otherColl.Insert(M{"_id": 2}) + c.Assert(err, IsNil) + + // Ping the database to ensure the nonce has been received already. + c.Assert(other.Ping(), IsNil) + + mgo.ResetStats() + + iter := otherColl.Find(M{}).Iter() + c.Assert(err, IsNil) + + m := M{} + ok := iter.Next(m) + c.Assert(ok, Equals, true) + err = iter.Close() + c.Assert(err, IsNil) + + // If Batch(-1) is in effect, a single document must have been received. + stats = mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 1) +} + +func (s *S) TestCloneSession(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Do a dummy operation to wait for connection. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Tweak safety and query settings to ensure clone is copying those. + session.SetSafe(nil) + session.SetBatch(-1) + clone := session.Clone() + defer clone.Close() + session.SetSafe(&mgo.Safe{}) + + // Clone was copied while session was unsafe, so no errors. + cloneColl := clone.DB("mydb").C("mycoll") + err = cloneColl.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Original session was made safe again. + err = coll.Insert(M{"_id": 1}) + c.Assert(err, NotNil) + + // With Clone(), same socket is shared between sessions now. + stats := mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 1) + c.Assert(stats.SocketRefs, Equals, 2) + + // Refreshing one of them should let the original socket go, + // while preserving the safety settings. 
+ clone.Refresh() + err = cloneColl.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Must have used another connection now. + stats = mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 2) + c.Assert(stats.SocketRefs, Equals, 2) + + // Ensure query parameters were cloned. + err = cloneColl.Insert(M{"_id": 2}) + c.Assert(err, IsNil) + + // Ping the database to ensure the nonce has been received already. + c.Assert(clone.Ping(), IsNil) + + mgo.ResetStats() + + iter := cloneColl.Find(M{}).Iter() + c.Assert(err, IsNil) + + m := M{} + ok := iter.Next(m) + c.Assert(ok, Equals, true) + err = iter.Close() + c.Assert(err, IsNil) + + // If Batch(-1) is in effect, a single document must have been received. + stats = mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 1) +} + +func (s *S) TestSetModeStrong(c *C) { + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, false) + session.SetMode(mgo.Strong, false) + + c.Assert(session.Mode(), Equals, mgo.Strong) + + result := M{} + cmd := session.DB("admin").C("$cmd") + err = cmd.Find(M{"ismaster": 1}).One(&result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, true) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Wait since the sync also uses sockets. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + stats := mgo.GetStats() + c.Assert(stats.MasterConns, Equals, 1) + c.Assert(stats.SlaveConns, Equals, 2) + c.Assert(stats.SocketsInUse, Equals, 1) + + session.SetMode(mgo.Strong, true) + + stats = mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestSetModeMonotonic(c *C) { + // Must necessarily connect to a slave, otherwise the + // master connection will be available first. + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, false) + + c.Assert(session.Mode(), Equals, mgo.Monotonic) + + result := M{} + cmd := session.DB("admin").C("$cmd") + err = cmd.Find(M{"ismaster": 1}).One(&result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, false) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + result = M{} + err = cmd.Find(M{"ismaster": 1}).One(&result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, true) + + // Wait since the sync also uses sockets. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + stats := mgo.GetStats() + c.Assert(stats.MasterConns, Equals, 1) + c.Assert(stats.SlaveConns, Equals, 2) + c.Assert(stats.SocketsInUse, Equals, 2) + + session.SetMode(mgo.Monotonic, true) + + stats = mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestSetModeMonotonicAfterStrong(c *C) { + // Test that a strong session shifting to a monotonic + // one preserves the socket untouched. + + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + // Insert something to force a connection to the master. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + session.SetMode(mgo.Monotonic, false) + + // Wait since the sync also uses sockets. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + // Master socket should still be reserved. 
+ stats := mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 1) + + // Confirm it's the master even though it's Monotonic by now. + result := M{} + cmd := session.DB("admin").C("$cmd") + err = cmd.Find(M{"ismaster": 1}).One(&result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, true) +} + +func (s *S) TestSetModeStrongAfterMonotonic(c *C) { + // Test that shifting from Monotonic to Strong while + // using a slave socket will keep the socket reserved + // until the master socket is necessary, so that no + // switch over occurs unless it's actually necessary. + + // Must necessarily connect to a slave, otherwise the + // master connection will be available first. + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, false) + + // Ensure we're talking to a slave, and reserve the socket. + result := M{} + err = session.Run("ismaster", &result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, false) + + // Switch to a Strong session. + session.SetMode(mgo.Strong, false) + + // Wait since the sync also uses sockets. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + // Slave socket should still be reserved. + stats := mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 1) + + // But any operation will switch it to the master. + result = M{} + err = session.Run("ismaster", &result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, true) +} + +func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { + // Must necessarily connect to a slave, otherwise the + // master connection will be available first. + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, false) + + c.Assert(session.Mode(), Equals, mgo.Monotonic) + + coll1 := session.DB("mydb").C("mycoll1") + coll2 := session.DB("mydb").C("mycoll2") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll1.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + // Release master so we can grab a slave again. + session.Refresh() + + // Wait until synchronization is done. + for { + n, err := coll1.Count() + c.Assert(err, IsNil) + if n == len(ns) { + break + } + } + + iter := coll1.Find(nil).Batch(2).Iter() + i := 0 + m := M{} + for iter.Next(&m) { + i++ + if i > 3 { + err := coll2.Insert(M{"n": 47 + i}) + c.Assert(err, IsNil) + } + } + c.Assert(i, Equals, len(ns)) +} + +func (s *S) TestSetModeEventual(c *C) { + // Must necessarily connect to a slave, otherwise the + // master connection will be available first. + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Eventual, false) + + c.Assert(session.Mode(), Equals, mgo.Eventual) + + result := M{} + err = session.Run("ismaster", &result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, false) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + result = M{} + err = session.Run("ismaster", &result) + c.Assert(err, IsNil) + c.Assert(result["ismaster"], Equals, false) + + // Wait since the sync also uses sockets. 
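+ // Note on the sleeps below: the untyped constant 5e8 converts to a
+ // time.Duration of 5e8 nanoseconds, i.e. half a second:
+ //
+ //     time.Sleep(5e8)                    // 500,000,000ns = 500ms
+ //     time.Sleep(500 * time.Millisecond) // the equivalent, spelled out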
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 2)
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestSetModeEventualAfterStrong(c *C) {
+ // Test that a strong session shifting to an eventual
+ // one preserves the socket untouched.
+
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Insert something to force a connection to the master.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ session.SetMode(mgo.Eventual, false)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ // Master socket should still be reserved.
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // Confirm it's the master even though it's Eventual by now.
+ result := M{}
+ cmd := session.DB("admin").C("$cmd")
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, true)
+
+ session.SetMode(mgo.Eventual, true)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestPrimaryShutdownStrong(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // With strong consistency, this will open a socket to the master.
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Kill the master.
+ host := result.Host
+ s.Stop(host)
+
+ // This must fail, since the connection was broken.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // With strong consistency, it fails again until reset.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ session.Refresh()
+
+ // Now we should be able to talk to the new master.
+ // Increase the timeout since this may take quite a while.
+ session.SetSyncTimeout(3 * time.Minute)
+
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Not(Equals), host)
+
+ // Insert some data to confirm it's indeed a master.
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 42})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestPrimaryHiccup(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // With strong consistency, this will open a socket to the master.
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Establish a few extra sessions to create spare sockets to
+ // the master. This slightly increases the chances of getting an
+ // incorrect cached socket.
+ var sessions []*mgo.Session
+ for i := 0; i < 20; i++ {
+ sessions = append(sessions, session.Copy())
+ err = sessions[len(sessions)-1].Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ }
+ for i := range sessions {
+ sessions[i].Close()
+ }
+
+ // Kill the master, but bring it back immediately.
+ host := result.Host
+ s.Stop(host)
+ s.StartAll()
+
+ // This must fail, since the connection was broken.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // With strong consistency, it fails again until reset.
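+ // A Strong session pins one master socket, so once that socket dies
+ // every call keeps returning io.EOF until the session is reset. The
+ // usual recovery shape (illustrative, not part of this test):
+ //
+ //     if err := session.Ping(); err == io.EOF {
+ //         session.Refresh() // discard the dead socket and resync
+ //     }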
+ err = session.Run("serverStatus", result) + c.Assert(err, Equals, io.EOF) + + session.Refresh() + + // Now we should be able to talk to the new master. + // Increase the timeout since this may take quite a while. + session.SetSyncTimeout(3 * time.Minute) + + // Insert some data to confirm it's indeed a master. + err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) + c.Assert(err, IsNil) +} + +func (s *S) TestPrimaryShutdownMonotonic(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, true) + + // Insert something to force a switch to the master. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Wait a bit for this to be synchronized to slaves. + time.Sleep(3 * time.Second) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + + // Kill the master. + host := result.Host + s.Stop(host) + + // This must fail, since the connection was broken. + err = session.Run("serverStatus", result) + c.Assert(err, Equals, io.EOF) + + // With monotonic consistency, it fails again until reset. + err = session.Run("serverStatus", result) + c.Assert(err, Equals, io.EOF) + + session.Refresh() + + // Now we should be able to talk to the new master. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Not(Equals), host) +} + +func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + ssresult := &struct{ Host string }{} + imresult := &struct{ IsMaster bool }{} + + // Figure the master while still using the strong session. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + master := ssresult.Host + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + // Create new monotonic session with an explicit address to ensure + // a slave is synchronized before the master, otherwise a connection + // with the master may be used below for lack of other options. + var addr string + switch { + case strings.HasSuffix(ssresult.Host, ":40021"): + addr = "localhost:40022" + case strings.HasSuffix(ssresult.Host, ":40022"): + addr = "localhost:40021" + case strings.HasSuffix(ssresult.Host, ":40023"): + addr = "localhost:40021" + default: + c.Fatal("Unknown host: ", ssresult.Host) + } + + session, err = mgo.Dial(addr) + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, true) + + // Check the address of the socket associated with the monotonic session. + c.Log("Running serverStatus and isMaster with monotonic session") + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + slave := ssresult.Host + c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave)) + + c.Assert(master, Not(Equals), slave) + + // Kill the master. + s.Stop(master) + + // Session must still be good, since we were talking to a slave. 
+ err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + + c.Assert(ssresult.Host, Equals, slave, + Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host)) + + // If we try to insert something, it'll have to hold until the new + // master is available to move the connection, and work correctly. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Must now be talking to the new master. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + // ... which is not the old one, since it's still dead. + c.Assert(ssresult.Host, Not(Equals), master) +} + +func (s *S) TestPrimaryShutdownEventual(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + master := result.Host + + session.SetMode(mgo.Eventual, true) + + // Should connect to the master when needed. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Wait a bit for this to be synchronized to slaves. + time.Sleep(3 * time.Second) + + // Kill the master. + s.Stop(master) + + // Should still work, with the new master now. + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Not(Equals), master) +} + +func (s *S) TestPreserveSocketCountOnSync(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + stats := mgo.GetStats() + for stats.MasterConns+stats.SlaveConns != 3 { + stats = mgo.GetStats() + c.Log("Waiting for all connections to be established...") + time.Sleep(5e8) + } + + c.Assert(stats.SocketsAlive, Equals, 3) + + // Kill the master (with rs1, 'a' is always the master). + s.Stop("localhost:40011") + + // Wait for the logic to run for a bit and bring it back. + startedAll := make(chan bool) + go func() { + time.Sleep(5e9) + s.StartAll() + startedAll <- true + }() + + // Do not allow the test to return before the goroutine above is done. + defer func() { + <-startedAll + }() + + // Do an action to kick the resync logic in, and also to + // wait until the cluster recognizes the server is back. + result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, true) + + for i := 0; i != 20; i++ { + stats = mgo.GetStats() + if stats.SocketsAlive == 3 { + break + } + c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive) + time.Sleep(5e8) + } + + // Ensure the number of sockets is preserved after syncing. + stats = mgo.GetStats() + c.Assert(stats.SocketsAlive, Equals, 3) + c.Assert(stats.SocketsInUse, Equals, 1) + c.Assert(stats.SocketRefs, Equals, 1) +} + +// Connect to the master of a deployment with a single server, +// run an insert, and then ensure the insert worked and that a +// single connection was established. +func (s *S) TestTopologySyncWithSingleMaster(c *C) { + // Use hostname here rather than IP, to make things trickier. 
+ session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1, "b": 2}) + c.Assert(err, IsNil) + + // One connection used for discovery. Master socket recycled for + // insert. Socket is reserved after insert. + stats := mgo.GetStats() + c.Assert(stats.MasterConns, Equals, 1) + c.Assert(stats.SlaveConns, Equals, 0) + c.Assert(stats.SocketsInUse, Equals, 1) + + // Refresh session and socket must be released. + session.Refresh() + stats = mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestTopologySyncWithSlaveSeed(c *C) { + // That's supposed to be a slave. Must run discovery + // and find out master to insert successfully. + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, true) + + // One connection to each during discovery. Master + // socket recycled for insert. + stats := mgo.GetStats() + c.Assert(stats.MasterConns, Equals, 1) + c.Assert(stats.SlaveConns, Equals, 2) + + // Only one socket reference alive, in the master socket owned + // by the above session. + c.Assert(stats.SocketsInUse, Equals, 1) + + // Refresh it, and it must be gone. + session.Refresh() + stats = mgo.GetStats() + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestSyncTimeout(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + s.Stop("localhost:40001") + + timeout := 3 * time.Second + session.SetSyncTimeout(timeout) + started := time.Now() + + // Do something. + result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) +} + +func (s *S) TestDialWithTimeout(c *C) { + if *fast { + c.Skip("-fast") + } + + timeout := 2 * time.Second + started := time.Now() + + // 40009 isn't used by the test servers. + session, err := mgo.DialWithTimeout("localhost:40009", timeout) + if session != nil { + session.Close() + } + c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(session, IsNil) + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) +} + +func (s *S) TestSocketTimeout(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + s.Freeze("localhost:40001") + + timeout := 3 * time.Second + session.SetSocketTimeout(timeout) + started := time.Now() + + // Do something. 
+ result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, ErrorMatches, ".*: i/o timeout") + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) +} + +func (s *S) TestSocketTimeoutOnDial(c *C) { + if *fast { + c.Skip("-fast") + } + + timeout := 1 * time.Second + + defer mgo.HackSyncSocketTimeout(timeout)() + + s.Freeze("localhost:40001") + + started := time.Now() + + session, err := mgo.DialWithTimeout("localhost:40001", timeout) + c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(session, IsNil) + + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true) +} + +func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + timeout := 2 * time.Second + session.SetSocketTimeout(timeout) + + // Do something that relies on the timeout and works. + c.Assert(session.Ping(), IsNil) + + // Freeze and wait for the timeout to go by. + s.Freeze("localhost:40001") + time.Sleep(timeout + 500*time.Millisecond) + s.Thaw("localhost:40001") + + // Do something again. The timeout above should not have killed + // the socket as there was nothing to be done. + c.Assert(session.Ping(), IsNil) +} + +func (s *S) TestDirect(c *C) { + session, err := mgo.Dial("localhost:40012?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + + // We know that server is a slave. + session.SetMode(mgo.Monotonic, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) + + stats := mgo.GetStats() + c.Assert(stats.SocketsAlive, Equals, 1) + c.Assert(stats.SocketsInUse, Equals, 1) + c.Assert(stats.SocketRefs, Equals, 1) + + // We've got no master, so it'll timeout. + session.SetSyncTimeout(5e8 * time.Nanosecond) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"test": 1}) + c.Assert(err, ErrorMatches, "no reachable servers") + + // Writing to the local database is okay. + coll = session.DB("local").C("mycoll") + defer coll.RemoveAll(nil) + id := bson.NewObjectId() + err = coll.Insert(M{"_id": id}) + c.Assert(err, IsNil) + + // Data was stored in the right server. + n, err := coll.Find(M{"_id": id}).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) + + // Server hasn't changed. + result.Host = "" + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) +} + +func (s *S) TestDirectToUnknownStateMember(c *C) { + session, err := mgo.Dial("localhost:40041?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Monotonic, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) + + // We've got no master, so it'll timeout. + session.SetSyncTimeout(5e8 * time.Nanosecond) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"test": 1}) + c.Assert(err, ErrorMatches, "no reachable servers") + + // Slave is still reachable. 
+ result.Host = "" + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) +} + +func (s *S) TestFailFast(c *C) { + info := mgo.DialInfo{ + Addrs: []string{"localhost:99999"}, + Timeout: 5 * time.Second, + FailFast: true, + } + + started := time.Now() + + _, err := mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, "no reachable servers") + + c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) +} + +type OpCounters struct { + Insert int + Query int + Update int + Delete int + GetMore int + Command int +} + +func getOpCounters(server string) (c *OpCounters, err error) { + session, err := mgo.Dial(server + "?connect=direct") + if err != nil { + return nil, err + } + defer session.Close() + session.SetMode(mgo.Monotonic, true) + result := struct{ OpCounters }{} + err = session.Run("serverStatus", &result) + return &result.OpCounters, err +} + +func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + ssresult := &struct{ Host string }{} + imresult := &struct{ IsMaster bool }{} + + // Figure the master while still using the strong session. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + master := ssresult.Host + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + // Collect op counters for everyone. + opc21a, err := getOpCounters("localhost:40021") + c.Assert(err, IsNil) + opc22a, err := getOpCounters("localhost:40022") + c.Assert(err, IsNil) + opc23a, err := getOpCounters("localhost:40023") + c.Assert(err, IsNil) + + // Do a SlaveOk query through MongoS + + mongos, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer mongos.Close() + + mongos.SetMode(mgo.Monotonic, true) + + coll := mongos.DB("mydb").C("mycoll") + result := &struct{}{} + for i := 0; i != 5; i++ { + err := coll.Find(nil).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) + } + + // Collect op counters for everyone again. + opc21b, err := getOpCounters("localhost:40021") + c.Assert(err, IsNil) + opc22b, err := getOpCounters("localhost:40022") + c.Assert(err, IsNil) + opc23b, err := getOpCounters("localhost:40023") + c.Assert(err, IsNil) + + masterPort := master[strings.Index(master, ":")+1:] + + var masterDelta, slaveDelta int + switch masterPort { + case "40021": + masterDelta = opc21b.Query - opc21a.Query + slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) + case "40022": + masterDelta = opc22b.Query - opc22a.Query + slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query) + case "40023": + masterDelta = opc23b.Query - opc23a.Query + slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query) + default: + c.Fatal("Uh?") + } + + c.Check(masterDelta, Equals, 0) // Just the counting itself. + c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. +} + +func (s *S) TestRemovalOfClusterMember(c *C) { + if *fast { + c.Skip("-fast") + } + + master, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer master.Close() + + // Wait for cluster to fully sync up. 
+ for i := 0; i < 10; i++ {
+ if len(master.LiveServers()) == 3 {
+ break
+ }
+ time.Sleep(5e8)
+ }
+ if len(master.LiveServers()) != 3 {
+ c.Fatalf("Test started with bad cluster state: %v", master.LiveServers())
+ }
+
+ result := &struct {
+ IsMaster bool
+ Me string
+ }{}
+ slave := master.Copy()
+ slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently.
+ err = slave.Run("isMaster", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.IsMaster, Equals, false)
+ slaveAddr := result.Me
+
+ defer func() {
+ master.Refresh()
+ master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil)
+ master.Close()
+ slave.Close()
+ }()
+
+ c.Logf("========== Removing slave: %s ==========", slaveAddr)
+
+ master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil)
+ err = master.Ping()
+ c.Assert(err, Equals, io.EOF)
+
+ master.Refresh()
+
+ // Give the cluster a moment to catch up by doing a roundtrip to the master.
+ err = master.Ping()
+ c.Assert(err, IsNil)
+
+ time.Sleep(3e9)
+
+ // This must fail since the slave has been taken off the cluster.
+ err = slave.Ping()
+ c.Assert(err, NotNil)
+
+ for i := 0; i < 15; i++ {
+ if len(master.LiveServers()) == 2 {
+ break
+ }
+ time.Sleep(time.Second)
+ }
+ live := master.LiveServers()
+ if len(live) != 2 {
+ c.Errorf("Removed server still considered live: %v", live)
+ }
+
+ c.Log("========== Test succeeded. ==========")
+}
+
+func (s *S) TestSocketLimit(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+ const socketLimit = 64
+ restore := mgo.HackSocketsPerServer(socketLimit)
+ defer restore()
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ stats := mgo.GetStats()
+ for stats.MasterConns+stats.SlaveConns != 3 {
+ stats = mgo.GetStats()
+ c.Log("Waiting for all connections to be established...")
+ time.Sleep(5e8)
+ }
+ c.Assert(stats.SocketsAlive, Equals, 3)
+
+ // Consume the whole limit for the master.
+ var master []*mgo.Session
+ for i := 0; i < socketLimit; i++ {
+ s := session.Copy()
+ defer s.Close()
+ err := s.Ping()
+ c.Assert(err, IsNil)
+ master = append(master, s)
+ }
+
+ before := time.Now()
+ go func() {
+ time.Sleep(3e9)
+ master[0].Refresh()
+ }()
+
+ // Now a single ping must block, since it would need another
+ // connection to the master, over the limit. Once the goroutine
+ // above releases its socket, it should move on.
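+ // Timing sketch: the goroutine above frees one master socket after
+ // 3e9ns (3 seconds), so the blocked Ping below is expected to return
+ // after roughly
+ //
+ //     3s <= delay < 6s
+ //
+ // which is what the two assertions on delay verify.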
+ session.Ping() + delay := time.Now().Sub(before) + c.Assert(delay > 3e9, Equals, true) + c.Assert(delay < 6e9, Equals, true) +} + +func (s *S) TestSetModeEventualIterBug(c *C) { + session1, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session1.Close() + + session1.SetMode(mgo.Eventual, false) + + coll1 := session1.DB("mydb").C("mycoll") + + const N = 100 + for i := 0; i < N; i++ { + err = coll1.Insert(M{"_id": i}) + c.Assert(err, IsNil) + } + + c.Logf("Waiting until secondary syncs") + for { + n, err := coll1.Count() + c.Assert(err, IsNil) + if n == N { + c.Logf("Found all") + break + } + } + + session2, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session2.Close() + + session2.SetMode(mgo.Eventual, false) + + coll2 := session2.DB("mydb").C("mycoll") + + i := 0 + iter := coll2.Find(nil).Batch(10).Iter() + var result struct{} + for iter.Next(&result) { + i++ + } + c.Assert(iter.Close(), Equals, nil) + c.Assert(i, Equals, N) +} + +func (s *S) TestCustomDialOld(c *C) { + dials := make(chan bool, 16) + dial := func(addr net.Addr) (net.Conn, error) { + tcpaddr, ok := addr.(*net.TCPAddr) + if !ok { + return nil, fmt.Errorf("unexpected address type: %T", addr) + } + dials <- true + return net.DialTCP("tcp", nil, tcpaddr) + } + info := mgo.DialInfo{ + Addrs: []string{"localhost:40012"}, + Dial: dial, + } + + // Use hostname here rather than IP, to make things trickier. + session, err := mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + defer session.Close() + + const N = 3 + for i := 0; i < N; i++ { + select { + case <-dials: + case <-time.After(5 * time.Second): + c.Fatalf("expected %d dials, got %d", N, i) + } + } + select { + case <-dials: + c.Fatalf("got more dials than expected") + case <-time.After(100 * time.Millisecond): + } +} + +func (s *S) TestCustomDialNew(c *C) { + dials := make(chan bool, 16) + dial := func(addr *mgo.ServerAddr) (net.Conn, error) { + dials <- true + if addr.TCPAddr().Port == 40012 { + c.Check(addr.String(), Equals, "localhost:40012") + } + return net.DialTCP("tcp", nil, addr.TCPAddr()) + } + info := mgo.DialInfo{ + Addrs: []string{"localhost:40012"}, + DialServer: dial, + } + + // Use hostname here rather than IP, to make things trickier. + session, err := mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + defer session.Close() + + const N = 3 + for i := 0; i < N; i++ { + select { + case <-dials: + case <-time.After(5 * time.Second): + c.Fatalf("expected %d dials, got %d", N, i) + } + } + select { + case <-dials: + c.Fatalf("got more dials than expected") + case <-time.After(100 * time.Millisecond): + } +} + +func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { + if *fast { + c.Skip("-fast") + } + + // Dial the shard. + session, err := mgo.Dial("localhost:40203") + c.Assert(err, IsNil) + defer session.Close() + + // Login and insert something to make it more realistic. + session.DB("admin").Login("root", "rapadura") + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(bson.M{"n": 1}) + c.Assert(err, IsNil) + + // Dial the replica set to figure the master out. + rs, err := mgo.Dial("root:rapadura@localhost:40031") + c.Assert(err, IsNil) + defer rs.Close() + + // With strong consistency, this will open a socket to the master. + result := &struct{ Host string }{} + err = rs.Run("serverStatus", result) + c.Assert(err, IsNil) + + // Kill the master. + host := result.Host + s.Stop(host) + + // This must fail, since the connection was broken. 
+ err = rs.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // This won't work because the master just died.
+ err = coll.Insert(bson.M{"n": 2})
+ c.Assert(err, NotNil)
+
+ // Refresh session and wait for re-election.
+ session.Refresh()
+ for i := 0; i < 60; i++ {
+ err = coll.Insert(bson.M{"n": 3})
+ if err == nil {
+ break
+ }
+ c.Logf("Waiting for replica set to elect a new master. Last error: %v", err)
+ time.Sleep(500 * time.Millisecond)
+ }
+ c.Assert(err, IsNil)
+
+ count, err := coll.Count()
+ c.Assert(err, IsNil)
+ c.Assert(count > 1, Equals, true)
+}
+
+func (s *S) TestNearestSecondary(c *C) {
+ defer mgo.HackPingDelay(3 * time.Second)()
+
+ rs1a := "127.0.0.1:40011"
+ rs1b := "127.0.0.1:40012"
+ rs1c := "127.0.0.1:40013"
+ s.Freeze(rs1b)
+
+ session, err := mgo.Dial(rs1a)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Wait for the sync up to run through the first couple of servers.
+ for len(session.LiveServers()) != 2 {
+ c.Log("Waiting for two servers to be alive...")
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Extra delay to ensure the frozen server gets penalized.
+ time.Sleep(500 * time.Millisecond)
+
+ // Release the frozen server.
+ s.Thaw(rs1b)
+
+ // Wait for it to come up.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for all servers to be alive...")
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ session.SetMode(mgo.Monotonic, true)
+ var result struct{ Host string }
+
+ // See which slave picks up the line, several times to avoid chance.
+ for i := 0; i < 10; i++ {
+ session.Refresh()
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, hostPort(rs1c))
+ }
+
+ if *fast {
+ // Don't hold back for several seconds.
+ return
+ }
+
+ // Now hold the other server for long enough to penalize it.
+ s.Freeze(rs1c)
+ time.Sleep(5 * time.Second)
+ s.Thaw(rs1c)
+
+ // Wait for the ping to be processed.
+ time.Sleep(500 * time.Millisecond)
+
+ // Repeating the test should now pick the former server consistently.
+ for i := 0; i < 10; i++ { + session.Refresh() + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, hostPort(rs1b)) + } +} + +func (s *S) TestConnectCloseConcurrency(c *C) { + restore := mgo.HackPingDelay(500 * time.Millisecond) + defer restore() + var wg sync.WaitGroup + const n = 500 + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer wg.Done() + session, err := mgo.Dial("localhost:40001") + if err != nil { + c.Fatal(err) + } + time.Sleep(1) + session.Close() + }() + } + wg.Wait() +} + +func (s *S) TestSelectServers(c *C) { + if !s.versionAtLeast(2, 2) { + c.Skip("read preferences introduced in 2.2") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Eventual, true) + + var result struct{ Host string } + + session.Refresh() + session.SelectServers(bson.D{{"rs1", "b"}}) + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, "40012") + + session.Refresh() + session.SelectServers(bson.D{{"rs1", "c"}}) + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, "40013") +} + +func (s *S) TestSelectServersWithMongos(c *C) { + if !s.versionAtLeast(2, 2) { + c.Skip("read preferences introduced in 2.2") + } + + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + ssresult := &struct{ Host string }{} + imresult := &struct{ IsMaster bool }{} + + // Figure the master while still using the strong session. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + master := ssresult.Host + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + var slave1, slave2 string + switch hostPort(master) { + case "40021": + slave1, slave2 = "b", "c" + case "40022": + slave1, slave2 = "a", "c" + case "40023": + slave1, slave2 = "a", "b" + } + + // Collect op counters for everyone. + opc21a, err := getOpCounters("localhost:40021") + c.Assert(err, IsNil) + opc22a, err := getOpCounters("localhost:40022") + c.Assert(err, IsNil) + opc23a, err := getOpCounters("localhost:40023") + c.Assert(err, IsNil) + + // Do a SlaveOk query through MongoS + mongos, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer mongos.Close() + + mongos.SetMode(mgo.Monotonic, true) + + mongos.Refresh() + mongos.SelectServers(bson.D{{"rs2", slave1}}) + coll := mongos.DB("mydb").C("mycoll") + result := &struct{}{} + for i := 0; i != 5; i++ { + err := coll.Find(nil).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) + } + + mongos.Refresh() + mongos.SelectServers(bson.D{{"rs2", slave2}}) + coll = mongos.DB("mydb").C("mycoll") + for i := 0; i != 7; i++ { + err := coll.Find(nil).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) + } + + // Collect op counters for everyone again. 
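+ // The measurement technique, in brief: serverStatus opcounters are
+ // cumulative per-server totals, so sampling them before and after
+ // and subtracting attributes the 5 and 7 slaveOk queries above to
+ // specific members. Illustratively, for each member:
+ //
+ //     delta := after.Query - before.Query // queries that member served meanwhile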
+ opc21b, err := getOpCounters("localhost:40021")
+ c.Assert(err, IsNil)
+ opc22b, err := getOpCounters("localhost:40022")
+ c.Assert(err, IsNil)
+ opc23b, err := getOpCounters("localhost:40023")
+ c.Assert(err, IsNil)
+
+ switch hostPort(master) {
+ case "40021":
+ c.Check(opc21b.Query-opc21a.Query, Equals, 0)
+ c.Check(opc22b.Query-opc22a.Query, Equals, 5)
+ c.Check(opc23b.Query-opc23a.Query, Equals, 7)
+ case "40022":
+ c.Check(opc21b.Query-opc21a.Query, Equals, 5)
+ c.Check(opc22b.Query-opc22a.Query, Equals, 0)
+ c.Check(opc23b.Query-opc23a.Query, Equals, 7)
+ case "40023":
+ c.Check(opc21b.Query-opc21a.Query, Equals, 5)
+ c.Check(opc22b.Query-opc22a.Query, Equals, 7)
+ c.Check(opc23b.Query-opc23a.Query, Equals, 0)
+ default:
+ c.Fatal("Uh?")
+ }
+}
diff --git a/vendor/labix.org/v2/mgo/doc.go b/vendor/labix.org/v2/mgo/doc.go
new file mode 100644
index 0000000..9316c55
--- /dev/null
+++ b/vendor/labix.org/v2/mgo/doc.go
@@ -0,0 +1,31 @@
+// Package mgo offers a rich MongoDB driver for Go.
+//
+// Details about the mgo project (pronounced as "mango") can be found
+// on its web page:
+//
+// http://labix.org/mgo
+//
+// Usage of the driver revolves around the concept of sessions. To
+// get started, obtain a session using the Dial function:
+//
+// session, err := mgo.Dial(url)
+//
+// This will establish one or more connections with the cluster of
+// servers defined by the url parameter. From then on, the cluster
+// may be queried with multiple consistency rules (see SetMode) and
+// documents retrieved with statements such as:
+//
+// c := session.DB(database).C(collection)
+// err := c.Find(query).One(&result)
+//
+// New sessions are typically created by calling session.Copy on the
+// initial session obtained at dial time. These new sessions will share
+// the same cluster information and connection cache, and may be easily
+// handed into other methods and functions for organizing logic.
+// Every session created must have its Close method called at the end
+// of its lifetime, so its resources may be put back in the pool or
+// collected, depending on the case.
+//
+// For more details, see the documentation for the types and methods.
+//
+package mgo
diff --git a/vendor/labix.org/v2/mgo/export_test.go b/vendor/labix.org/v2/mgo/export_test.go
new file mode 100644
index 0000000..b6bfcbc
--- /dev/null
+++ b/vendor/labix.org/v2/mgo/export_test.go
@@ -0,0 +1,42 @@
+package mgo
+
+import (
+ "time"
+)
+
+func HackSocketsPerServer(newLimit int) (restore func()) {
+ oldLimit := socketsPerServer
+ restore = func() {
+ socketsPerServer = oldLimit
+ }
+ socketsPerServer = newLimit
+ return
+}
+
+func HackPingDelay(newDelay time.Duration) (restore func()) {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+
+ oldDelay := pingDelay
+ restore = func() {
+ globalMutex.Lock()
+ pingDelay = oldDelay
+ globalMutex.Unlock()
+ }
+ pingDelay = newDelay
+ return
+}
+
+func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+
+ oldTimeout := syncSocketTimeout
+ restore = func() {
+ globalMutex.Lock()
+ syncSocketTimeout = oldTimeout
+ globalMutex.Unlock()
+ }
+ syncSocketTimeout = newTimeout
+ return
+}
diff --git a/vendor/labix.org/v2/mgo/gridfs.go b/vendor/labix.org/v2/mgo/gridfs.go
new file mode 100644
index 0000000..312f8fb
--- /dev/null
+++ b/vendor/labix.org/v2/mgo/gridfs.go
@@ -0,0 +1,732 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "crypto/md5" + "encoding/hex" + "errors" + "hash" + "io" + "labix.org/v2/mgo/bson" + "os" + "sync" + "time" +) + +type GridFS struct { + Files *Collection + Chunks *Collection +} + +type gfsFileMode int + +const ( + gfsClosed gfsFileMode = 0 + gfsReading gfsFileMode = 1 + gfsWriting gfsFileMode = 2 +) + +type GridFile struct { + m sync.Mutex + c sync.Cond + gfs *GridFS + mode gfsFileMode + err error + + chunk int + offset int64 + + wpending int + wbuf []byte + wsum hash.Hash + + rbuf []byte + rcache *gfsCachedChunk + + doc gfsFile +} + +type gfsFile struct { + Id interface{} "_id" + ChunkSize int "chunkSize" + UploadDate time.Time "uploadDate" + Length int64 ",minsize" + MD5 string + Filename string ",omitempty" + ContentType string "contentType,omitempty" + Metadata *bson.Raw ",omitempty" +} + +type gfsChunk struct { + Id interface{} "_id" + FilesId interface{} "files_id" + N int + Data []byte +} + +type gfsCachedChunk struct { + wait sync.Mutex + n int + data []byte + err error +} + +func newGridFS(db *Database, prefix string) *GridFS { + return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} +} + +func (gfs *GridFS) newFile() *GridFile { + file := &GridFile{gfs: gfs} + file.c.L = &file.m + //runtime.SetFinalizer(file, finalizeFile) + return file +} + +func finalizeFile(file *GridFile) { + file.Close() +} + +// Create creates a new file with the provided name in the GridFS. If the file +// name already exists, a new version will be inserted with an up-to-date +// uploadDate that will cause it to be atomically visible to the Open and +// OpenId methods. If the file name is not important, an empty name may be +// provided and the file Id used instead. +// +// It's important to Close files whether they are being written to +// or read from, and to check the err result to ensure the operation +// completed successfully. 
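+//
+// One illustrative way to keep a deferred Close error (a sketch; the
+// save helper and its names are assumptions, not part of this API):
+//
+// func save(gfs *mgo.GridFS, name string, data []byte) (err error) {
+// file, err := gfs.Create(name)
+// if err != nil {
+// return err
+// }
+// defer func() {
+// if cerr := file.Close(); err == nil {
+// err = cerr // surface the Close error if the write itself succeeded
+// }
+// }()
+// _, err = file.Write(data)
+// return err
+// }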
+//
+// A simple example inserting a new file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// n, err := file.Write([]byte("Hello world!"))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes written\n", n)
+//
+// The io.Writer interface is implemented by *GridFile and may be used to
+// help with file creation. For example:
+//
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// messages, err := os.Open("/var/log/messages")
+// check(err)
+// defer messages.Close()
+// _, err = io.Copy(file, messages)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
+ file = gfs.newFile()
+ file.mode = gfsWriting
+ file.wsum = md5.New()
+ file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 256 * 1024, Filename: name}
+ return
+}
+
+// OpenId returns the file with the provided id, for reading.
+// If the file isn't found, err will be set to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b[:n]))
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it. As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// Open returns the most recently uploaded file with the provided
+// name, for reading. If the file isn't found, err will be set
+// to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b[:n]))
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it.
As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// OpenNext opens the next file from iter for reading, sets *file to it,
+// and returns true on success. If no more documents are available
+// on iter or an error occurred, *file is set to nil and the result is false.
+// Errors will be available via iter.Err().
+//
+// The iter parameter must be an iterator on the GridFS files collection.
+// Using the GridFS.Find method is an easy way to obtain such an iterator,
+// but any iterator on the collection will work.
+//
+// If the provided *file is non-nil, OpenNext will close it before attempting
+// to iterate to the next element. This means that in a loop one only
+// has to worry about closing files when breaking out of the loop early
+// (break, return, or panic).
+//
+// For example:
+//
+// gfs := db.GridFS("fs")
+// query := gfs.Find(nil).Sort("filename")
+// iter := query.Iter()
+// var f *mgo.GridFile
+// for gfs.OpenNext(iter, &f) {
+// fmt.Printf("Filename: %s\n", f.Name())
+// }
+// if iter.Close() != nil {
+// panic(iter.Close())
+// }
+//
+func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
+ if *file != nil {
+ // Ignoring the error here shouldn't be a big deal
+ // as we're reading the file and the loop iteration
+ // for this file is finished.
+ _ = (*file).Close()
+ }
+ var doc gfsFile
+ if !iter.Next(&doc) {
+ *file = nil
+ return false
+ }
+ f := gfs.newFile()
+ f.mode = gfsReading
+ f.doc = doc
+ *file = f
+ return true
+}
+
+// Find runs query on GridFS's files collection and returns
+// the resulting Query.
+//
+// This logic:
+//
+// gfs := db.GridFS("fs")
+// iter := gfs.Find(nil).Iter()
+//
+// Is equivalent to:
+//
+// files := db.C("fs" + ".files")
+// iter := files.Find(nil).Iter()
+//
+func (gfs *GridFS) Find(query interface{}) *Query {
+ return gfs.Files.Find(query)
+}
+
+// RemoveId deletes the file with the provided id from the GridFS.
+func (gfs *GridFS) RemoveId(id interface{}) error {
+ err := gfs.Files.Remove(bson.M{"_id": id})
+ if err != nil {
+ return err
+ }
+ _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
+ return err
+}
+
+type gfsDocId struct {
+ Id interface{} "_id"
+}
+
+// Remove deletes all files with the provided name from the GridFS.
+func (gfs *GridFS) Remove(name string) (err error) {
+ iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
+ var doc gfsDocId
+ for iter.Next(&doc) {
+ if e := gfs.RemoveId(doc.Id); e != nil {
+ err = e
+ }
+ }
+ if err == nil {
+ err = iter.Close()
+ }
+ return err
+}
+
+func (file *GridFile) assertMode(mode gfsFileMode) {
+ switch file.mode {
+ case mode:
+ return
+ case gfsWriting:
+ panic("GridFile is open for writing")
+ case gfsReading:
+ panic("GridFile is open for reading")
+ case gfsClosed:
+ panic("GridFile is closed")
+ default:
+ panic("internal error: missing GridFile mode")
+ }
+}
+
+// SetChunkSize sets the size of saved chunks. Once the file is written to, it
+// will be split in blocks of that size and each block saved into an
+// independent chunk document.
The default chunk size is 256kb. +// +// It is a runtime error to call this function once the file has started +// being written to. +func (file *GridFile) SetChunkSize(bytes int) { + file.assertMode(gfsWriting) + debugf("GridFile %p: setting chunk size to %d", file, bytes) + file.m.Lock() + file.doc.ChunkSize = bytes + file.m.Unlock() +} + +// Id returns the current file Id. +func (file *GridFile) Id() interface{} { + return file.doc.Id +} + +// SetId changes the current file Id. +// +// It is a runtime error to call this function once the file has started +// being written to, or when the file is not open for writing. +func (file *GridFile) SetId(id interface{}) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.Id = id + file.m.Unlock() +} + +// Name returns the optional file name. An empty string will be returned +// in case it is unset. +func (file *GridFile) Name() string { + return file.doc.Filename +} + +// SetName changes the optional file name. An empty string may be used to +// unset it. +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetName(name string) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.Filename = name + file.m.Unlock() +} + +// ContentType returns the optional file content type. An empty string will be +// returned in case it is unset. +func (file *GridFile) ContentType() string { + return file.doc.ContentType +} + +// ContentType changes the optional file content type. An empty string may be +// used to unset it. +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetContentType(ctype string) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.ContentType = ctype + file.m.Unlock() +} + +// GetMeta unmarshals the optional "metadata" field associated with the +// file into the result parameter. The meaning of keys under that field +// is user-defined. For example: +// +// result := struct{ INode int }{} +// err = file.GetMeta(&result) +// if err != nil { +// panic(err.String()) +// } +// fmt.Printf("inode: %d\n", result.INode) +// +func (file *GridFile) GetMeta(result interface{}) (err error) { + file.m.Lock() + if file.doc.Metadata != nil { + err = bson.Unmarshal(file.doc.Metadata.Data, result) + } + file.m.Unlock() + return +} + +// SetMeta changes the optional "metadata" field associated with the +// file. The meaning of keys under that field is user-defined. +// For example: +// +// file.SetMeta(bson.M{"inode": inode}) +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetMeta(metadata interface{}) { + file.assertMode(gfsWriting) + data, err := bson.Marshal(metadata) + file.m.Lock() + if err != nil && file.err == nil { + file.err = err + } else { + file.doc.Metadata = &bson.Raw{Data: data} + } + file.m.Unlock() +} + +// Size returns the file size in bytes. +func (file *GridFile) Size() (bytes int64) { + file.m.Lock() + bytes = file.doc.Length + file.m.Unlock() + return +} + +// MD5 returns the file MD5 as a hex-encoded string. +func (file *GridFile) MD5() (md5 string) { + return file.doc.MD5 +} + +// UploadDate returns the file upload time. +func (file *GridFile) UploadDate() time.Time { + return file.doc.UploadDate +} + +// Close flushes any pending changes in case the file is being written +// to, waits for any background operations to finish, and closes the file. 
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+func (file *GridFile) Close() (err error) {
+ file.m.Lock()
+ defer file.m.Unlock()
+ if file.mode == gfsWriting {
+ if len(file.wbuf) > 0 && file.err == nil {
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+ file.completeWrite()
+ } else if file.mode == gfsReading && file.rcache != nil {
+ file.rcache.wait.Lock()
+ file.rcache = nil
+ }
+ file.mode = gfsClosed
+ debugf("GridFile %p: closed", file)
+ return file.err
+}
+
+func (file *GridFile) completeWrite() {
+ for file.wpending > 0 {
+ debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
+ file.c.Wait()
+ }
+ if file.err != nil {
+ file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+ return
+ }
+ hexsum := hex.EncodeToString(file.wsum.Sum(nil))
+ file.doc.UploadDate = bson.Now()
+ file.doc.MD5 = hexsum
+ file.err = file.gfs.Files.Insert(file.doc)
+ file.gfs.Chunks.EnsureIndexKey("files_id", "n")
+}
+
+// Abort cancels an in-progress write, preventing the file from being
+// automatically created and ensuring previously written chunks are
+// removed when the file is closed.
+//
+// It is a runtime error to call Abort when the file was not opened
+// for writing.
+func (file *GridFile) Abort() {
+ if file.mode != gfsWriting {
+ panic("file.Abort must be called on file opened for writing")
+ }
+ file.err = errors.New("write aborted")
+}
+
+// Write writes the provided data to the file and returns the
+// number of bytes written and an error if something went wrong.
+//
+// The file will internally cache the data so that all but the last
+// chunk sent to the database have the size defined by SetChunkSize.
+// This also means that errors may be deferred until a future call
+// to Write or Close.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Writer.
+func (file *GridFile) Write(data []byte) (n int, err error) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ debugf("GridFile %p: writing %d bytes", file, len(data))
+ defer file.m.Unlock()
+
+ if file.err != nil {
+ return 0, file.err
+ }
+
+ n = len(data)
+ file.doc.Length += int64(n)
+ chunkSize := file.doc.ChunkSize
+
+ if len(file.wbuf)+len(data) < chunkSize {
+ file.wbuf = append(file.wbuf, data...)
+ return
+ }
+
+ // First, flush file.wbuf complementing with data.
+ if len(file.wbuf) > 0 {
+ missing := chunkSize - len(file.wbuf)
+ if missing > len(data) {
+ missing = len(data)
+ }
+ file.wbuf = append(file.wbuf, data[:missing]...)
+ data = data[missing:]
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+
+ // Then, flush all chunks from data without copying.
+ for len(data) > chunkSize {
+ size := chunkSize
+ if size > len(data) {
+ size = len(data)
+ }
+ file.insertChunk(data[:size])
+ data = data[size:]
+ }
+
+ // And append the rest for a future call.
+ file.wbuf = append(file.wbuf, data...)
+
+ return n, file.err
+}
+
+func (file *GridFile) insertChunk(data []byte) {
+ n := file.chunk
+ file.chunk++
+ debugf("GridFile %p: adding to checksum: %q", file, string(data))
+ file.wsum.Write(data)
+
+ for file.doc.ChunkSize*file.wpending >= 1024*1024 {
+ // Hold on.. we got a MB pending.
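+ // Spelled out: the loop condition above blocks the writer whenever
+ //
+ //     ChunkSize * wpending >= 1 MiB (1024*1024 bytes)
+ //
+ // of chunk inserts is still in flight; the c.Broadcast in the insert
+ // goroutine below wakes it up as pending chunks complete.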
+ file.c.Wait()
+ if file.err != nil {
+ return
+ }
+ }
+
+ file.wpending++
+
+ debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
+
+ // We may not own the memory of data, so rather than
+ // simply copying it, we'll marshal the document ahead of time.
+ data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
+ if err != nil {
+ file.err = err
+ return
+ }
+
+ go func() {
+ err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
+ file.m.Lock()
+ file.wpending--
+ if err != nil && file.err == nil {
+ file.err = err
+ }
+ file.c.Broadcast()
+ file.m.Unlock()
+ }()
+}
+
+// Seek sets the offset for the next Read or Write on file to
+// offset, interpreted according to whence: 0 means relative to
+// the origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end. It returns the new offset and
+// an error, if any.
+func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
+ file.m.Lock()
+ debugf("GridFile %p: seeking for %d (whence=%d)", file, offset, whence)
+ defer file.m.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ case os.SEEK_CUR:
+ offset += file.offset
+ case os.SEEK_END:
+ offset += file.doc.Length
+ default:
+ panic("unsupported whence value")
+ }
+ if offset > file.doc.Length {
+ return file.offset, errors.New("seek past end of file")
+ }
+ chunk := int(offset / int64(file.doc.ChunkSize))
+ if chunk+1 == file.chunk && offset >= file.offset {
+ file.rbuf = file.rbuf[int(offset-file.offset):]
+ file.offset = offset
+ return file.offset, nil
+ }
+ file.offset = offset
+ file.chunk = chunk
+ file.rbuf = nil
+ file.rbuf, err = file.getChunk()
+ if err == nil {
+ file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
+ }
+ return file.offset, err
+}
+
+// Read reads into b the next available data from the file and
+// returns the number of bytes read and an error if something
+// went wrong. At the end of the file, n will
+// be zero and err will be set to io.EOF.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Reader.
+func (file *GridFile) Read(b []byte) (n int, err error) {
+ file.assertMode(gfsReading)
+ file.m.Lock()
+ debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
+ defer file.m.Unlock()
+ if file.offset == file.doc.Length {
+ return 0, io.EOF
+ }
+ for err == nil {
+ i := copy(b, file.rbuf)
+ n += i
+ file.offset += int64(i)
+ file.rbuf = file.rbuf[i:]
+ if i == len(b) || file.offset == file.doc.Length {
+ break
+ }
+ b = b[i:]
+ file.rbuf, err = file.getChunk()
+ }
+ return n, err
+}
+
+func (file *GridFile) getChunk() (data []byte, err error) {
+ cache := file.rcache
+ file.rcache = nil
+ if cache != nil && cache.n == file.chunk {
+ debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
+ cache.wait.Lock()
+ data, err = cache.data, cache.err
+ } else {
+ debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
+ var doc gfsChunk
+ err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+ data = doc.Data
+ }
+ file.chunk++
+ if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
+ // Read the next one in background.
+ cache = &gfsCachedChunk{n: file.chunk}
+ cache.wait.Lock()
+ debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
+ // Clone the session to avoid having it closed in between.
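+ // A background goroutine must not rely on a session the caller may
+ // close first; cloning gives it an independent reference. The same
+ // pattern in isolation (illustrative names):
+ //
+ //     worker := session.Clone()
+ //     go func() {
+ //         defer worker.Close() // releases only the clone's reference
+ //         // ... run the background query on worker ...
+ //     }()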
+ chunks := file.gfs.Chunks + session := chunks.Database.Session.Clone() + go func(id interface{}, n int) { + defer session.Close() + chunks = chunks.With(session) + var doc gfsChunk + cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc) + cache.data = doc.Data + cache.wait.Unlock() + }(file.doc.Id, file.chunk) + file.rcache = cache + } + debugf("Returning err: %#v", err) + return +} diff --git a/vendor/labix.org/v2/mgo/gridfs_test.go b/vendor/labix.org/v2/mgo/gridfs_test.go new file mode 100644 index 0000000..fbdd5b0 --- /dev/null +++ b/vendor/labix.org/v2/mgo/gridfs_test.go @@ -0,0 +1,644 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "io" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + . "launchpad.net/gocheck" + "os" + "time" +) + +func (s *S) TestGridFSCreate(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + before := bson.Now() + + gfs := db.GridFS("fs") + file, err := gfs.Create("") + c.Assert(err, IsNil) + + n, err := file.Write([]byte("some data")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 9) + + err = file.Close() + c.Assert(err, IsNil) + + after := bson.Now() + + // Check the file information. + result := M{} + err = db.C("fs.files").Find(nil).One(result) + c.Assert(err, IsNil) + + fileId, ok := result["_id"].(bson.ObjectId) + c.Assert(ok, Equals, true) + c.Assert(fileId.Valid(), Equals, true) + result["_id"] = "" + + ud, ok := result["uploadDate"].(time.Time) + c.Assert(ok, Equals, true) + c.Assert(ud.After(before) && ud.Before(after), Equals, true) + result["uploadDate"] = "" + + expected := M{ + "_id": "", + "length": 9, + "chunkSize": 262144, + "uploadDate": "", + "md5": "1e50210a0202497fb79bc38b6ade6c34", + } + c.Assert(result, DeepEquals, expected) + + // Check the chunk. 
+ result = M{} + err = db.C("fs.chunks").Find(nil).One(result) + c.Assert(err, IsNil) + + chunkId, ok := result["_id"].(bson.ObjectId) + c.Assert(ok, Equals, true) + c.Assert(chunkId.Valid(), Equals, true) + result["_id"] = "" + + expected = M{ + "_id": "", + "files_id": fileId, + "n": 0, + "data": []byte("some data"), + } + c.Assert(result, DeepEquals, expected) + + // Check that an index was created. + indexes, err := db.C("fs.chunks").Indexes() + c.Assert(err, IsNil) + c.Assert(len(indexes), Equals, 2) + c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"}) +} + +func (s *S) TestGridFSFileDetails(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("myfile1.txt") + c.Assert(err, IsNil) + + n, err := file.Write([]byte("some")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 4) + + c.Assert(file.Size(), Equals, int64(4)) + + n, err = file.Write([]byte(" data")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 5) + + c.Assert(file.Size(), Equals, int64(9)) + + id, _ := file.Id().(bson.ObjectId) + c.Assert(id.Valid(), Equals, true) + c.Assert(file.Name(), Equals, "myfile1.txt") + c.Assert(file.ContentType(), Equals, "") + + var info interface{} + err = file.GetMeta(&info) + c.Assert(err, IsNil) + c.Assert(info, IsNil) + + file.SetId("myid") + file.SetName("myfile2.txt") + file.SetContentType("text/plain") + file.SetMeta(M{"any": "thing"}) + + c.Assert(file.Id(), Equals, "myid") + c.Assert(file.Name(), Equals, "myfile2.txt") + c.Assert(file.ContentType(), Equals, "text/plain") + + err = file.GetMeta(&info) + c.Assert(err, IsNil) + c.Assert(info, DeepEquals, bson.M{"any": "thing"}) + + err = file.Close() + c.Assert(err, IsNil) + + c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34") + + ud := file.UploadDate() + now := time.Now() + c.Assert(ud.Before(now), Equals, true) + c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true) + + result := M{} + err = db.C("fs.files").Find(nil).One(result) + c.Assert(err, IsNil) + + result["uploadDate"] = "" + + expected := M{ + "_id": "myid", + "length": 9, + "chunkSize": 262144, + "uploadDate": "", + "md5": "1e50210a0202497fb79bc38b6ade6c34", + "filename": "myfile2.txt", + "contentType": "text/plain", + "metadata": M{"any": "thing"}, + } + c.Assert(result, DeepEquals, expected) +} + +func (s *S) TestGridFSCreateWithChunking(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("") + c.Assert(err, IsNil) + + file.SetChunkSize(5) + + // Smaller than the chunk size. + n, err := file.Write([]byte("abc")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + + // Boundary in the middle. + n, err = file.Write([]byte("defg")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 4) + + // Boundary at the end. + n, err = file.Write([]byte("hij")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + + // Larger than the chunk size, with 3 chunks. + n, err = file.Write([]byte("klmnopqrstuv")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 12) + + err = file.Close() + c.Assert(err, IsNil) + + // Check the file information. 
+ result := M{} + err = db.C("fs.files").Find(nil).One(result) + c.Assert(err, IsNil) + + fileId, _ := result["_id"].(bson.ObjectId) + c.Assert(fileId.Valid(), Equals, true) + result["_id"] = "" + result["uploadDate"] = "" + + expected := M{ + "_id": "", + "length": 22, + "chunkSize": 5, + "uploadDate": "", + "md5": "44a66044834cbe55040089cabfc102d5", + } + c.Assert(result, DeepEquals, expected) + + // Check the chunks. + iter := db.C("fs.chunks").Find(nil).Sort("n").Iter() + dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"} + for i := 0; ; i++ { + result = M{} + if !iter.Next(result) { + if i != 5 { + c.Fatalf("Expected 5 chunks, got %d", i) + } + break + } + c.Assert(iter.Close(), IsNil) + + result["_id"] = "" + + expected = M{ + "_id": "", + "files_id": fileId, + "n": i, + "data": []byte(dataChunks[i]), + } + c.Assert(result, DeepEquals, expected) + } +} + +func (s *S) TestGridFSAbort(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + file, err := gfs.Create("") + c.Assert(err, IsNil) + + file.SetChunkSize(5) + + n, err := file.Write([]byte("some data")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 9) + + var count int + for i := 0; i < 10; i++ { + count, err = db.C("fs.chunks").Count() + if count > 0 || err != nil { + break + } + } + c.Assert(err, IsNil) + c.Assert(count, Equals, 1) + + file.Abort() + + err = file.Close() + c.Assert(err, ErrorMatches, "write aborted") + + count, err = db.C("fs.chunks").Count() + c.Assert(err, IsNil) + c.Assert(count, Equals, 0) +} + +func (s *S) TestGridFSOpenNotFound(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + file, err := gfs.OpenId("non-existent") + c.Assert(err == mgo.ErrNotFound, Equals, true) + c.Assert(file, IsNil) + + file, err = gfs.Open("non-existent") + c.Assert(err == mgo.ErrNotFound, Equals, true) + c.Assert(file, IsNil) +} + +func (s *S) TestGridFSReadAll(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + file, err := gfs.Create("") + c.Assert(err, IsNil) + id := file.Id() + + file.SetChunkSize(5) + + n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 22) + + err = file.Close() + c.Assert(err, IsNil) + + file, err = gfs.OpenId(id) + c.Assert(err, IsNil) + + b := make([]byte, 30) + n, err = file.Read(b) + c.Assert(n, Equals, 22) + c.Assert(err, IsNil) + + n, err = file.Read(b) + c.Assert(n, Equals, 0) + c.Assert(err == io.EOF, Equals, true) + + err = file.Close() + c.Assert(err, IsNil) +} + +func (s *S) TestGridFSReadChunking(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("") + c.Assert(err, IsNil) + + id := file.Id() + + file.SetChunkSize(5) + + n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 22) + + err = file.Close() + c.Assert(err, IsNil) + + file, err = gfs.OpenId(id) + c.Assert(err, IsNil) + + b := make([]byte, 30) + + // Smaller than the chunk size. + n, err = file.Read(b[:3]) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(b[:3], DeepEquals, []byte("abc")) + + // Boundary in the middle. 
+ n, err = file.Read(b[:4]) + c.Assert(err, IsNil) + c.Assert(n, Equals, 4) + c.Assert(b[:4], DeepEquals, []byte("defg")) + + // Boundary at the end. + n, err = file.Read(b[:3]) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(b[:3], DeepEquals, []byte("hij")) + + // Larger than the chunk size, with 3 chunks. + n, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(n, Equals, 12) + c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv")) + + n, err = file.Read(b) + c.Assert(n, Equals, 0) + c.Assert(err == io.EOF, Equals, true) + + err = file.Close() + c.Assert(err, IsNil) +} + +func (s *S) TestGridFSOpen(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'1'}) + file.Close() + + file, err = gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'2'}) + file.Close() + + file, err = gfs.Open("myfile.txt") + c.Assert(err, IsNil) + defer file.Close() + + var b [1]byte + + _, err = file.Read(b[:]) + c.Assert(err, IsNil) + c.Assert(string(b[:]), Equals, "2") +} + +func (s *S) TestGridFSSeek(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + file, err := gfs.Create("") + c.Assert(err, IsNil) + id := file.Id() + + file.SetChunkSize(5) + + n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 22) + + err = file.Close() + c.Assert(err, IsNil) + + b := make([]byte, 5) + + file, err = gfs.OpenId(id) + c.Assert(err, IsNil) + + o, err := file.Seek(3, os.SEEK_SET) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(3)) + _, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(b, DeepEquals, []byte("defgh")) + + o, err = file.Seek(5, os.SEEK_CUR) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(13)) + _, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(b, DeepEquals, []byte("nopqr")) + + o, err = file.Seek(-10, os.SEEK_END) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(12)) + _, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(b, DeepEquals, []byte("mnopq")) + + o, err = file.Seek(8, os.SEEK_SET) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(8)) + _, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(b, DeepEquals, []byte("ijklm")) + + // Trivial seek forward within same chunk. Already + // got the data, shouldn't touch the database. + sent := mgo.GetStats().SentOps + o, err = file.Seek(1, os.SEEK_CUR) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(14)) + c.Assert(mgo.GetStats().SentOps, Equals, sent) + _, err = file.Read(b) + c.Assert(err, IsNil) + c.Assert(b, DeepEquals, []byte("opqrs")) + + // Try seeking past end of file. 
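+ // (Reposition to a known offset first, so the assertions below can
+ // verify that the failed seek reports an error and leaves the
+ // offset untouched at 3.)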
+ file.Seek(3, os.SEEK_SET) + o, err = file.Seek(23, os.SEEK_SET) + c.Assert(err, ErrorMatches, "seek past end of file") + c.Assert(o, Equals, int64(3)) +} + +func (s *S) TestGridFSRemoveId(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'1'}) + file.Close() + + file, err = gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'2'}) + id := file.Id() + file.Close() + + err = gfs.RemoveId(id) + c.Assert(err, IsNil) + + file, err = gfs.Open("myfile.txt") + c.Assert(err, IsNil) + defer file.Close() + + var b [1]byte + + _, err = file.Read(b[:]) + c.Assert(err, IsNil) + c.Assert(string(b[:]), Equals, "1") + + n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) +} + +func (s *S) TestGridFSRemove(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'1'}) + file.Close() + + file, err = gfs.Create("myfile.txt") + c.Assert(err, IsNil) + file.Write([]byte{'2'}) + file.Close() + + err = gfs.Remove("myfile.txt") + c.Assert(err, IsNil) + + _, err = gfs.Open("myfile.txt") + c.Assert(err == mgo.ErrNotFound, Equals, true) + + n, err := db.C("fs.chunks").Find(nil).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) +} + +func (s *S) TestGridFSOpenNext(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + + file, err := gfs.Create("myfile1.txt") + c.Assert(err, IsNil) + file.Write([]byte{'1'}) + file.Close() + + file, err = gfs.Create("myfile2.txt") + c.Assert(err, IsNil) + file.Write([]byte{'2'}) + file.Close() + + var f *mgo.GridFile + var b [1]byte + + iter := gfs.Find(nil).Sort("-filename").Iter() + + ok := gfs.OpenNext(iter, &f) + c.Assert(ok, Equals, true) + c.Check(f.Name(), Equals, "myfile2.txt") + + _, err = f.Read(b[:]) + c.Assert(err, IsNil) + c.Assert(string(b[:]), Equals, "2") + + ok = gfs.OpenNext(iter, &f) + c.Assert(ok, Equals, true) + c.Check(f.Name(), Equals, "myfile1.txt") + + _, err = f.Read(b[:]) + c.Assert(err, IsNil) + c.Assert(string(b[:]), Equals, "1") + + ok = gfs.OpenNext(iter, &f) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + c.Assert(f, IsNil) + + // Do it again with a more restrictive query to make sure + // it's actually taken into account. + iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter() + + ok = gfs.OpenNext(iter, &f) + c.Assert(ok, Equals, true) + c.Check(f.Name(), Equals, "myfile1.txt") + + ok = gfs.OpenNext(iter, &f) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + c.Assert(f, IsNil) +} diff --git a/vendor/labix.org/v2/mgo/log.go b/vendor/labix.org/v2/mgo/log.go new file mode 100644 index 0000000..9abbe21 --- /dev/null +++ b/vendor/labix.org/v2/mgo/log.go @@ -0,0 +1,133 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. 
+// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "fmt" + "sync" +) + +// --------------------------------------------------------------------------- +// Logging integration. + +// Avoid importing the log type information unnecessarily. There's a small cost +// associated with using an interface rather than the type. Depending on how +// often the logger is plugged in, it would be worth using the type instead. +type log_Logger interface { + Output(calldepth int, s string) error +} + +var ( + globalLogger log_Logger + globalDebug bool + globalMutex sync.Mutex +) + +// RACE WARNING: There are known data races when logging, which are manually +// silenced when the race detector is in use. These data races won't be +// observed in typical use, because logging is supposed to be set up once when +// the application starts. Having raceDetector as a constant, the compiler +// should elide the locks altogether in actual use. + +// Specify the *log.Logger object where log messages should be sent to. +func SetLogger(logger log_Logger) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + globalLogger = logger +} + +// Enable the delivery of debug messages to the logger. Only meaningful +// if a logger is also set. 
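+//
+// A minimal wiring sketch (illustrative only; any value with an
+// Output(calldepth int, s string) error method will do, and the
+// standard library's *log.Logger satisfies it):
+//
+// logger := log.New(os.Stderr, "[mgo] ", log.LstdFlags)
+// mgo.SetLogger(logger)
+// mgo.SetDebug(true)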
+func SetDebug(debug bool) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + globalDebug = debug +} + +func log(v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalLogger != nil { + globalLogger.Output(2, fmt.Sprint(v...)) + } +} + +func logln(v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalLogger != nil { + globalLogger.Output(2, fmt.Sprintln(v...)) + } +} + +func logf(format string, v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalLogger != nil { + globalLogger.Output(2, fmt.Sprintf(format, v...)) + } +} + +func debug(v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalDebug && globalLogger != nil { + globalLogger.Output(2, fmt.Sprint(v...)) + } +} + +func debugln(v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalDebug && globalLogger != nil { + globalLogger.Output(2, fmt.Sprintln(v...)) + } +} + +func debugf(format string, v ...interface{}) { + if raceDetector { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if globalDebug && globalLogger != nil { + globalLogger.Output(2, fmt.Sprintf(format, v...)) + } +} diff --git a/vendor/labix.org/v2/mgo/queue.go b/vendor/labix.org/v2/mgo/queue.go new file mode 100644 index 0000000..e9245de --- /dev/null +++ b/vendor/labix.org/v2/mgo/queue.go @@ -0,0 +1,91 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package mgo + +type queue struct { + elems []interface{} + nelems, popi, pushi int +} + +func (q *queue) Len() int { + return q.nelems +} + +func (q *queue) Push(elem interface{}) { + //debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n", + // q.pushi, q.popi, len(q.elems), elem) + if q.nelems == len(q.elems) { + q.expand() + } + q.elems[q.pushi] = elem + q.nelems++ + q.pushi = (q.pushi + 1) % len(q.elems) + //debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n", + // q.pushi, q.popi, len(q.elems), elem) +} + +func (q *queue) Pop() (elem interface{}) { + //debugf("Popping(pushi=%d popi=%d cap=%d)\n", + // q.pushi, q.popi, len(q.elems)) + if q.nelems == 0 { + return nil + } + elem = q.elems[q.popi] + q.elems[q.popi] = nil // Help GC. + q.nelems-- + q.popi = (q.popi + 1) % len(q.elems) + //debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n", + // q.pushi, q.popi, len(q.elems), elem) + return elem +} + +func (q *queue) expand() { + curcap := len(q.elems) + var newcap int + if curcap == 0 { + newcap = 8 + } else if curcap < 1024 { + newcap = curcap * 2 + } else { + newcap = curcap + (curcap / 4) + } + elems := make([]interface{}, newcap) + + if q.popi == 0 { + copy(elems, q.elems) + q.pushi = curcap + } else { + newpopi := newcap - (curcap - q.popi) + copy(elems, q.elems[:q.popi]) + copy(elems[newpopi:], q.elems[q.popi:]) + q.popi = newpopi + } + for i := range q.elems { + q.elems[i] = nil // Help GC. + } + q.elems = elems +} diff --git a/vendor/labix.org/v2/mgo/queue_test.go b/vendor/labix.org/v2/mgo/queue_test.go new file mode 100644 index 0000000..38b0325 --- /dev/null +++ b/vendor/labix.org/v2/mgo/queue_test.go @@ -0,0 +1,104 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "launchpad.net/gocheck" +) + +type QS struct{} + +var _ = gocheck.Suite(&QS{}) + +func (s *QS) TestSequentialGrowth(c *gocheck.C) { + q := queue{} + n := 2048 + for i := 0; i != n; i++ { + q.Push(i) + } + for i := 0; i != n; i++ { + c.Assert(q.Pop(), gocheck.Equals, i) + } +} + +var queueTestLists = [][]int{ + // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + + // {8, 9, 10, 11, ... 
2, 3, 4, 5, 6, 7} + {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11}, + + // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} + {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11}, + + // {0, 1, 2, 3, 4, 5, 6, 7, 8} + {0, 1, 2, 3, 4, 5, 6, 7, 8, + -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8}, +} + +func (s *QS) TestQueueTestLists(c *gocheck.C) { + test := []int{} + testi := 0 + reset := func() { + test = test[0:0] + testi = 0 + } + push := func(i int) { + test = append(test, i) + } + pop := func() (i int) { + if testi == len(test) { + return -1 + } + i = test[testi] + testi++ + return + } + + for _, list := range queueTestLists { + reset() + q := queue{} + for _, n := range list { + if n == -1 { + c.Assert(q.Pop(), gocheck.Equals, pop(), + gocheck.Commentf("With list %#v", list)) + } else { + q.Push(n) + push(n) + } + } + + for n := pop(); n != -1; n = pop() { + c.Assert(q.Pop(), gocheck.Equals, n, + gocheck.Commentf("With list %#v", list)) + } + + c.Assert(q.Pop(), gocheck.Equals, nil, + gocheck.Commentf("With list %#v", list)) + } +} diff --git a/vendor/labix.org/v2/mgo/raceoff.go b/vendor/labix.org/v2/mgo/raceoff.go new file mode 100644 index 0000000..7470dd6 --- /dev/null +++ b/vendor/labix.org/v2/mgo/raceoff.go @@ -0,0 +1,6 @@ +// +build !race + +package mgo + +const raceDetector = false + diff --git a/vendor/labix.org/v2/mgo/raceon.go b/vendor/labix.org/v2/mgo/raceon.go new file mode 100644 index 0000000..737b08e --- /dev/null +++ b/vendor/labix.org/v2/mgo/raceon.go @@ -0,0 +1,5 @@ +// +build race + +package mgo + +const raceDetector = true diff --git a/vendor/labix.org/v2/mgo/saslimpl.go b/vendor/labix.org/v2/mgo/saslimpl.go new file mode 100644 index 0000000..3b255de --- /dev/null +++ b/vendor/labix.org/v2/mgo/saslimpl.go @@ -0,0 +1,11 @@ +//+build sasl + +package mgo + +import ( + "labix.org/v2/mgo/sasl" +) + +func saslNew(cred Credential, host string) (saslStepper, error) { + return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host) +} diff --git a/vendor/labix.org/v2/mgo/saslstub.go b/vendor/labix.org/v2/mgo/saslstub.go new file mode 100644 index 0000000..6e9e309 --- /dev/null +++ b/vendor/labix.org/v2/mgo/saslstub.go @@ -0,0 +1,11 @@ +//+build !sasl + +package mgo + +import ( + "fmt" +) + +func saslNew(cred Credential, host string) (saslStepper, error) { + return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)") +} diff --git a/vendor/labix.org/v2/mgo/server.go b/vendor/labix.org/v2/mgo/server.go new file mode 100644 index 0000000..d61f018 --- /dev/null +++ b/vendor/labix.org/v2/mgo/server.go @@ -0,0 +1,444 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "errors" + "labix.org/v2/mgo/bson" + "net" + "sort" + "sync" + "time" +) + +// --------------------------------------------------------------------------- +// Mongo server encapsulation. + +type mongoServer struct { + sync.RWMutex + Addr string + ResolvedAddr string + tcpaddr *net.TCPAddr + unusedSockets []*mongoSocket + liveSockets []*mongoSocket + closed bool + abended bool + sync chan bool + dial dialer + pingValue time.Duration + pingIndex int + pingCount uint32 + pingWindow [6]time.Duration + info *mongoServerInfo +} + +type dialer struct { + old func(addr net.Addr) (net.Conn, error) + new func(addr *ServerAddr) (net.Conn, error) +} + +func (dial dialer) isSet() bool { + return dial.old != nil || dial.new != nil +} + +type mongoServerInfo struct { + Master bool + Mongos bool + Tags bson.D +} + +var defaultServerInfo mongoServerInfo + +func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer { + server := &mongoServer{ + Addr: addr, + ResolvedAddr: tcpaddr.String(), + tcpaddr: tcpaddr, + sync: sync, + dial: dial, + info: &defaultServerInfo, + } + // Once so the server gets a ping value, then loop in background. + server.pinger(false) + go server.pinger(true) + return server +} + +var errSocketLimit = errors.New("per-server connection limit reached") +var errServerClosed = errors.New("server was closed") + +// AcquireSocket returns a socket for communicating with the server. +// This will attempt to reuse an old connection, if one is available. Otherwise, +// it will establish a new one. The returned socket is owned by the call site, +// and will return to the cache when the socket has its Release method called +// the same number of times as AcquireSocket + Acquire were called for it. +// If the limit argument is not zero, a socket will only be returned if the +// number of sockets in use for this server is under the provided limit. +func (server *mongoServer) AcquireSocket(limit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) { + for { + server.Lock() + abended = server.abended + if server.closed { + server.Unlock() + return nil, abended, errServerClosed + } + n := len(server.unusedSockets) + if limit > 0 && len(server.liveSockets)-n >= limit { + server.Unlock() + return nil, false, errSocketLimit + } + if n > 0 { + socket = server.unusedSockets[n-1] + server.unusedSockets[n-1] = nil // Help GC. 
+ server.unusedSockets = server.unusedSockets[:n-1] + info := server.info + server.Unlock() + err = socket.InitialAcquire(info, timeout) + if err != nil { + continue + } + } else { + server.Unlock() + socket, err = server.Connect(timeout) + if err == nil { + server.Lock() + // We've waited for the Connect, see if we got + // closed in the meantime + if server.closed { + server.Unlock() + socket.Release() + socket.Close() + return nil, abended, errServerClosed + } + server.liveSockets = append(server.liveSockets, socket) + server.Unlock() + } + } + return + } + panic("unreachable") +} + +// Connect establishes a new connection to the server. This should +// generally be done through server.AcquireSocket(). +func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) { + server.RLock() + master := server.info.Master + dial := server.dial + server.RUnlock() + + logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout) + var conn net.Conn + var err error + switch { + case !dial.isSet(): + // Cannot do this because it lacks timeout support. :-( + //conn, err = net.DialTCP("tcp", nil, server.tcpaddr) + conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout) + case dial.old != nil: + conn, err = dial.old(server.tcpaddr) + case dial.new != nil: + conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr}) + default: + panic("dialer is set, but both dial.old and dial.new are nil") + } + if err != nil { + logf("Connection to %s failed: %v", server.Addr, err.Error()) + return nil, err + } + logf("Connection to %s established.", server.Addr) + + stats.conn(+1, master) + return newSocket(server, conn, timeout), nil +} + +// Close forces closing all sockets that are alive, whether +// they're currently in use or not. +func (server *mongoServer) Close() { + server.Lock() + server.closed = true + liveSockets := server.liveSockets + unusedSockets := server.unusedSockets + server.liveSockets = nil + server.unusedSockets = nil + server.Unlock() + logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets)) + for i, s := range liveSockets { + s.Close() + liveSockets[i] = nil + } + for i := range unusedSockets { + unusedSockets[i] = nil + } +} + +// RecycleSocket puts socket back into the unused cache. +func (server *mongoServer) RecycleSocket(socket *mongoSocket) { + server.Lock() + if !server.closed { + server.unusedSockets = append(server.unusedSockets, socket) + } + server.Unlock() +} + +func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket { + for i, s := range sockets { + if s == socket { + copy(sockets[i:], sockets[i+1:]) + n := len(sockets) - 1 + sockets[n] = nil + sockets = sockets[:n] + break + } + } + return sockets +} + +// AbendSocket notifies the server that the given socket has terminated +// abnormally, and thus should be discarded rather than cached. +func (server *mongoServer) AbendSocket(socket *mongoSocket) { + server.Lock() + server.abended = true + if server.closed { + server.Unlock() + return + } + server.liveSockets = removeSocket(server.liveSockets, socket) + server.unusedSockets = removeSocket(server.unusedSockets, socket) + server.Unlock() + // Maybe just a timeout, but suggest a cluster sync up just in case. 
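+ // (The select below is a non-blocking send: if a cluster sync is
+ // already queued on server.sync, this abend simply piggybacks on it.)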
+ select { + case server.sync <- true: + default: + } +} + +func (server *mongoServer) SetInfo(info *mongoServerInfo) { + server.Lock() + server.info = info + server.Unlock() +} + +func (server *mongoServer) Info() *mongoServerInfo { + server.Lock() + info := server.info + server.Unlock() + return info +} + +func (server *mongoServer) hasTags(serverTags []bson.D) bool { +NextTagSet: + for _, tags := range serverTags { + NextReqTag: + for _, req := range tags { + for _, has := range server.info.Tags { + if req.Name == has.Name { + if req.Value == has.Value { + continue NextReqTag + } + continue NextTagSet + } + } + continue NextTagSet + } + return true + } + return false +} + +var pingDelay = 5 * time.Second + +func (server *mongoServer) pinger(loop bool) { + var delay time.Duration + if raceDetector { + // This variable is only ever touched by tests. + globalMutex.Lock() + delay = pingDelay + globalMutex.Unlock() + } else { + delay = pingDelay + } + op := queryOp{ + collection: "admin.$cmd", + query: bson.D{{"ping", 1}}, + flags: flagSlaveOk, + limit: -1, + } + for { + if loop { + time.Sleep(delay) + } + op := op + socket, _, err := server.AcquireSocket(0, 3 * delay) + if err == nil { + start := time.Now() + _, _ = socket.SimpleQuery(&op) + delay := time.Now().Sub(start) + + server.pingWindow[server.pingIndex] = delay + server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow) + server.pingCount++ + var max time.Duration + for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ { + if server.pingWindow[i] > max { + max = server.pingWindow[i] + } + } + socket.Release() + server.Lock() + if server.closed { + loop = false + } + server.pingValue = max + server.Unlock() + logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond) + } else if err == errServerClosed { + return + } + if !loop { + return + } + } +} + +type mongoServerSlice []*mongoServer + +func (s mongoServerSlice) Len() int { + return len(s) +} + +func (s mongoServerSlice) Less(i, j int) bool { + return s[i].ResolvedAddr < s[j].ResolvedAddr +} + +func (s mongoServerSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s mongoServerSlice) Sort() { + sort.Sort(s) +} + +func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) { + n := len(s) + i = sort.Search(n, func(i int) bool { + return s[i].ResolvedAddr >= resolvedAddr + }) + return i, i != n && s[i].ResolvedAddr == resolvedAddr +} + +type mongoServers struct { + slice mongoServerSlice +} + +func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) { + if i, ok := servers.slice.Search(resolvedAddr); ok { + return servers.slice[i] + } + return nil +} + +func (servers *mongoServers) Add(server *mongoServer) { + servers.slice = append(servers.slice, server) + servers.slice.Sort() +} + +func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) { + if i, found := servers.slice.Search(other.ResolvedAddr); found { + server = servers.slice[i] + copy(servers.slice[i:], servers.slice[i+1:]) + n := len(servers.slice) - 1 + servers.slice[n] = nil // Help GC. 
+ servers.slice = servers.slice[:n]
+ }
+ return
+}
+
+func (servers *mongoServers) Slice() []*mongoServer {
+ return ([]*mongoServer)(servers.slice)
+}
+
+func (servers *mongoServers) Get(i int) *mongoServer {
+ return servers.slice[i]
+}
+
+func (servers *mongoServers) Len() int {
+ return len(servers.slice)
+}
+
+func (servers *mongoServers) Empty() bool {
+ return len(servers.slice) == 0
+}
+
+// BestFit returns the best guess of what would be the most interesting
+// server to perform operations on at this point in time.
+func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer {
+ var best *mongoServer
+ for _, next := range servers.slice {
+ if best == nil {
+ best = next
+ best.RLock()
+ if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
+ best.RUnlock()
+ best = nil
+ }
+ continue
+ }
+ next.RLock()
+ swap := false
+ switch {
+ case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
+ // Must have requested tags.
+ case next.info.Master != best.info.Master:
+ // Prefer slaves.
+ swap = best.info.Master
+ case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
+ // Prefer nearest server.
+ swap = next.pingValue < best.pingValue
+ case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
+ // Prefer servers with fewer connections.
+ swap = true
+ }
+ if swap {
+ best.RUnlock()
+ best = next
+ } else {
+ next.RUnlock()
+ }
+ }
+ if best != nil {
+ best.RUnlock()
+ }
+ return best
+}
+
+func absDuration(d time.Duration) time.Duration {
+ if d < 0 {
+ return -d
+ }
+ return d
+} diff --git a/vendor/labix.org/v2/mgo/session.go b/vendor/labix.org/v2/mgo/session.go new file mode 100644 index 0000000..0ca86e8 --- /dev/null +++ b/vendor/labix.org/v2/mgo/session.go @@ -0,0 +1,3517 @@ +// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "labix.org/v2/mgo/bson"
+ "math"
+ "net"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+type mode int
+
+const (
+ Eventual mode = 0
+ Monotonic mode = 1
+ Strong mode = 2
+)
+
+// When changing the Session type, check if newSession and copySession
+// need to be updated too.
+
+type Session struct {
+ m sync.RWMutex
+ cluster_ *mongoCluster
+ slaveSocket *mongoSocket
+ masterSocket *mongoSocket
+ slaveOk bool
+ consistency mode
+ queryConfig query
+ safeOp *queryOp
+ syncTimeout time.Duration
+ sockTimeout time.Duration
+ defaultdb string
+ sourcedb string
+ dialCred *Credential
+ creds []Credential
+}
+
+type Database struct {
+ Session *Session
+ Name string
+}
+
+type Collection struct {
+ Database *Database
+ Name string // "collection"
+ FullName string // "db.collection"
+}
+
+type Query struct {
+ m sync.Mutex
+ session *Session
+ query // Enables default settings in session.
+}
+
+type query struct {
+ op queryOp
+ prefetch float64
+ limit int32
+}
+
+type getLastError struct {
+ CmdName int "getLastError"
+ W interface{} "w,omitempty"
+ WTimeout int "wtimeout,omitempty"
+ FSync bool "fsync,omitempty"
+ J bool "j,omitempty"
+}
+
+type Iter struct {
+ m sync.Mutex
+ gotReply sync.Cond
+ session *Session
+ server *mongoServer
+ docData queue
+ err error
+ op getMoreOp
+ prefetch float64
+ limit int32
+ docsToReceive int
+ docsBeforeMore int
+ timeout time.Duration
+ timedout bool
+}
+
+var ErrNotFound = errors.New("not found")
+
+const defaultPrefetch = 0.25
+
+// Dial establishes a new session to the cluster identified by the given seed
+// server(s). The session will enable communication with all of the servers in
+// the cluster, so the seed servers are used only to find out about the cluster
+// topology.
+//
+// Dial will time out after 10 seconds if a server isn't reached. The returned
+// session will time out operations after one minute by default if servers
+// aren't available. To customize the timeout, see DialWithTimeout,
+// SetSyncTimeout, and SetSocketTimeout.
+//
+// This method is generally called just once for a given cluster. Further
+// sessions to the same cluster are then established using the New or Copy
+// methods on the obtained session. This will make them share the underlying
+// cluster, and manage the pool of connections appropriately.
+//
+// Once the session is no longer needed, Close must be called to release the
+// resources appropriately.
+//
+// The seed servers must be provided in the following format:
+//
+// [mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options]
+//
+// For example, it may be as simple as:
+//
+// localhost
+//
+// Or something more involved, like:
+//
+// mongodb://myuser:mypass@localhost:40001,otherhost:40001/mydb
+//
+// If the port number is not provided for a server, it defaults to 27017.
+//
+// The username and password provided in the URL will be used to authenticate
+// into the database named after the slash at the end of the host names, or
+// into the "admin" database if none is provided. The authentication information
+// will persist in sessions obtained through the New method as well.
+//
+// The following connection options are supported after the question mark:
+//
+// connect=direct
+//
+// Disables the automatic replica set server discovery logic, and
+// forces the use of servers provided only (even if secondaries).
+// Note that to talk to a secondary the consistency requirements
+// must be relaxed to Monotonic or Eventual via SetMode.
+//
+//
+// authSource=<db>
+//
+// Informs the database used to establish credentials and privileges
+// with a MongoDB server. Defaults to the database name provided via
+// the URL path, and "admin" if that's unset.
+//
+//
+// authMechanism=<mechanism>
+//
+// Defines the protocol for credential negotiation. Defaults to "MONGODB-CR",
+// which is the default username/password challenge-response mechanism.
+//
+//
+// gssapiServiceName=<name>
+//
+// Defines the service name to use when authenticating with the GSSAPI
+// mechanism. Defaults to "mongodb".
+//
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/connection-string/
+//
+func Dial(url string) (*Session, error) {
+ session, err := DialWithTimeout(url, 10*time.Second)
+ if err == nil {
+ session.SetSyncTimeout(1 * time.Minute)
+ session.SetSocketTimeout(1 * time.Minute)
+ }
+ return session, err
+}
+
+// DialWithTimeout works like Dial, but uses timeout as the amount of time to
+// wait for a server to respond when first connecting and also on follow up
+// operations in the session. If timeout is zero, the call may block
+// forever waiting for a connection to be made.
+//
+// See SetSyncTimeout for customizing the timeout for the session.
+func DialWithTimeout(url string, timeout time.Duration) (*Session, error) {
+ uinfo, err := parseURL(url)
+ if err != nil {
+ return nil, err
+ }
+ direct := false
+ mechanism := ""
+ service := ""
+ source := ""
+ for k, v := range uinfo.options {
+ switch k {
+ case "authSource":
+ source = v
+ case "authMechanism":
+ mechanism = v
+ case "gssapiServiceName":
+ service = v
+ case "connect":
+ if v == "direct" {
+ direct = true
+ break
+ }
+ if v == "replicaSet" {
+ break
+ }
+ fallthrough
+ default:
+ return nil, errors.New("unsupported connection URL option: " + k + "=" + v)
+ }
+ }
+ info := DialInfo{
+ Addrs: uinfo.addrs,
+ Direct: direct,
+ Timeout: timeout,
+ Database: uinfo.db,
+ Username: uinfo.user,
+ Password: uinfo.pass,
+ Mechanism: mechanism,
+ Service: service,
+ Source: source,
+ }
+ return DialWithInfo(&info)
+}
+
+// DialInfo holds options for establishing a session with a MongoDB cluster.
+// To use a URL, see the Dial function.
+type DialInfo struct {
+ // Addrs holds the addresses for the seed servers.
+ Addrs []string
+
+ // Direct informs whether to establish connections only with the
+ // specified seed servers, or to obtain information for the whole
+ // cluster and establish connections with further servers too.
+ Direct bool
+
+ // Timeout is the amount of time to wait for a server to respond when
+ // first connecting and on follow up operations in the session. If
+ // timeout is zero, the call may block forever waiting for a connection
+ // to be established.
+ Timeout time.Duration
+
+ // FailFast will cause connection and query attempts to fail faster when
+ // the server is unavailable, instead of retrying until the configured
+ // timeout period. Note that an unavailable server may silently drop
+ // packets instead of rejecting them, in which case it's impossible to
+ // distinguish it from a slow server, so the timeout stays relevant.
+ FailFast bool
+
+ // Database is the default database name used when the Session.DB method
+ // is called with an empty name, and is also used during the initial
+ // authentication if Source is unset.
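+ // (For example, with Database set to "blog" and Source unset, both
+ // session.DB("") and the initial authentication resolve to "blog".)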
+ Database string + + // Source is the database used to establish credentials and privileges + // with a MongoDB server. Defaults to the value of Database, if that is + // set, or "admin" otherwise. + Source string + + // Service defines the service name to use when authenticating with the GSSAPI + // mechanism. Defaults to "mongodb". + Service string + + // Mechanism defines the protocol for credential negotiation. + // Defaults to "MONGODB-CR". + Mechanism string + + // Username and Password inform the credentials for the initial authentication + // done on the database defined by the Source field. See Session.Login. + Username string + Password string + + // DialServer optionally specifies the dial function for establishing + // connections with the MongoDB servers. + DialServer func(addr *ServerAddr) (net.Conn, error) + + // WARNING: This field is obsolete. See DialServer above. + Dial func(addr net.Addr) (net.Conn, error) +} + +// ServerAddr represents the address for establishing a connection to an +// individual MongoDB server. +type ServerAddr struct { + str string + tcp *net.TCPAddr +} + +// String returns the address that was provided for the server before resolution. +func (addr *ServerAddr) String() string { + return addr.str +} + +// TCPAddr returns the resolved TCP address for the server. +func (addr *ServerAddr) TCPAddr() *net.TCPAddr { + return addr.tcp +} + +// DialWithInfo establishes a new session to the cluster identified by info. +func DialWithInfo(info *DialInfo) (*Session, error) { + addrs := make([]string, len(info.Addrs)) + for i, addr := range info.Addrs { + p := strings.LastIndexAny(addr, "]:") + if p == -1 || addr[p] != ':' { + // XXX This is untested. The test suite doesn't use the standard port. + addr += ":27017" + } + addrs[i] = addr + } + cluster := newCluster(addrs, info.Direct, info.FailFast, dialer{info.Dial, info.DialServer}) + session := newSession(Eventual, cluster, info.Timeout) + session.defaultdb = info.Database + if session.defaultdb == "" { + session.defaultdb = "test" + } + session.sourcedb = info.Source + if session.sourcedb == "" { + session.sourcedb = info.Database + if session.sourcedb == "" { + session.sourcedb = "admin" + } + } + if info.Username != "" { + source := session.sourcedb + if info.Source == "" && (info.Mechanism == "GSSAPI" || info.Mechanism == "PLAIN") { + source = "$external" + } + session.dialCred = &Credential{ + Username: info.Username, + Password: info.Password, + Mechanism: info.Mechanism, + Service: info.Service, + Source: source, + } + session.creds = []Credential{*session.dialCred} + } + cluster.Release() + + // People get confused when we return a session that is not actually + // established to any servers yet (e.g. what if url was wrong). So, + // ping the server to ensure there's someone there, and abort if it + // fails. 
+ if err := session.Ping(); err != nil { + session.Close() + return nil, err + } + session.SetMode(Strong, true) + return session, nil +} + +func isOptSep(c rune) bool { + return c == ';' || c == '&' +} + +type urlInfo struct { + addrs []string + user string + pass string + db string + options map[string]string +} + +func parseURL(s string) (*urlInfo, error) { + if strings.HasPrefix(s, "mongodb://") { + s = s[10:] + } + info := &urlInfo{options: make(map[string]string)} + if c := strings.Index(s, "?"); c != -1 { + for _, pair := range strings.FieldsFunc(s[c+1:], isOptSep) { + l := strings.SplitN(pair, "=", 2) + if len(l) != 2 || l[0] == "" || l[1] == "" { + return nil, errors.New("connection option must be key=value: " + pair) + } + info.options[l[0]] = l[1] + } + s = s[:c] + } + if c := strings.Index(s, "@"); c != -1 { + pair := strings.SplitN(s[:c], ":", 2) + if len(pair) > 2 || pair[0] == "" { + return nil, errors.New("credentials must be provided as user:pass@host") + } + var err error + info.user, err = url.QueryUnescape(pair[0]) + if err != nil { + return nil, fmt.Errorf("cannot unescape username in URL: %q", pair[0]) + } + if len(pair) > 1 { + info.pass, err = url.QueryUnescape(pair[1]) + if err != nil { + return nil, fmt.Errorf("cannot unescape password in URL") + } + } + s = s[c+1:] + } + if c := strings.Index(s, "/"); c != -1 { + info.db = s[c+1:] + s = s[:c] + } + info.addrs = strings.Split(s, ",") + return info, nil +} + +func newSession(consistency mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { + cluster.Acquire() + session = &Session{cluster_: cluster, syncTimeout: timeout, sockTimeout: timeout} + debugf("New session %p on cluster %p", session, cluster) + session.SetMode(consistency, true) + session.SetSafe(&Safe{}) + session.queryConfig.prefetch = defaultPrefetch + return session +} + +func copySession(session *Session, keepCreds bool) (s *Session) { + cluster := session.cluster() + cluster.Acquire() + if session.masterSocket != nil { + session.masterSocket.Acquire() + } + if session.slaveSocket != nil { + session.slaveSocket.Acquire() + } + var creds []Credential + if keepCreds { + creds = make([]Credential, len(session.creds)) + copy(creds, session.creds) + } else if session.dialCred != nil { + creds = []Credential{*session.dialCred} + } + scopy := *session + scopy.m = sync.RWMutex{} + scopy.creds = creds + s = &scopy + debugf("New session %p on cluster %p (copy from %p)", s, cluster, session) + return s +} + +// LiveServers returns a list of server addresses which are +// currently known to be alive. +func (s *Session) LiveServers() (addrs []string) { + s.m.RLock() + addrs = s.cluster().LiveServers() + s.m.RUnlock() + return addrs +} + +// DB returns a value representing the named database. If name +// is empty, the database name provided in the dialed URL is +// used instead. If that is also empty, "test" is used as a +// fallback in a way equivalent to the mongo shell. +// +// Creating this value is a very lightweight operation, and +// involves no network communication. +func (s *Session) DB(name string) *Database { + if name == "" { + name = s.defaultdb + } + return &Database{s, name} +} + +// C returns a value representing the named collection. +// +// Creating this value is a very lightweight operation, and +// involves no network communication. +func (db *Database) C(name string) *Collection { + return &Collection{db, name, db.Name + "." + name} +} + +// With returns a copy of db that uses session s. 
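+//
+// A typical pairing (sketch; assumes an established session):
+//
+// s2 := db.Session.Copy()
+// defer s2.Close()
+// db2 := db.With(s2)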
+func (db *Database) With(s *Session) *Database {
+ newdb := *db
+ newdb.Session = s
+ return &newdb
+}
+
+// With returns a copy of c that uses session s.
+func (c *Collection) With(s *Session) *Collection {
+ newdb := *c.Database
+ newdb.Session = s
+ newc := *c
+ newc.Database = &newdb
+ return &newc
+}
+
+// GridFS returns a GridFS value representing collections in db that
+// follow the standard GridFS specification.
+// The provided prefix (sometimes known as root) will determine which
+// collections to use, and is usually set to "fs" when there is a
+// single GridFS in the database.
+//
+// See the GridFS Create, Open, and OpenId methods for more details.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/GridFS
+// http://www.mongodb.org/display/DOCS/GridFS+Tools
+// http://www.mongodb.org/display/DOCS/GridFS+Specification
+//
+func (db *Database) GridFS(prefix string) *GridFS {
+ return newGridFS(db, prefix)
+}
+
+// Run issues the provided command on the db database and unmarshals
+// its result into the result argument. The cmd argument may be either
+// a string with the command name itself, in which case a document of
+// the form bson.M{cmd: 1} will be used, or it may be a full command document.
+//
+// Note that MongoDB considers the first marshalled key as the command
+// name, so when providing a command with options, it's important to
+// use an ordering-preserving document, such as a struct value or an
+// instance of bson.D. For instance:
+//
+// db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
+//
+// For privileged commands typically run on the "admin" database, see
+// the Run method in the Session type.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Commands
+// http://www.mongodb.org/display/DOCS/List+of+Database+CommandSkips
+//
+func (db *Database) Run(cmd interface{}, result interface{}) error {
+ if name, ok := cmd.(string); ok {
+ cmd = bson.D{{name, 1}}
+ }
+ return db.C("$cmd").Find(cmd).One(result)
+}
+
+// Credential holds details to authenticate with a MongoDB server.
+type Credential struct {
+ // Username and Password hold the basic details for authentication.
+ // Password is optional with some authentication mechanisms.
+ Username string
+ Password string
+
+ // Source is the database used to establish credentials and privileges
+ // with a MongoDB server. Defaults to the default database provided
+ // during dial, or "admin" if that was unset.
+ Source string
+
+ // Service defines the service name to use when authenticating with the GSSAPI
+ // mechanism. Defaults to "mongodb".
+ Service string
+
+ // Mechanism defines the protocol for credential negotiation.
+ // Defaults to "MONGODB-CR".
+ Mechanism string
+}
+
+// Login authenticates with MongoDB using the provided credential. The
+// authentication is valid for the whole session and will stay valid until
+// Logout is explicitly called for the same database, or the session is
+// closed.
+func (db *Database) Login(user, pass string) error {
+ return db.Session.Login(&Credential{Username: user, Password: pass, Source: db.Name})
+}
+
+// Login authenticates with MongoDB using the provided credential. The
+// authentication is valid for the whole session and will stay valid until
+// Logout is explicitly called for the same database, or the session is
+// closed.
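+//
+// For illustration (hypothetical credentials):
+//
+// cred := &mgo.Credential{Username: "myuser", Password: "mypass", Source: "admin"}
+// if err := session.Login(cred); err != nil {
+// // handle authentication failure
+// }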
+func (s *Session) Login(cred *Credential) error { + socket, err := s.acquireSocket(true) + if err != nil { + return err + } + defer socket.Release() + + credCopy := *cred + if cred.Source == "" { + if cred.Mechanism == "GSSAPI" { + credCopy.Source = "$external" + } else { + credCopy.Source = s.sourcedb + } + } + err = socket.Login(credCopy) + if err != nil { + return err + } + + s.m.Lock() + s.creds = append(s.creds, credCopy) + s.m.Unlock() + return nil +} + +func (s *Session) socketLogin(socket *mongoSocket) error { + for _, cred := range s.creds { + if err := socket.Login(cred); err != nil { + return err + } + } + return nil +} + +// Logout removes any established authentication credentials for the database. +func (db *Database) Logout() { + session := db.Session + dbname := db.Name + session.m.Lock() + found := false + for i, cred := range session.creds { + if cred.Source == dbname { + copy(session.creds[i:], session.creds[i+1:]) + session.creds = session.creds[:len(session.creds)-1] + found = true + break + } + } + if found { + if session.masterSocket != nil { + session.masterSocket.Logout(dbname) + } + if session.slaveSocket != nil { + session.slaveSocket.Logout(dbname) + } + } + session.m.Unlock() +} + +// LogoutAll removes all established authentication credentials for the session. +func (s *Session) LogoutAll() { + s.m.Lock() + for _, cred := range s.creds { + if s.masterSocket != nil { + s.masterSocket.Logout(cred.Source) + } + if s.slaveSocket != nil { + s.slaveSocket.Logout(cred.Source) + } + } + s.creds = s.creds[0:0] + s.m.Unlock() +} + +// User represents a MongoDB user. +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/privilege-documents/ +// http://docs.mongodb.org/manual/reference/user-privileges/ +// +type User struct { + // Username is how the user identifies itself to the system. + Username string `bson:"user"` + + // Password is the plaintext password for the user. If set, + // the UpsertUser method will hash it into PasswordHash and + // unset it before the user is added to the database. + Password string `bson:",omitempty"` + + // PasswordHash is the MD5 hash of Username+":mongo:"+Password. + PasswordHash string `bson:"pwd,omitempty"` + + // CustomData holds arbitrary data admins decide to associate + // with this user, such as the full name or employee id. + CustomData interface{} `bson:"customData,omitempty"` + + // Roles indicates the set of roles the user will be provided. + // See the Role constants. + Roles []Role `bson:"roles"` + + // OtherDBRoles allows assigning roles in other databases from + // user documents inserted in the admin database. This field + // only works in the admin database. + OtherDBRoles map[string][]Role `bson:"otherDBRoles,omitempty"` + + // UserSource indicates where to look for this user's credentials. + // It may be set to a database name, or to "$external" for + // consulting an external resource such as Kerberos. UserSource + // must not be set if Password or PasswordHash are present. + // + // WARNING: This setting was only ever supported in MongoDB 2.4, + // and is now obsolete. 
+ UserSource string `bson:"userSource,omitempty"` +} + +type Role string + +const ( + // Relevant documentation: + // + // http://docs.mongodb.org/manual/reference/user-privileges/ + // + RoleRoot Role = "root" + RoleRead Role = "read" + RoleReadAny Role = "readAnyDatabase" + RoleReadWrite Role = "readWrite" + RoleReadWriteAny Role = "readWriteAnyDatabase" + RoleDBAdmin Role = "dbAdmin" + RoleDBAdminAny Role = "dbAdminAnyDatabase" + RoleUserAdmin Role = "userAdmin" + RoleUserAdminAny Role = "userAdminAnyDatabase" + RoleClusterAdmin Role = "clusterAdmin" +) + +// UpsertUser updates the authentication credentials and the roles for +// a MongoDB user within the db database. If the named user doesn't exist +// it will be created. +// +// This method should only be used from MongoDB 2.4 and on. For older +// MongoDB releases, use the obsolete AddUser method instead. +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/user-privileges/ +// http://docs.mongodb.org/manual/reference/privilege-documents/ +// +func (db *Database) UpsertUser(user *User) error { + if user.Username == "" { + return fmt.Errorf("user has no Username") + } + if (user.Password != "" || user.PasswordHash != "") && user.UserSource != "" { + return fmt.Errorf("user has both Password/PasswordHash and UserSource set") + } + if len(user.OtherDBRoles) > 0 && db.Name != "admin" { + return fmt.Errorf("user with OtherDBRoles is only supported in admin database") + } + + // Attempt to run this using 2.6+ commands. + rundb := db + if user.UserSource != "" { + // Compatibility logic for the userSource field of MongoDB <= 2.4.X + rundb = db.Session.DB(user.UserSource) + } + err := rundb.runUserCmd("updateUser", user) + if e, ok := err.(*QueryError); ok && e.Code == 11 { + return rundb.runUserCmd("createUser", user) + } + if !isNoCmd(err) { + return err + } + + // Command does not exist. Fallback to pre-2.6 behavior. + var set, unset bson.D + if user.Password != "" { + psum := md5.New() + psum.Write([]byte(user.Username + ":mongo:" + user.Password)) + set = append(set, bson.DocElem{"pwd", hex.EncodeToString(psum.Sum(nil))}) + unset = append(unset, bson.DocElem{"userSource", 1}) + } else if user.PasswordHash != "" { + set = append(set, bson.DocElem{"pwd", user.PasswordHash}) + unset = append(unset, bson.DocElem{"userSource", 1}) + } + if user.UserSource != "" { + set = append(set, bson.DocElem{"userSource", user.UserSource}) + unset = append(unset, bson.DocElem{"pwd", 1}) + } + if user.Roles != nil || user.OtherDBRoles != nil { + set = append(set, bson.DocElem{"roles", user.Roles}) + if len(user.OtherDBRoles) > 0 { + set = append(set, bson.DocElem{"otherDBRoles", user.OtherDBRoles}) + } else { + unset = append(unset, bson.DocElem{"otherDBRoles", 1}) + } + } + users := db.C("system.users") + err = users.Update(bson.D{{"user", user.Username}}, bson.D{{"$unset", unset}, {"$set", set}}) + if err == ErrNotFound { + set = append(set, bson.DocElem{"user", user.Username}) + if user.Roles == nil && user.OtherDBRoles == nil { + // Roles must be sent, as it's the way MongoDB distinguishes + // old-style documents from new-style documents in pre-2.6. 
+ set = append(set, bson.DocElem{"roles", user.Roles}) + } + err = users.Insert(set) + } + return err +} + +func isNoCmd(err error) bool { + e, ok := err.(*QueryError) + return ok && strings.HasPrefix(e.Message, "no such cmd:") +} + +func (db *Database) runUserCmd(cmdName string, user *User) error { + //if user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") { + // return fmt.Errorf("MongoDB 2.6+ does not support the UserSource setting") + //} + + cmd := make(bson.D, 0, 16) + cmd = append(cmd, bson.DocElem{cmdName, user.Username}) + if user.Password != "" { + cmd = append(cmd, bson.DocElem{"pwd", user.Password}) + } + var roles []interface{} + for _, role := range user.Roles { + roles = append(roles, role) + } + for db, dbroles := range user.OtherDBRoles { + for _, role := range dbroles { + roles = append(roles, bson.D{{"role", role}, {"db", db}}) + } + } + if roles != nil || user.Roles != nil || cmdName == "createUser" { + cmd = append(cmd, bson.DocElem{"roles", roles}) + } + return db.Run(cmd, nil) +} + +// AddUser creates or updates the authentication credentials of user within +// the db database. +// +// WARNING: This method is obsolete and should only be used with MongoDB 2.2 +// or earlier. For MongoDB 2.4 and on, use UpsertUser instead. +func (db *Database) AddUser(username, password string, readOnly bool) error { + // Try to emulate the old behavior on 2.6+ + user := &User{Username: username, Password: password} + if db.Name == "admin" { + if readOnly { + user.Roles = []Role{RoleReadAny} + } else { + user.Roles = []Role{RoleReadWriteAny} + } + } else { + if readOnly { + user.Roles = []Role{RoleRead} + } else { + user.Roles = []Role{RoleReadWrite} + } + } + err := db.runUserCmd("updateUser", user) + if e, ok := err.(*QueryError); ok && e.Code == 11 { + return db.runUserCmd("createUser", user) + } + if !isNoCmd(err) { + return err + } + + // Command doesn't exist. Fallback to pre-2.6 behavior. + psum := md5.New() + psum.Write([]byte(username + ":mongo:" + password)) + digest := hex.EncodeToString(psum.Sum(nil)) + c := db.C("system.users") + _, err = c.Upsert(bson.M{"user": username}, bson.M{"$set": bson.M{"user": username, "pwd": digest, "readOnly": readOnly}}) + return err +} + +// RemoveUser removes the authentication credentials of user from the database. +func (db *Database) RemoveUser(user string) error { + err := db.Run(bson.D{{"dropUser", user}}, nil) + if isNoCmd(err) { + users := db.C("system.users") + return users.Remove(bson.M{"user": user}) + } + return err +} + +type indexSpec struct { + Name, NS string + Key bson.D + Unique bool ",omitempty" + DropDups bool "dropDups,omitempty" + Background bool ",omitempty" + Sparse bool ",omitempty" + Bits, Min, Max int ",omitempty" + ExpireAfter int "expireAfterSeconds,omitempty" +} + +type Index struct { + Key []string // Index key fields; prefix name with dash (-) for descending order + Unique bool // Prevent two documents from having the same index key + DropDups bool // Drop documents with the same index key as a previously indexed one + Background bool // Build index in background and return immediately + Sparse bool // Only index documents containing the Key fields + + ExpireAfter time.Duration // Periodically delete docs with indexed time.Time older than that. 
+ + Name string // Index name, computed by EnsureIndex + + Bits, Min, Max int // Properties for spatial indexes +} + +func parseIndexKey(key []string) (name string, realKey bson.D, err error) { + var order interface{} + for _, field := range key { + raw := field + if name != "" { + name += "_" + } + var kind string + if field != "" { + if field[0] == '$' { + if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 { + kind = field[1:c] + field = field[c+1:] + name += field + "_" + kind + } + } + switch field[0] { + case '$': + // Logic above failed. Reset and error. + field = "" + case '@': + order = "2d" + field = field[1:] + // The shell used to render this field as key_ instead of key_2d, + // and mgo followed suit. This has been fixed in recent server + // releases, and mgo followed as well. + name += field + "_2d" + case '-': + order = -1 + field = field[1:] + name += field + "_-1" + case '+': + field = field[1:] + fallthrough + default: + if kind == "" { + order = 1 + name += field + "_1" + } else { + order = kind + } + } + } + if field == "" || kind != "" && order != kind { + return "", nil, fmt.Errorf(`invalid index key: want "[$<kind>:][-]<field name>", got %q`, raw) + } + realKey = append(realKey, bson.DocElem{field, order}) + } + if name == "" { + return "", nil, errors.New("invalid index key: no fields provided") + } + return +} + +// EnsureIndexKey ensures an index with the given key exists, creating it +// if necessary. +// +// This example: +// +// err := collection.EnsureIndexKey("a", "b") +// +// Is equivalent to: +// +// err := collection.EnsureIndex(mgo.Index{Key: []string{"a", "b"}}) +// +// See the EnsureIndex method for more details. +func (c *Collection) EnsureIndexKey(key ...string) error { + return c.EnsureIndex(Index{Key: key}) +} + +// EnsureIndex ensures an index with the given key exists, creating it with +// the provided parameters if necessary. +// +// Once EnsureIndex returns successfully, following requests for the same index +// will not contact the server unless Collection.DropIndex is used to drop the +// same index, or Session.ResetIndexCache is called. +// +// For example: +// +// index := Index{ +// Key: []string{"lastname", "firstname"}, +// Unique: true, +// DropDups: true, +// Background: true, // See notes. +// Sparse: true, +// } +// err := collection.EnsureIndex(index) +// +// The Key value determines which fields compose the index. The index ordering +// will be ascending by default. To obtain an index with a descending order, +// the field name should be prefixed by a dash (e.g. []string{"-time"}). +// +// If Unique is true, the index must necessarily contain only a single +// document per Key. With DropDups set to true, documents with the same key +// as a previously indexed one will be dropped rather than an error returned. +// +// If Background is true, other connections will be allowed to proceed using +// the collection without the index while it's being built. Note that the +// session executing EnsureIndex will be blocked for as long as it takes for +// the index to be built. +// +// If Sparse is true, only documents containing the provided Key fields will be +// included in the index. When using a sparse index for sorting, only indexed +// documents will be returned. +// +// If ExpireAfter is non-zero, the server will periodically scan the collection +// and remove documents containing an indexed time.Time field with a value +// older than ExpireAfter.
See the documentation for details: +// +// http://docs.mongodb.org/manual/tutorial/expire-data +// +// Other kinds of indexes are also supported through that API. Here is an example: +// +// index := Index{ +// Key: []string{"$2d:loc"}, +// Bits: 26, +// } +// err := collection.EnsureIndex(index) +// +// The example above requests the creation of a "2d" index for the "loc" field. +// +// The 2D index bounds may be changed using the Min and Max attributes of the +// Index value. The default bound setting of (-180, 180) is suitable for +// latitude/longitude pairs. +// +// The Bits parameter sets the precision of the 2D geohash values. If not +// provided, 26 bits are used, which is roughly equivalent to 1 foot of +// precision for the default (-180, 180) index bounds. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Indexes +// http://www.mongodb.org/display/DOCS/Indexing+Advice+and+FAQ +// http://www.mongodb.org/display/DOCS/Indexing+as+a+Background+Operation +// http://www.mongodb.org/display/DOCS/Geospatial+Indexing +// http://www.mongodb.org/display/DOCS/Multikeys +// +func (c *Collection) EnsureIndex(index Index) error { + name, realKey, err := parseIndexKey(index.Key) + if err != nil { + return err + } + + session := c.Database.Session + cacheKey := c.FullName + "\x00" + name + if session.cluster().HasCachedIndex(cacheKey) { + return nil + } + + spec := indexSpec{ + Name: name, + NS: c.FullName, + Key: realKey, + Unique: index.Unique, + DropDups: index.DropDups, + Background: index.Background, + Sparse: index.Sparse, + Bits: index.Bits, + Min: index.Min, + Max: index.Max, + ExpireAfter: int(index.ExpireAfter / time.Second), + } + + session = session.Clone() + defer session.Close() + session.SetMode(Strong, false) + session.EnsureSafe(&Safe{}) + + db := c.Database.With(session) + err = db.C("system.indexes").Insert(&spec) + if err == nil { + session.cluster().CacheIndex(cacheKey, true) + } + session.Close() + return err +} + +// DropIndex removes the index with key from the collection. +// +// The key value determines which fields compose the index. The index ordering +// will be ascending by default. To obtain an index with a descending order, +// the field name should be prefixed by a dash (e.g. []string{"-time"}). +// +// For example: +// +// err := collection.DropIndex("lastname", "firstname") +// +// See the EnsureIndex method for more details on indexes. +func (c *Collection) DropIndex(key ...string) error { + name, _, err := parseIndexKey(key) + if err != nil { + return err + } + + session := c.Database.Session + cacheKey := c.FullName + "\x00" + name + session.cluster().CacheIndex(cacheKey, false) + + session = session.Clone() + defer session.Close() + session.SetMode(Strong, false) + + db := c.Database.With(session) + result := struct { + ErrMsg string + Ok bool + }{} + err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result) + if err != nil { + return err + } + if !result.Ok { + return errors.New(result.ErrMsg) + } + return nil +} + +// Indexes returns a list of all indexes for the collection. +// +// For example, this snippet would drop all available indexes: +// +// indexes, err := collection.Indexes() +// if err != nil { +// return err +// } +// for _, index := range indexes { +// err = collection.DropIndex(index.Key...) +// if err != nil { +// return err +// } +// } +// +// See the EnsureIndex method for more details on indexes. 
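+// +// As another illustrative sketch, this prints the key of every index: +// +// indexes, err := collection.Indexes() +// if err == nil { +// for _, index := range indexes { +// fmt.Println(index.Key) +// } +// } +//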
+func (c *Collection) Indexes() (indexes []Index, err error) { + query := c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}) + iter := query.Sort("name").Iter() + for { + var spec indexSpec + if !iter.Next(&spec) { + break + } + index := Index{ + Name: spec.Name, + Key: simpleIndexKey(spec.Key), + Unique: spec.Unique, + DropDups: spec.DropDups, + Background: spec.Background, + Sparse: spec.Sparse, + ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, + } + indexes = append(indexes, index) + } + err = iter.Close() + return +} + +func simpleIndexKey(realKey bson.D) (key []string) { + for i := range realKey { + field := realKey[i].Name + vi, ok := realKey[i].Value.(int) + if !ok { + vf, _ := realKey[i].Value.(float64) + vi = int(vf) + } + if vi == 1 { + key = append(key, field) + continue + } + if vi == -1 { + key = append(key, "-"+field) + continue + } + if vs, ok := realKey[i].Value.(string); ok { + key = append(key, "$"+vs+":"+field) + continue + } + panic("Got unknown index key type for field " + field) + } + return +} + +// ResetIndexCache() clears the cache of previously ensured indexes. +// Following requests to EnsureIndex will contact the server. +func (s *Session) ResetIndexCache() { + s.cluster().ResetIndexCache() +} + +// New creates a new session with the same parameters as the original +// session, including consistency, batch size, prefetching, safety mode, +// etc. The returned session will use sockets from the pool, so there's +// a chance that writes just performed in another session may not yet +// be visible. +// +// Login information from the original session will not be copied over +// into the new session unless it was provided through the initial URL +// for the Dial function. +// +// See the Copy and Clone methods. +// +func (s *Session) New() *Session { + s.m.Lock() + scopy := copySession(s, false) + s.m.Unlock() + scopy.Refresh() + return scopy +} + +// Copy works just like New, but preserves the exact authentication +// information from the original session. +func (s *Session) Copy() *Session { + s.m.Lock() + scopy := copySession(s, true) + s.m.Unlock() + scopy.Refresh() + return scopy +} + +// Clone works just like Copy, but also reuses the same socket as the original +// session, in case it had already reserved one due to its consistency +// guarantees. This behavior ensures that writes performed in the old session +// are necessarily observed when using the new session, as long as it was a +// strong or monotonic session. That said, it also means that long operations +// may cause other goroutines using the original session to wait. +func (s *Session) Clone() *Session { + s.m.Lock() + scopy := copySession(s, true) + s.m.Unlock() + return scopy +} + +// Close terminates the session. It's a runtime error to use a session +// after it has been closed. +func (s *Session) Close() { + s.m.Lock() + if s.cluster_ != nil { + debugf("Closing session %p", s) + s.unsetSocket() + s.cluster_.Release() + s.cluster_ = nil + } + s.m.Unlock() +} + +func (s *Session) cluster() *mongoCluster { + if s.cluster_ == nil { + panic("Session already closed") + } + return s.cluster_ +} + +// Refresh puts back any reserved sockets in use and restarts the consistency +// guarantees according to the current consistency setting for the session. +func (s *Session) Refresh() { + s.m.Lock() + s.slaveOk = s.consistency != Strong + s.unsetSocket() + s.m.Unlock() +} + +// SetMode changes the consistency mode for the session. 
+// +// In the Strong consistency mode reads and writes will always be made to +// the primary server using a unique connection so that reads and writes are +// fully consistent, ordered, and observing the most up-to-date data. +// This offers the least benefits in terms of distributing load, but the +// most guarantees. See also Monotonic and Eventual. +// +// In the Monotonic consistency mode reads may not be entirely up-to-date, +// but they will always see the history of changes moving forward, the data +// read will be consistent across sequential queries in the same session, +// and modifications made within the session will be observed in following +// queries (read-your-writes). +// +// In practice, the Monotonic mode is obtained by performing initial reads +// on a unique connection to an arbitrary secondary, if one is available, +// and once the first write happens, the session connection is switched over +// to the primary server. This manages to distribute some of the reading +// load with secondaries, while maintaining some useful guarantees. +// +// In the Eventual consistency mode reads will be made to any secondary in the +// cluster, if one is available, and sequential reads will not necessarily +// be made with the same connection. This means that data may be observed +// out of order. Writes will of course be issued to the primary, but +// independent writes in the same Eventual session may also be made with +// independent connections, so there are also no guarantees in terms of +// write ordering (no read-your-writes guarantees either). +// +// The Eventual mode is the fastest and most resource-friendly, but is +// also the one offering the least guarantees about ordering of the data +// read and written. +// +// If refresh is true, in addition to ensuring the session is in the given +// consistency mode, the consistency guarantees will also be reset (e.g. +// a Monotonic session will be allowed to read from secondaries again). +// This is equivalent to calling the Refresh function. +// +// Shifting between Monotonic and Strong modes will keep a previously +// reserved connection for the session unless refresh is true or the +// connection is unsuitable (to a secondary server in a Strong session). +func (s *Session) SetMode(consistency mode, refresh bool) { + s.m.Lock() + debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket) + s.consistency = consistency + if refresh { + s.slaveOk = s.consistency != Strong + s.unsetSocket() + } else if s.consistency == Strong { + s.slaveOk = false + } else if s.masterSocket == nil { + s.slaveOk = true + } + s.m.Unlock() +} + +// Mode returns the current consistency mode for the session. +func (s *Session) Mode() mode { + s.m.RLock() + mode := s.consistency + s.m.RUnlock() + return mode +} + +// SetSyncTimeout sets the amount of time an operation with this session +// will wait before returning an error in case a connection to a usable +// server can't be established. Set it to zero to wait forever. The +// default value is 7 seconds. +func (s *Session) SetSyncTimeout(d time.Duration) { + s.m.Lock() + s.syncTimeout = d + s.m.Unlock() +} + +// SetSocketTimeout sets the amount of time to wait for a non-responding +// socket to the database before it is forcefully closed. 
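+// +// For example, an illustrative sketch (the one minute value is arbitrary): +// +// session.SetSocketTimeout(1 * time.Minute) +//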
+func (s *Session) SetSocketTimeout(d time.Duration) { + s.m.Lock() + s.sockTimeout = d + if s.masterSocket != nil { + s.masterSocket.SetTimeout(d) + } + if s.slaveSocket != nil { + s.slaveSocket.SetTimeout(d) + } + s.m.Unlock() +} + +// SetCursorTimeout changes the standard timeout period that the server +// enforces on created cursors. The only supported value right now is +// 0, which disables the timeout. The standard server timeout is 10 minutes. +func (s *Session) SetCursorTimeout(d time.Duration) { + s.m.Lock() + if d == 0 { + s.queryConfig.op.flags |= flagNoCursorTimeout + } else { + panic("SetCursorTimeout: only 0 (disable timeout) supported for now") + } + s.m.Unlock() +} + +// SetBatch sets the default batch size used when fetching documents from the +// database. It's possible to change this setting on a per-query basis as +// well, using the Query.Batch method. +// +// The default batch size is defined by the database itself. As of this +// writing, MongoDB will use an initial size of min(100 docs, 4MB) on the +// first batch, and 4MB on remaining ones. +func (s *Session) SetBatch(n int) { + if n == 1 { + // Server interprets 1 as -1 and closes the cursor (!?) + n = 2 + } + s.m.Lock() + s.queryConfig.op.limit = int32(n) + s.m.Unlock() +} + +// SetPrefetch sets the default point at which the next batch of results will be +// requested. When there are p*batch_size remaining documents cached in an +// Iter, the next batch will be requested in background. For instance, when +// using this: +// +// session.SetBatch(200) +// session.SetPrefetch(0.25) +// +// and there are only 50 documents cached in the Iter to be processed, the +// next batch of 200 will be requested. It's possible to change this setting on +// a per-query basis as well, using the Prefetch method of Query. +// +// The default prefetch value is 0.25. +func (s *Session) SetPrefetch(p float64) { + s.m.Lock() + s.queryConfig.prefetch = p + s.m.Unlock() +} + +// See SetSafe for details on the Safe type. +type Safe struct { + W int // Min # of servers to ack before success + WMode string // Write mode for MongoDB 2.0+ (e.g. "majority") + WTimeout int // Milliseconds to wait for W before timing out + FSync bool // Should servers sync to disk before returning success + J bool // Wait for next group commit if journaling; no effect otherwise +} + +// Safe returns the current safety mode for the session. +func (s *Session) Safe() (safe *Safe) { + s.m.Lock() + defer s.m.Unlock() + if s.safeOp != nil { + cmd := s.safeOp.query.(*getLastError) + safe = &Safe{WTimeout: cmd.WTimeout, FSync: cmd.FSync, J: cmd.J} + switch w := cmd.W.(type) { + case string: + safe.WMode = w + case int: + safe.W = w + } + } + return +} + +// SetSafe changes the session safety mode. +// +// If the safe parameter is nil, the session is put in unsafe mode, and writes +// become fire-and-forget, without error checking. The unsafe mode is faster +// since operations won't hold on waiting for a confirmation. +// +// If the safe parameter is not nil, any changing query (insert, update, ...) +// will be followed by a getLastError command with the specified parameters, +// to ensure the request was correctly processed. +// +// The safe.W parameter determines how many servers should confirm a write +// before the operation is considered successful. If set to 0 or 1, the +// command will return as soon as the primary is done with the request. 
+// If safe.WTimeout is greater than zero, it determines how many milliseconds +// to wait for the safe.W servers to respond before returning an error. +// +// Starting with MongoDB 2.0.0 the safe.WMode parameter can be used instead +// of W to request for richer semantics. If set to "majority" the server will +// wait for a majority of members from the replica set to respond before +// returning. Custom modes may also be defined within the server to create +// very detailed placement schemas. See the data awareness documentation in +// the links below for more details (note that MongoDB internally reuses the +// "w" field name for WMode). +// +// If safe.FSync is true and journaling is disabled, the servers will be +// forced to sync all files to disk immediately before returning. If the +// same option is true but journaling is enabled, the server will instead +// await for the next group commit before returning. +// +// Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync +// to force the server to wait for a group commit in case journaling is +// enabled. The option has no effect if the server has journaling disabled. +// +// For example, the following statement will make the session check for +// errors, without imposing further constraints: +// +// session.SetSafe(&mgo.Safe{}) +// +// The following statement will force the server to wait for a majority of +// members of a replica set to return (MongoDB 2.0+ only): +// +// session.SetSafe(&mgo.Safe{WMode: "majority"}) +// +// The following statement, on the other hand, ensures that at least two +// servers have flushed the change to disk before confirming the success +// of operations: +// +// session.EnsureSafe(&mgo.Safe{W: 2, FSync: true}) +// +// The following statement, on the other hand, disables the verification +// of errors entirely: +// +// session.SetSafe(nil) +// +// See also the EnsureSafe method. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/getLastError+Command +// http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError +// http://www.mongodb.org/display/DOCS/Data+Center+Awareness +// +func (s *Session) SetSafe(safe *Safe) { + s.m.Lock() + s.safeOp = nil + s.ensureSafe(safe) + s.m.Unlock() +} + +// EnsureSafe compares the provided safety parameters with the ones +// currently in use by the session and picks the most conservative +// choice for each setting. +// +// That is: +// +// - safe.WMode is always used if set. +// - safe.W is used if larger than the current W and WMode is empty. +// - safe.FSync is always used if true. +// - safe.J is used if FSync is false. +// - safe.WTimeout is used if set and smaller than the current WTimeout. +// +// For example, the following statement will ensure the session is +// at least checking for errors, without enforcing further constraints. +// If a more conservative SetSafe or EnsureSafe call was previously done, +// the following call will be ignored. +// +// session.EnsureSafe(&mgo.Safe{}) +// +// See also the SetSafe method for details on what each option means. 
+// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/getLastError+Command +// http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError +// http://www.mongodb.org/display/DOCS/Data+Center+Awareness +// +func (s *Session) EnsureSafe(safe *Safe) { + s.m.Lock() + s.ensureSafe(safe) + s.m.Unlock() +} + +func (s *Session) ensureSafe(safe *Safe) { + if safe == nil { + return + } + + var w interface{} + if safe.WMode != "" { + w = safe.WMode + } else if safe.W > 0 { + w = safe.W + } + + var cmd getLastError + if s.safeOp == nil { + cmd = getLastError{1, w, safe.WTimeout, safe.FSync, safe.J} + } else { + // Copy. We don't want to mutate the existing query. + cmd = *(s.safeOp.query.(*getLastError)) + if cmd.W == nil { + cmd.W = w + } else if safe.WMode != "" { + cmd.W = safe.WMode + } else if i, ok := cmd.W.(int); ok && safe.W > i { + cmd.W = safe.W + } + if safe.WTimeout > 0 && safe.WTimeout < cmd.WTimeout { + cmd.WTimeout = safe.WTimeout + } + if safe.FSync { + cmd.FSync = true + cmd.J = false + } else if safe.J && !cmd.FSync { + cmd.J = true + } + } + s.safeOp = &queryOp{ + query: &cmd, + collection: "admin.$cmd", + limit: -1, + } +} + +// Run issues the provided command on the "admin" database and +// unmarshals its result in the respective argument. The cmd +// argument may be either a string with the command name itself, in +// which case an empty document of the form bson.M{cmd: 1} will be used, +// or it may be a full command document. +// +// Note that MongoDB considers the first marshalled key as the command +// name, so when providing a command with options, it's important to +// use an ordering-preserving document, such as a struct value or an +// instance of bson.D. For instance: +// +// db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}}) +// +// For commands on arbitrary databases, see the Run method in +// the Database type. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Commands +// http://www.mongodb.org/display/DOCS/List+of+Database+Commands +// +func (s *Session) Run(cmd interface{}, result interface{}) error { + return s.DB("admin").Run(cmd, result) +} + +// SelectServers restricts communication to servers configured with the +// given tags. For example, the following statement restricts servers +// used for reading operations to those with both tag "disk" set to +// "ssd" and tag "rack" set to 1: +// +// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}}) +// +// Multiple sets of tags may be provided, in which case the used server +// must match all tags within any one set. +// +// If a connection was previously assigned to the session due to the +// current session mode (see Session.SetMode), the tag selection will +// only be enforced after the session is refreshed. +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/tutorial/configure-replica-set-tag-sets +// +func (s *Session) SelectServers(tags ...bson.D) { + s.m.Lock() + s.queryConfig.op.serverTags = tags + s.m.Unlock() +} + +// Ping runs a trivial ping command just to get in touch with the server. +func (s *Session) Ping() error { + return s.Run("ping", nil) +} + +// Fsync flushes in-memory writes to disk on the server the session +// is established with. If async is true, the call returns immediately, +// otherwise it returns after the flush has been made.
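+// +// For example, a minimal sketch: +// +// err := session.Fsync(false) // blocks until the flush completes +//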
+func (s *Session) Fsync(async bool) error { + return s.Run(bson.D{{"fsync", 1}, {"async", async}}, nil) +} + +// FsyncLock locks all writes in the specific server the session is +// established with and returns. Any writes attempted to the server +// after it is successfully locked will block until FsyncUnlock is +// called for the same server. +// +// This method works on secondaries as well, preventing the oplog from +// being flushed while the server is locked, but since only the server +// connected to is locked, for locking specific secondaries it may be +// necessary to establish a connection directly to the secondary (see +// Dial's connect=direct option). +// +// As an important caveat, note that once a write is attempted and +// blocks, follow up reads will block as well due to the way the +// lock is internally implemented in the server. More details at: +// +// https://jira.mongodb.org/browse/SERVER-4243 +// +// FsyncLock is often used for performing consistent backups of +// the database files on disk. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/fsync+Command +// http://www.mongodb.org/display/DOCS/Backups +// +func (s *Session) FsyncLock() error { + return s.Run(bson.D{{"fsync", 1}, {"lock", true}}, nil) +} + +// FsyncUnlock releases the server for writes. See FsyncLock for details. +func (s *Session) FsyncUnlock() error { + return s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil) // WTF? +} + +// Find prepares a query using the provided document. The document may be a +// map or a struct value capable of being marshalled with bson. The map +// may be a generic one using interface{} for its key and/or values, such as +// bson.M, or it may be a properly typed map. Providing nil as the document +// is equivalent to providing an empty document such as bson.M{}. +// +// Further details of the query may be tweaked using the resulting Query value, +// and then executed to retrieve results using methods such as One, For, +// Iter, or Tail. +// +// In case the resulting document includes a field named $err or errmsg, which +// are standard ways for MongoDB to return query errors, the returned err will +// be set to a *QueryError value including the Err message and the Code. In +// those cases, the result argument is still unmarshalled into with the +// received document so that any other custom values may be obtained if +// desired. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Querying +// http://www.mongodb.org/display/DOCS/Advanced+Queries +// +func (c *Collection) Find(query interface{}) *Query { + session := c.Database.Session + session.m.RLock() + q := &Query{session: session, query: session.queryConfig} + session.m.RUnlock() + q.op.query = query + q.op.collection = c.FullName + return q +} + +// FindId is a convenience helper equivalent to: +// +// query := collection.Find(bson.M{"_id": id}) +// +// See the Find method for more details. +func (c *Collection) FindId(id interface{}) *Query { + return c.Find(bson.D{{"_id", id}}) +} + +type Pipe struct { + session *Session + collection *Collection + pipeline interface{} +} + +// Pipe prepares a pipeline to aggregate. The pipeline document +// must be a slice built in terms of the aggregation framework language. 
+// +// For example: +// +// pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}}) +// iter := pipe.Iter() +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/aggregation +// http://docs.mongodb.org/manual/applications/aggregation +// http://docs.mongodb.org/manual/tutorial/aggregation-examples +// +func (c *Collection) Pipe(pipeline interface{}) *Pipe { + session := c.Database.Session + return &Pipe{ + session: session, + collection: c, + pipeline: pipeline, + } +} + +// Iter executes the pipeline and returns an iterator capable of going +// over all the generated results. +func (p *Pipe) Iter() *Iter { + iter := &Iter{ + session: p.session, + timeout: -1, + } + iter.gotReply.L = &iter.m + var result struct{ Result []bson.Raw } + c := p.collection + iter.err = c.Database.Run(bson.D{{"aggregate", c.Name}, {"pipeline", p.pipeline}}, &result) + if iter.err != nil { + return iter + } + for i := range result.Result { + iter.docData.Push(result.Result[i].Data) + } + return iter +} + +// All works like Iter.All. +func (p *Pipe) All(result interface{}) error { + return p.Iter().All(result) +} + +// One executes the pipeline and unmarshals the first item from the +// result set into the result parameter. +// It returns ErrNotFound if no items are generated by the pipeline. +func (p *Pipe) One(result interface{}) error { + iter := p.Iter() + if iter.Next(result) { + return nil + } + if err := iter.Err(); err != nil { + return err + } + return ErrNotFound +} + +type LastError struct { + Err string + Code, N, Waited int + FSyncFiles int `bson:"fsyncFiles"` + WTimeout bool + UpdatedExisting bool `bson:"updatedExisting"` + UpsertedId interface{} `bson:"upserted"` +} + +func (err *LastError) Error() string { + return err.Err +} + +type queryError struct { + Err string "$err" + ErrMsg string + Assertion string + Code int + AssertionCode int "assertionCode" + LastError *LastError "lastErrorObject" +} + +type QueryError struct { + Code int + Message string + Assertion bool +} + +func (err *QueryError) Error() string { + return err.Message +} + +// IsDup returns whether err informs of a duplicate key error because +// a primary key index or a secondary unique index already has an entry +// with the given value. +func IsDup(err error) bool { + // Besides being handy, helps with MongoDB bugs SERVER-7164 and SERVER-11493. + // What follows makes me sad. Hopefully conventions will be more clear over time. + switch e := err.(type) { + case *LastError: + return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") + case *QueryError: + return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 + } + return false +} + +// Insert inserts one or more documents in the respective collection. In +// case the session is in safe mode (see the SetSafe method) and an error +// happens while inserting the provided documents, the returned error will +// be of type *LastError. +func (c *Collection) Insert(docs ...interface{}) error { + _, err := c.writeQuery(&insertOp{c.FullName, docs, 0}) + return err +} + +// Update finds a single document matching the provided selector document +// and modifies it according to the update document. +// If the session is in safe mode (see SetSafe) an ErrNotFound error is +// returned if a document isn't found, or a value of type *LastError +// when some other error is detected.
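+// +// For example, an illustrative sketch (the "n" field is a placeholder): +// +// err := collection.Update(bson.M{"_id": id}, bson.M{"$set": bson.M{"n": 42}}) +//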
+// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Updating +// http://www.mongodb.org/display/DOCS/Atomic+Operations +// +func (c *Collection) Update(selector interface{}, update interface{}) error { + lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 0}) + if err == nil && lerr != nil && !lerr.UpdatedExisting { + return ErrNotFound + } + return err +} + +// UpdateId is a convenience helper equivalent to: +// +// err := collection.Update(bson.M{"_id": id}, update) +// +// See the Update method for more details. +func (c *Collection) UpdateId(id interface{}, update interface{}) error { + return c.Update(bson.D{{"_id", id}}, update) +} + +// ChangeInfo holds details about the outcome of an update operation. +type ChangeInfo struct { + Updated int // Number of existing documents updated + Removed int // Number of documents removed + UpsertedId interface{} // Upserted _id field, when not explicitly provided +} + +// UpdateAll finds all documents matching the provided selector document +// and modifies them according to the update document. +// If the session is in safe mode (see SetSafe) details of the executed +// operation are returned in info or an error of type *LastError when +// some problem is detected. It is not an error for the update to not be +// applied on any documents because the selector doesn't match. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Updating +// http://www.mongodb.org/display/DOCS/Atomic+Operations +// +func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) { + lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 2}) + if err == nil && lerr != nil { + info = &ChangeInfo{Updated: lerr.N} + } + return info, err +} + +// Upsert finds a single document matching the provided selector document +// and modifies it according to the update document. If no document matching +// the selector is found, the update document is applied to the selector +// document and the result is inserted in the collection. +// If the session is in safe mode (see SetSafe) details of the executed +// operation are returned in info, or an error of type *LastError when +// some problem is detected. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Updating +// http://www.mongodb.org/display/DOCS/Atomic+Operations +// +func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) { + lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 1}) + if err == nil && lerr != nil { + info = &ChangeInfo{} + if lerr.UpdatedExisting { + info.Updated = lerr.N + } else { + info.UpsertedId = lerr.UpsertedId + } + } + return info, err +} + +// UpsertId is a convenience helper equivalent to: +// +// info, err := collection.Upsert(bson.M{"_id": id}, update) +// +// See the Upsert method for more details. +func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeInfo, err error) { + return c.Upsert(bson.D{{"_id", id}}, update) +} + +// Remove finds a single document matching the provided selector document +// and removes it from the database. +// If the session is in safe mode (see SetSafe) an ErrNotFound error is +// returned if a document isn't found, or a value of type *LastError +// when some other error is detected.
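+// +// For example, a minimal sketch: +// +// err := collection.Remove(bson.M{"_id": id}) +//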
+// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Removing +// +func (c *Collection) Remove(selector interface{}) error { + lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 1}) + if err == nil && lerr != nil && lerr.N == 0 { + return ErrNotFound + } + return err +} + +// RemoveId is a convenience helper equivalent to: +// +// err := collection.Remove(bson.M{"_id": id}) +// +// See the Remove method for more details. +func (c *Collection) RemoveId(id interface{}) error { + return c.Remove(bson.D{{"_id", id}}) +} + +// RemoveAll finds all documents matching the provided selector document +// and removes them from the database. In case the session is in safe mode +// (see the SetSafe method) and an error happens when attempting the change, +// the returned error will be of type *LastError. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Removing +// +func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { + lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 0}) + if err == nil && lerr != nil { + info = &ChangeInfo{Removed: lerr.N} + } + return info, err +} + +// DropDatabase removes the entire database including all of its collections. +func (db *Database) DropDatabase() error { + return db.Run(bson.D{{"dropDatabase", 1}}, nil) +} + +// DropCollection removes the entire collection including all of its documents. +func (c *Collection) DropCollection() error { + return c.Database.Run(bson.D{{"drop", c.Name}}, nil) +} + +// The CollectionInfo type holds metadata about a collection. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/createCollection+Command +// http://www.mongodb.org/display/DOCS/Capped+Collections +// +type CollectionInfo struct { + // DisableIdIndex prevents the automatic creation of the index + // on the _id field for the collection. + DisableIdIndex bool + + // ForceIdIndex enforces the automatic creation of the index + // on the _id field for the collection. Capped collections, + // for example, do not have such an index by default. + ForceIdIndex bool + + // If Capped is true new documents will replace old ones when + // the collection is full. MaxBytes must necessarily be set + // to define the size when the collection wraps around. + // MaxDocs optionally defines the number of documents when it + // wraps, but MaxBytes still needs to be set. + Capped bool + MaxBytes int + MaxDocs int +} + +// Create explicitly creates the c collection with details of info. +// MongoDB creates collections automatically on use, so this method +// is only necessary when creating collections with non-default +// characteristics, such as capped collections.
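+// +// For example, an illustrative sketch creating a one-megabyte capped +// collection (the size is arbitrary): +// +// info := &mgo.CollectionInfo{Capped: true, MaxBytes: 1024 * 1024} +// err := collection.Create(info) +//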
+// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/createCollection+Command +// http://www.mongodb.org/display/DOCS/Capped+Collections +// +func (c *Collection) Create(info *CollectionInfo) error { + cmd := make(bson.D, 0, 4) + cmd = append(cmd, bson.DocElem{"create", c.Name}) + if info.Capped { + if info.MaxBytes < 1 { + return fmt.Errorf("Collection.Create: with Capped, MaxBytes must also be set") + } + cmd = append(cmd, bson.DocElem{"capped", true}) + cmd = append(cmd, bson.DocElem{"size", info.MaxBytes}) + if info.MaxDocs > 0 { + cmd = append(cmd, bson.DocElem{"max", info.MaxDocs}) + } + } + if info.DisableIdIndex { + cmd = append(cmd, bson.DocElem{"autoIndexId", false}) + } + if info.ForceIdIndex { + cmd = append(cmd, bson.DocElem{"autoIndexId", true}) + } + return c.Database.Run(cmd, nil) +} + +// Batch sets the batch size used when fetching documents from the database. +// It's possible to change this setting on a per-session basis as well, using +// the Batch method of Session. +// +// The default batch size is defined by the database itself. As of this +// writing, MongoDB will use an initial size of min(100 docs, 4MB) on the +// first batch, and 4MB on remaining ones. +func (q *Query) Batch(n int) *Query { + if n == 1 { + // Server interprets 1 as -1 and closes the cursor (!?) + n = 2 + } + q.m.Lock() + q.op.limit = int32(n) + q.m.Unlock() + return q +} + +// Prefetch sets the point at which the next batch of results will be requested. +// When there are p*batch_size remaining documents cached in an Iter, the next +// batch will be requested in background. For instance, when using this: +// +// query.Batch(200).Prefetch(0.25) +// +// and there are only 50 documents cached in the Iter to be processed, the +// next batch of 200 will be requested. It's possible to change this setting on +// a per-session basis as well, using the SetPrefetch method of Session. +// +// The default prefetch value is 0.25. +func (q *Query) Prefetch(p float64) *Query { + q.m.Lock() + q.prefetch = p + q.m.Unlock() + return q +} + +// Skip skips over the n initial documents from the query results. Note that +// this only makes sense with capped collections where documents are naturally +// ordered by insertion time, or with sorted results. +func (q *Query) Skip(n int) *Query { + q.m.Lock() + q.op.skip = int32(n) + q.m.Unlock() + return q +} + +// Limit restricts the maximum number of documents retrieved to n, and also +// changes the batch size to the same value. Once n documents have been +// returned by Next, the following call will return ErrNotFound. +func (q *Query) Limit(n int) *Query { + q.m.Lock() + switch { + case n == 1: + q.limit = 1 + q.op.limit = -1 + case n == math.MinInt32: // -MinInt32 == -MinInt32 + q.limit = math.MaxInt32 + q.op.limit = math.MinInt32 + 1 + case n < 0: + q.limit = int32(-n) + q.op.limit = int32(n) + default: + q.limit = int32(n) + q.op.limit = int32(n) + } + q.m.Unlock() + return q +} + +// Select enables selecting which fields should be retrieved for the results +// found. For example, the following query would only retrieve the name field: +// +// err := collection.Find(nil).Select(bson.M{"name": 1}).One(&result) +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields +// +func (q *Query) Select(selector interface{}) *Query { + q.m.Lock() + q.op.selector = selector + q.m.Unlock() + return q +} + +// Sort asks the database to order returned documents according to the +// provided field names. 
A field name may be prefixed by - (minus) for +// it to be sorted in reverse order. +// +// For example: +// +// query1 := collection.Find(nil).Sort("firstname", "lastname") +// query2 := collection.Find(nil).Sort("-age") +// query3 := collection.Find(nil).Sort("$natural") +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order +// +func (q *Query) Sort(fields ...string) *Query { + // TODO // query4 := collection.Find(nil).Sort("score:{$meta:textScore}") + q.m.Lock() + var order bson.D + for _, field := range fields { + n := 1 + if field != "" { + switch field[0] { + case '+': + field = field[1:] + case '-': + n = -1 + field = field[1:] + } + } + if field == "" { + panic("Sort: empty field name") + } + order = append(order, bson.DocElem{field, n}) + } + q.op.options.OrderBy = order + q.op.hasOptions = true + q.m.Unlock() + return q +} + +// Explain returns a number of details about how the MongoDB server would +// execute the requested query, such as the number of objects examined, +// the number of time the read lock was yielded to allow writes to go in, +// and so on. +// +// For example: +// +// m := bson.M{} +// err := collection.Find(bson.M{"filename": name}).Explain(m) +// if err == nil { +// fmt.Printf("Explain: %#v\n", m) +// } +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Optimization +// http://www.mongodb.org/display/DOCS/Query+Optimizer +// +func (q *Query) Explain(result interface{}) error { + q.m.Lock() + clone := &Query{session: q.session, query: q.query} + q.m.Unlock() + clone.op.options.Explain = true + clone.op.hasOptions = true + if clone.op.limit > 0 { + clone.op.limit = -q.op.limit + } + iter := clone.Iter() + if iter.Next(result) { + return nil + } + return iter.Close() +} + +// Hint will include an explicit "hint" in the query to force the server +// to use a specified index, potentially improving performance in some +// situations. The provided parameters are the fields that compose the +// key of the index to be used. For details on how the indexKey may be +// built, see the EnsureIndex method. +// +// For example: +// +// query := collection.Find(bson.M{"firstname": "Joe", "lastname": "Winter"}) +// query.Hint("lastname", "firstname") +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Optimization +// http://www.mongodb.org/display/DOCS/Query+Optimizer +// +func (q *Query) Hint(indexKey ...string) *Query { + q.m.Lock() + _, realKey, err := parseIndexKey(indexKey) + q.op.options.Hint = realKey + q.op.hasOptions = true + q.m.Unlock() + if err != nil { + panic(err) + } + return q +} + +// Snapshot will force the performed query to make use of an available +// index on the _id field to prevent the same document from being returned +// more than once in a single iteration. This might happen without this +// setting in situations when the document changes in size and thus has to +// be moved while the iteration is running. +// +// Because snapshot mode traverses the _id index, it may not be used with +// sorting or explicit hints. It also cannot use any other index for the +// query. +// +// Even with snapshot mode, items inserted or deleted during the query may +// or may not be returned; that is, this mode is not a true point-in-time +// snapshot. +// +// The same effect of Snapshot may be obtained by using any unique index on +// field(s) that will not be modified (best to use Hint explicitly too). 
+// A non-unique index (such as creation time) may be made unique by +// appending _id to the index when creating it. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/How+to+do+Snapshotted+Queries+in+the+Mongo+Database +// +func (q *Query) Snapshot() *Query { + q.m.Lock() + q.op.options.Snapshot = true + q.op.hasOptions = true + q.m.Unlock() + return q +} + +// LogReplay enables an option that optimizes queries that are typically +// made on the MongoDB oplog for replaying it. This is an internal +// implementation aspect and most likely uninteresting for other uses. +// It has seen at least one use case, though, so it's exposed via the API. +func (q *Query) LogReplay() *Query { + q.m.Lock() + q.op.flags |= flagLogReplay + q.m.Unlock() + return q +} + +func checkQueryError(fullname string, d []byte) error { + l := len(d) + if l < 16 { + return nil + } + if d[5] == '$' && d[6] == 'e' && d[7] == 'r' && d[8] == 'r' && d[9] == '\x00' && d[4] == '\x02' { + goto Error + } + if len(fullname) < 5 || fullname[len(fullname)-5:] != ".$cmd" { + return nil + } + for i := 0; i+8 < l; i++ { + if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' { + goto Error + } + } + return nil + +Error: + result := &queryError{} + bson.Unmarshal(d, result) + logf("queryError: %#v\n", result) + if result.LastError != nil { + return result.LastError + } + if result.Err == "" && result.ErrMsg == "" { + return nil + } + if result.AssertionCode != 0 && result.Assertion != "" { + return &QueryError{Code: result.AssertionCode, Message: result.Assertion, Assertion: true} + } + if result.Err != "" { + return &QueryError{Code: result.Code, Message: result.Err} + } + return &QueryError{Code: result.Code, Message: result.ErrMsg} +} + +// One executes the query and unmarshals the first obtained document into the +// result argument. The result must be a struct or map value capable of being +// unmarshalled into by gobson. This function blocks until either a result +// is available or an error happens. For example: +// +// err := collection.Find(bson.M{"a": 1}).One(&result) +// +// In case the resulting document includes a field named $err or errmsg, which +// are standard ways for MongoDB to return query errors, the returned err will +// be set to a *QueryError value including the Err message and the Code. In +// those cases, the result argument is still unmarshalled into with the +// received document so that any other custom values may be obtained if +// desired. +// +func (q *Query) One(result interface{}) (err error) { + q.m.Lock() + session := q.session + op := q.op // Copy. + q.m.Unlock() + + socket, err := session.acquireSocket(true) + if err != nil { + return err + } + defer socket.Release() + + op.flags |= session.slaveOkFlag() + op.limit = -1 + + data, err := socket.SimpleQuery(&op) + if err != nil { + return err + } + if data == nil { + return ErrNotFound + } + if result != nil { + err = bson.Unmarshal(data, result) + if err == nil { + debugf("Query %p document unmarshaled: %#v", q, result) + } else { + debugf("Query %p document unmarshaling failed: %#v", q, err) + return err + } + } + return checkQueryError(op.collection, data) +} + +// The DBRef type implements support for the database reference MongoDB +// convention as supported by multiple drivers.
This convention enables +// cross-referencing documents between collections and databases using +// a structure which includes a collection name, a document id, and +// optionally a database name. +// +// See the FindRef methods on Session and on Database. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Database+References +// +type DBRef struct { + Collection string `bson:"$ref"` + Id interface{} `bson:"$id"` + Database string `bson:"$db,omitempty"` +} + +// NOTE: Order of fields for DBRef above does matter, per documentation. + +// FindRef returns a query that looks for the document in the provided +// reference. If the reference includes the DB field, the document will +// be retrieved from the respective database. +// +// See also the DBRef type and the FindRef method on Session. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Database+References +// +func (db *Database) FindRef(ref *DBRef) *Query { + var c *Collection + if ref.Database == "" { + c = db.C(ref.Collection) + } else { + c = db.Session.DB(ref.Database).C(ref.Collection) + } + return c.FindId(ref.Id) +} + +// FindRef returns a query that looks for the document in the provided +// reference. For a DBRef to be resolved correctly at the session level +// it must necessarily have the optional DB field defined. +// +// See also the DBRef type and the FindRef method on Database. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Database+References +// +func (s *Session) FindRef(ref *DBRef) *Query { + if ref.Database == "" { + panic(errors.New(fmt.Sprintf("Can't resolve database for %#v", ref))) + } + c := s.DB(ref.Database).C(ref.Collection) + return c.FindId(ref.Id) +} + +// CollectionNames returns the collection names present in database. +func (db *Database) CollectionNames() (names []string, err error) { + c := len(db.Name) + 1 + iter := db.C("system.namespaces").Find(nil).Iter() + var result *struct{ Name string } + for iter.Next(&result) { + if strings.Index(result.Name, "$") < 0 || strings.Index(result.Name, ".oplog.$") >= 0 { + names = append(names, result.Name[c:]) + } + } + if err := iter.Close(); err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +type dbNames struct { + Databases []struct { + Name string + Empty bool + } +} + +// DatabaseNames returns the names of non-empty databases present in the cluster. +func (s *Session) DatabaseNames() (names []string, err error) { + var result dbNames + err = s.Run("listDatabases", &result) + if err != nil { + return nil, err + } + for _, db := range result.Databases { + if !db.Empty { + names = append(names, db.Name) + } + } + sort.Strings(names) + return names, nil +} + +// Iter executes the query and returns an iterator capable of going over all +// the results. Results will be returned in batches of configurable +// size (see the Batch method) and more documents will be requested when a +// configurable number of documents is iterated over (see the Prefetch method). 
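+// +// For example, an illustrative sketch (result is a placeholder value to +// unmarshal each document into): +// +// iter := collection.Find(nil).Iter() +// for iter.Next(&result) { +// fmt.Println(result.Id) +// } +// if err := iter.Close(); err != nil { +// return err +// } +//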
+func (q *Query) Iter() *Iter {
+	q.m.Lock()
+	session := q.session
+	op := q.op
+	prefetch := q.prefetch
+	limit := q.limit
+	q.m.Unlock()
+
+	iter := &Iter{
+		session:  session,
+		prefetch: prefetch,
+		limit:    limit,
+		timeout:  -1,
+	}
+	iter.gotReply.L = &iter.m
+	iter.op.collection = op.collection
+	iter.op.limit = op.limit
+	iter.op.replyFunc = iter.replyFunc()
+	iter.docsToReceive++
+	op.replyFunc = iter.op.replyFunc
+	op.flags |= session.slaveOkFlag()
+
+	socket, err := session.acquireSocket(true)
+	if err != nil {
+		iter.err = err
+	} else {
+		iter.server = socket.Server()
+		err = socket.Query(&op)
+		if err != nil {
+			// Must lock as the query above may call replyFunc.
+			iter.m.Lock()
+			iter.err = err
+			iter.m.Unlock()
+		}
+		socket.Release()
+	}
+	return iter
+}
+
+// Tail returns a tailable iterator. Unlike a normal iterator, a
+// tailable iterator may wait for new values to be inserted in the
+// collection once the end of the current result set is reached. A
+// tailable iterator may only be used with capped collections.
+//
+// The timeout parameter indicates how long Next will block waiting
+// for a result before timing out. If set to -1, Next will not
+// timeout, and will continue waiting for a result for as long as
+// the cursor is valid and the session is not closed. If set to 0,
+// Next times out as soon as it reaches the end of the result set.
+// Otherwise, Next will wait for at least the given duration for a
+// new document to be available before timing out.
+//
+// On timeouts, Next will unblock and return false, and the Timeout
+// method will return true if called. In these cases, Next may still
+// be called again on the same iterator to check if a new value is
+// available at the current cursor position, and again it will block
+// according to the specified timeout. If the cursor becomes
+// invalid, though, both Next and Timeout will return false and
+// the query must be restarted.
+//
+// The following example demonstrates timeout handling and query
+// restarting:
+//
+//    iter := collection.Find(nil).Sort("$natural").Tail(5 * time.Second)
+//    for {
+//        for iter.Next(&result) {
+//            fmt.Println(result.Id)
+//            lastId = result.Id
+//        }
+//        if iter.Err() != nil {
+//            return iter.Close()
+//        }
+//        if iter.Timeout() {
+//            continue
+//        }
+//        query := collection.Find(bson.M{"_id": bson.M{"$gt": lastId}})
+//        iter = query.Sort("$natural").Tail(5 * time.Second)
+//    }
+//    iter.Close()
+//
+// Relevant documentation:
+//
+//     http://www.mongodb.org/display/DOCS/Tailable+Cursors
+//     http://www.mongodb.org/display/DOCS/Capped+Collections
+//     http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
+//
+func (q *Query) Tail(timeout time.Duration) *Iter {
+	q.m.Lock()
+	session := q.session
+	op := q.op
+	prefetch := q.prefetch
+	q.m.Unlock()
+
+	iter := &Iter{session: session, prefetch: prefetch}
+	iter.gotReply.L = &iter.m
+	iter.timeout = timeout
+	iter.op.collection = op.collection
+	iter.op.limit = op.limit
+	iter.op.replyFunc = iter.replyFunc()
+	iter.docsToReceive++
+	op.replyFunc = iter.op.replyFunc
+	op.flags |= flagTailable | flagAwaitData | session.slaveOkFlag()
+
+	socket, err := session.acquireSocket(true)
+	if err != nil {
+		iter.err = err
+	} else {
+		iter.server = socket.Server()
+		err = socket.Query(&op)
+		if err != nil {
+			// Must lock as the query above may call replyFunc.
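+			// (Editorial note: the server reply is delivered via
+			// replyFunc on the socket's reader goroutine, so iter.err
+			// may be written concurrently and needs iter.m held.)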
+ iter.m.Lock() + iter.err = err + iter.m.Unlock() + } + socket.Release() + } + return iter +} + +func (s *Session) slaveOkFlag() (flag queryOpFlags) { + s.m.RLock() + if s.slaveOk { + flag = flagSlaveOk + } + s.m.RUnlock() + return +} + +// Err returns nil if no errors happened during iteration, or the actual +// error otherwise. +// +// In case a resulting document included a field named $err or errmsg, which are +// standard ways for MongoDB to report an improper query, the returned value has +// a *QueryError type, and includes the Err message and the Code. +func (iter *Iter) Err() error { + iter.m.Lock() + err := iter.err + iter.m.Unlock() + if err == ErrNotFound { + return nil + } + return err +} + +// Close kills the server cursor used by the iterator, if any, and returns +// nil if no errors happened during iteration, or the actual error otherwise. +// +// Server cursors are automatically closed at the end of an iteration, which +// means close will do nothing unless the iteration was interrupted before +// the server finished sending results to the driver. If Close is not called +// in such a situation, the cursor will remain available at the server until +// the default cursor timeout period is reached. No further problems arise. +// +// Close is idempotent. That means it can be called repeatedly and will +// return the same result every time. +// +// In case a resulting document included a field named $err or errmsg, which are +// standard ways for MongoDB to report an improper query, the returned value has +// a *QueryError type. +func (iter *Iter) Close() error { + iter.m.Lock() + iter.killCursor() + err := iter.err + iter.m.Unlock() + if err == ErrNotFound { + return nil + } + return err +} + +func (iter *Iter) killCursor() error { + if iter.op.cursorId != 0 { + socket, err := iter.acquireSocket() + if err == nil { + // TODO Batch kills. + err = socket.Query(&killCursorsOp{[]int64{iter.op.cursorId}}) + socket.Release() + } + if err != nil && (iter.err == nil || iter.err == ErrNotFound) { + iter.err = err + } + iter.op.cursorId = 0 + return err + } + return nil +} + +// Timeout returns true if Next returned false due to a timeout of +// a tailable cursor. In those cases, Next may be called again to continue +// the iteration at the previous cursor position. +func (iter *Iter) Timeout() bool { + iter.m.Lock() + result := iter.timedout + iter.m.Unlock() + return result +} + +// Next retrieves the next document from the result set, blocking if necessary. +// This method will also automatically retrieve another batch of documents from +// the server when the current one is exhausted, or before that in background +// if pre-fetching is enabled (see the Query.Prefetch and Session.SetPrefetch +// methods). +// +// Next returns true if a document was successfully unmarshalled onto result, +// and false at the end of the result set or if an error happened. +// When Next returns false, the Err method should be called to verify if +// there was an error during iteration. 
+// +// For example: +// +// iter := collection.Find(nil).Iter() +// for iter.Next(&result) { +// fmt.Printf("Result: %v\n", result.Id) +// } +// if err := iter.Close(); err != nil { +// return err +// } +// +func (iter *Iter) Next(result interface{}) bool { + iter.m.Lock() + iter.timedout = false + timeout := time.Time{} + for iter.err == nil && iter.docData.Len() == 0 && (iter.docsToReceive > 0 || iter.op.cursorId != 0) { + if iter.docsToReceive == 0 { + if iter.timeout >= 0 { + if timeout.IsZero() { + timeout = time.Now().Add(iter.timeout) + } + if time.Now().After(timeout) { + iter.timedout = true + iter.m.Unlock() + return false + } + } + iter.getMore() + if iter.err != nil { + break + } + } + iter.gotReply.Wait() + } + + // Exhaust available data before reporting any errors. + if docData, ok := iter.docData.Pop().([]byte); ok { + if iter.limit > 0 { + iter.limit-- + if iter.limit == 0 { + if iter.docData.Len() > 0 { + iter.m.Unlock() + panic(fmt.Errorf("data remains after limit exhausted: %d", iter.docData.Len())) + } + iter.err = ErrNotFound + if iter.killCursor() != nil { + iter.m.Unlock() + return false + } + } + } + if iter.op.cursorId != 0 && iter.err == nil { + if iter.docsBeforeMore == 0 { + iter.getMore() + } + iter.docsBeforeMore-- // Goes negative. + } + iter.m.Unlock() + err := bson.Unmarshal(docData, result) + if err != nil { + debugf("Iter %p document unmarshaling failed: %#v", iter, err) + iter.m.Lock() + if iter.err == nil { + iter.err = err + } + iter.m.Unlock() + return false + } + debugf("Iter %p document unmarshaled: %#v", iter, result) + // XXX Only have to check first document for a query error? + err = checkQueryError(iter.op.collection, docData) + if err != nil { + iter.m.Lock() + if iter.err == nil { + iter.err = err + } + iter.m.Unlock() + return false + } + return true + } else if iter.err != nil { + debugf("Iter %p returning false: %s", iter, iter.err) + iter.m.Unlock() + return false + } else if iter.op.cursorId == 0 { + iter.err = ErrNotFound + debugf("Iter %p exhausted with cursor=0", iter) + iter.m.Unlock() + return false + } + + panic("unreachable") +} + +// All retrieves all documents from the result set into the provided slice +// and closes the iterator. +// +// The result argument must necessarily be the address for a slice. The slice +// may be nil or previously allocated. +// +// WARNING: Obviously, All must not be used with result sets that may be +// potentially large, since it may consume all memory until the system +// crashes. Consider building the query with a Limit clause to ensure the +// result size is bounded. +// +// For instance: +// +// var result []struct{ Value int } +// iter := collection.Find(nil).Limit(100).Iter() +// err := iter.All(&result) +// if err != nil { +// return err +// } +// +func (iter *Iter) All(result interface{}) error { + resultv := reflect.ValueOf(result) + if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice { + panic("result argument must be a slice address") + } + slicev := resultv.Elem() + slicev = slicev.Slice(0, slicev.Cap()) + elemt := slicev.Type().Elem() + i := 0 + for { + if slicev.Len() == i { + elemp := reflect.New(elemt) + if !iter.Next(elemp.Interface()) { + break + } + slicev = reflect.Append(slicev, elemp.Elem()) + slicev = slicev.Slice(0, slicev.Cap()) + } else { + if !iter.Next(slicev.Index(i).Addr().Interface()) { + break + } + } + i++ + } + resultv.Elem().Set(slicev.Slice(0, i)) + return iter.Close() +} + +// All works like Iter.All. 
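+//
+// A minimal sketch of its use (an editorial addition, assuming the
+// collection holds documents with an integer field n):
+//
+//    var all []struct{ N int }
+//    if err := collection.Find(nil).Limit(100).All(&all); err != nil {
+//        return err
+//    }
+//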
+func (q *Query) All(result interface{}) error { + return q.Iter().All(result) +} + +// The For method is obsolete and will be removed in a future release. +// See Iter as an elegant replacement. +func (q *Query) For(result interface{}, f func() error) error { + return q.Iter().For(result, f) +} + +// The For method is obsolete and will be removed in a future release. +// See Iter as an elegant replacement. +func (iter *Iter) For(result interface{}, f func() error) (err error) { + valid := false + v := reflect.ValueOf(result) + if v.Kind() == reflect.Ptr { + v = v.Elem() + switch v.Kind() { + case reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + valid = v.IsNil() + } + } + if !valid { + panic("For needs a pointer to nil reference value. See the documentation.") + } + zero := reflect.Zero(v.Type()) + for { + v.Set(zero) + if !iter.Next(result) { + break + } + err = f() + if err != nil { + return err + } + } + return iter.Err() +} + +func (iter *Iter) acquireSocket() (*mongoSocket, error) { + socket, err := iter.session.acquireSocket(true) + if err != nil { + return nil, err + } + if socket.Server() != iter.server { + // Socket server changed during iteration. This may happen + // with Eventual sessions, if a Refresh is done, or if a + // monotonic session gets a write and shifts from secondary + // to primary. Our cursor is in a specific server, though. + iter.session.m.Lock() + sockTimeout := iter.session.sockTimeout + iter.session.m.Unlock() + socket.Release() + socket, _, err = iter.server.AcquireSocket(0, sockTimeout) + if err != nil { + return nil, err + } + err := iter.session.socketLogin(socket) + if err != nil { + socket.Release() + return nil, err + } + } + return socket, nil +} + +func (iter *Iter) getMore() { + socket, err := iter.acquireSocket() + if err != nil { + iter.err = err + return + } + defer socket.Release() + + debugf("Iter %p requesting more documents", iter) + if iter.limit > 0 { + limit := iter.limit - int32(iter.docsToReceive) - int32(iter.docData.Len()) + if limit < iter.op.limit { + iter.op.limit = limit + } + } + if err := socket.Query(&iter.op); err != nil { + iter.err = err + } + iter.docsToReceive++ +} + +type countCmd struct { + Count string + Query interface{} + Limit int32 ",omitempty" + Skip int32 ",omitempty" +} + +// Count returns the total number of documents in the result set. +func (q *Query) Count() (n int, err error) { + q.m.Lock() + session := q.session + op := q.op + limit := q.limit + q.m.Unlock() + + c := strings.Index(op.collection, ".") + if c < 0 { + return 0, errors.New("Bad collection name: " + op.collection) + } + + dbname := op.collection[:c] + cname := op.collection[c+1:] + + result := struct{ N int }{} + err = session.DB(dbname).Run(countCmd{cname, op.query, limit, op.skip}, &result) + return result.N, err +} + +// Count returns the total number of documents in the collection. +func (c *Collection) Count() (n int, err error) { + return c.Find(nil).Count() +} + +type distinctCmd struct { + Collection string "distinct" + Key string + Query interface{} ",omitempty" +} + +// Distinct returns a list of distinct values for the given key within +// the result set. The list of distinct values will be unmarshalled +// in the "values" key of the provided result parameter. 
+// +// For example: +// +// var result []int +// err := collection.Find(bson.M{"gender": "F"}).Distinct("age", &result) +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/Aggregation +// +func (q *Query) Distinct(key string, result interface{}) error { + q.m.Lock() + session := q.session + op := q.op // Copy. + q.m.Unlock() + + c := strings.Index(op.collection, ".") + if c < 0 { + return errors.New("Bad collection name: " + op.collection) + } + + dbname := op.collection[:c] + cname := op.collection[c+1:] + + var doc struct{ Values bson.Raw } + err := session.DB(dbname).Run(distinctCmd{cname, key, op.query}, &doc) + if err != nil { + return err + } + return doc.Values.Unmarshal(result) +} + +type mapReduceCmd struct { + Collection string "mapreduce" + Map string ",omitempty" + Reduce string ",omitempty" + Finalize string ",omitempty" + Limit int32 ",omitempty" + Out interface{} + Query interface{} ",omitempty" + Sort interface{} ",omitempty" + Scope interface{} ",omitempty" + Verbose bool ",omitempty" +} + +type mapReduceResult struct { + Results bson.Raw + Result bson.Raw + TimeMillis int64 "timeMillis" + Counts struct{ Input, Emit, Output int } + Ok bool + Err string + Timing *MapReduceTime +} + +type MapReduce struct { + Map string // Map Javascript function code (required) + Reduce string // Reduce Javascript function code (required) + Finalize string // Finalize Javascript function code (optional) + Out interface{} // Output collection name or document. If nil, results are inlined into the result parameter. + Scope interface{} // Optional global scope for Javascript functions + Verbose bool +} + +type MapReduceInfo struct { + InputCount int // Number of documents mapped + EmitCount int // Number of times reduce called emit + OutputCount int // Number of documents in resulting collection + Database string // Output database, if results are not inlined + Collection string // Output collection, if results are not inlined + Time int64 // Time to run the job, in nanoseconds + VerboseTime *MapReduceTime // Only defined if Verbose was true +} + +type MapReduceTime struct { + Total int64 // Total time, in nanoseconds + Map int64 "mapTime" // Time within map function, in nanoseconds + EmitLoop int64 "emitLoop" // Time within the emit/map loop, in nanoseconds +} + +// MapReduce executes a map/reduce job for documents covered by the query. +// That kind of job is suitable for very flexible bulk aggregation of data +// performed at the server side via Javascript functions. +// +// Results from the job may be returned as a result of the query itself +// through the result parameter in case they'll certainly fit in memory +// and in a single document. If there's the possibility that the amount +// of data might be too large, results must be stored back in an alternative +// collection or even a separate database, by setting the Out field of the +// provided MapReduce job. In that case, provide nil as the result parameter. +// +// These are some of the ways to set Out: +// +// nil +// Inline results into the result parameter. +// +// bson.M{"replace": "mycollection"} +// The output will be inserted into a collection which replaces any +// existing collection with the same name. +// +// bson.M{"merge": "mycollection"} +// This option will merge new data into the old output collection. In +// other words, if the same key exists in both the result set and the +// old collection, the new key will overwrite the old one. 
+// +// bson.M{"reduce": "mycollection"} +// If documents exist for a given key in the result set and in the old +// collection, then a reduce operation (using the specified reduce +// function) will be performed on the two values and the result will be +// written to the output collection. If a finalize function was +// provided, this will be run after the reduce as well. +// +// bson.M{...., "db": "mydb"} +// Any of the above options can have the "db" key included for doing +// the respective action in a separate database. +// +// The following is a trivial example which will count the number of +// occurrences of a field named n on each document in a collection, and +// will return results inline: +// +// job := &mgo.MapReduce{ +// Map: "function() { emit(this.n, 1) }", +// Reduce: "function(key, values) { return Array.sum(values) }", +// } +// var result []struct { Id int "_id"; Value int } +// _, err := collection.Find(nil).MapReduce(job, &result) +// if err != nil { +// return err +// } +// for _, item := range result { +// fmt.Println(item.Value) +// } +// +// This function is compatible with MongoDB 1.7.4+. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/MapReduce +// +func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceInfo, err error) { + q.m.Lock() + session := q.session + op := q.op // Copy. + limit := q.limit + q.m.Unlock() + + c := strings.Index(op.collection, ".") + if c < 0 { + return nil, errors.New("Bad collection name: " + op.collection) + } + + dbname := op.collection[:c] + cname := op.collection[c+1:] + + cmd := mapReduceCmd{ + Collection: cname, + Map: job.Map, + Reduce: job.Reduce, + Finalize: job.Finalize, + Out: fixMROut(job.Out), + Scope: job.Scope, + Verbose: job.Verbose, + Query: op.query, + Sort: op.options.OrderBy, + Limit: limit, + } + + if cmd.Out == nil { + cmd.Out = bson.M{"inline": 1} + } + + var doc mapReduceResult + err = session.DB(dbname).Run(&cmd, &doc) + if err != nil { + return nil, err + } + if doc.Err != "" { + return nil, errors.New(doc.Err) + } + + info = &MapReduceInfo{ + InputCount: doc.Counts.Input, + EmitCount: doc.Counts.Emit, + OutputCount: doc.Counts.Output, + Time: doc.TimeMillis * 1e6, + } + + if doc.Result.Kind == 0x02 { + err = doc.Result.Unmarshal(&info.Collection) + info.Database = dbname + } else if doc.Result.Kind == 0x03 { + var v struct{ Collection, Db string } + err = doc.Result.Unmarshal(&v) + info.Collection = v.Collection + info.Database = v.Db + } + + if doc.Timing != nil { + info.VerboseTime = doc.Timing + info.VerboseTime.Total *= 1e6 + info.VerboseTime.Map *= 1e6 + info.VerboseTime.EmitLoop *= 1e6 + } + + if err != nil { + return nil, err + } + if result != nil { + return info, doc.Results.Unmarshal(result) + } + return info, nil +} + +// The "out" option in the MapReduce command must be ordered. This was +// found after the implementation was accepting maps for a long time, +// so rather than breaking the API, we'll fix the order if necessary. 
+// Details about the order requirement may be seen in MongoDB's code: +// +// http://goo.gl/L8jwJX +// +func fixMROut(out interface{}) interface{} { + outv := reflect.ValueOf(out) + if outv.Kind() != reflect.Map || outv.Type().Key() != reflect.TypeOf("") { + return out + } + outs := make(bson.D, outv.Len()) + + outTypeIndex := -1 + for i, k := range outv.MapKeys() { + ks := k.String() + outs[i].Name = ks + outs[i].Value = outv.MapIndex(k).Interface() + switch ks { + case "normal", "replace", "merge", "reduce", "inline": + outTypeIndex = i + } + } + if outTypeIndex > 0 { + outs[0], outs[outTypeIndex] = outs[outTypeIndex], outs[0] + } + return outs +} + +// Change holds fields for running a findAndModify MongoDB command via +// the Query.Apply method. +type Change struct { + Update interface{} // The update document + Upsert bool // Whether to insert in case the document isn't found + Remove bool // Whether to remove the document found rather than updating + ReturnNew bool // Should the modified document be returned rather than the old one +} + +type findModifyCmd struct { + Collection string "findAndModify" + Query, Update, Sort, Fields interface{} ",omitempty" + Upsert, Remove, New bool ",omitempty" +} + +type valueResult struct { + Value bson.Raw + LastError LastError "lastErrorObject" +} + +// Apply runs the findAndModify MongoDB command, which allows updating, upserting +// or removing a document matching a query and atomically returning either the old +// version (the default) or the new version of the document (when ReturnNew is true). +// If no objects are found Apply returns ErrNotFound. +// +// The Sort and Select query methods affect the result of Apply. In case +// multiple documents match the query, Sort enables selecting which document to +// act upon by ordering it first. Select enables retrieving only a selection +// of fields of the new or old document. +// +// This simple example increments a counter and prints its new value: +// +// change := mgo.Change{ +// Update: bson.M{"$inc": bson.M{"n": 1}}, +// ReturnNew: true, +// } +// info, err = col.Find(M{"_id": id}).Apply(change, &doc) +// fmt.Println(doc.N) +// +// This method depends on MongoDB >= 2.0 to work properly. +// +// Relevant documentation: +// +// http://www.mongodb.org/display/DOCS/findAndModify+Command +// http://www.mongodb.org/display/DOCS/Updating +// http://www.mongodb.org/display/DOCS/Atomic+Operations +// +func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err error) { + q.m.Lock() + session := q.session + op := q.op // Copy. 
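+	// (Editorial note: the op value is copied while q.m is held so the
+	// findAndModify command below is built from a stable snapshot of the
+	// query, sort, and selector.)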
+ q.m.Unlock() + + c := strings.Index(op.collection, ".") + if c < 0 { + return nil, errors.New("bad collection name: " + op.collection) + } + + dbname := op.collection[:c] + cname := op.collection[c+1:] + + cmd := findModifyCmd{ + Collection: cname, + Update: change.Update, + Upsert: change.Upsert, + Remove: change.Remove, + New: change.ReturnNew, + Query: op.query, + Sort: op.options.OrderBy, + Fields: op.selector, + } + + session = session.Clone() + defer session.Close() + session.SetMode(Strong, false) + + var doc valueResult + err = session.DB(dbname).Run(&cmd, &doc) + if err != nil { + if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" { + return nil, ErrNotFound + } + return nil, err + } + if doc.LastError.N == 0 { + return nil, ErrNotFound + } + if doc.Value.Kind != 0x0A { + err = doc.Value.Unmarshal(result) + if err != nil { + return nil, err + } + } + info = &ChangeInfo{} + lerr := &doc.LastError + if lerr.UpdatedExisting { + info.Updated = lerr.N + } else if change.Remove { + info.Removed = lerr.N + } else if change.Upsert { + info.UpsertedId = lerr.UpsertedId + } + return info, nil +} + +// The BuildInfo type encapsulates details about the running MongoDB server. +// +// Note that the VersionArray field was introduced in MongoDB 2.0+, but it is +// internally assembled from the Version information for previous versions. +// In both cases, VersionArray is guaranteed to have at least 4 entries. +type BuildInfo struct { + Version string + VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise + GitVersion string `bson:"gitVersion"` + SysInfo string `bson:"sysInfo"` + Bits int + Debug bool + MaxObjectSize int `bson:"maxBsonObjectSize"` +} + +// VersionAtLeast returns whether the BuildInfo version is greater than or +// equal to the provided version number. If more than one number is +// provided, numbers will be considered as major, minor, and so on. +func (bi *BuildInfo) VersionAtLeast(version ...int) bool { + for i := range version { + if i == len(bi.VersionArray) { + return false + } + if bi.VersionArray[i] < version[i] { + return false + } + } + return true +} + +// BuildInfo retrieves the version and other details about the +// running MongoDB server. +func (s *Session) BuildInfo() (info BuildInfo, err error) { + err = s.Run(bson.D{{"buildInfo", "1"}}, &info) + if len(info.VersionArray) == 0 { + for _, a := range strings.Split(info.Version, ".") { + i, err := strconv.Atoi(a) + if err != nil { + break + } + info.VersionArray = append(info.VersionArray, i) + } + } + for len(info.VersionArray) < 4 { + info.VersionArray = append(info.VersionArray, 0) + } + return +} + +// --------------------------------------------------------------------------- +// Internal session handling helpers. + +func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { + + // Read-only lock to check for previously reserved socket. + s.m.RLock() + if s.masterSocket != nil { + socket := s.masterSocket + socket.Acquire() + s.m.RUnlock() + return socket, nil + } + if s.slaveSocket != nil && s.slaveOk && slaveOk { + socket := s.slaveSocket + socket.Acquire() + s.m.RUnlock() + return socket, nil + } + s.m.RUnlock() + + // No go. We may have to request a new socket and change the session, + // so try again but with an exclusive lock now. 
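+	// (Editorial note: this is the double-checked locking idiom; the
+	// read-locked checks above are repeated under the write lock because
+	// another goroutine may have reserved a socket in the meantime.)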
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	if s.masterSocket != nil {
+		s.masterSocket.Acquire()
+		return s.masterSocket, nil
+	}
+	if s.slaveSocket != nil && s.slaveOk && slaveOk {
+		s.slaveSocket.Acquire()
+		return s.slaveSocket, nil
+	}
+
+	// Still not good. We need a new socket.
+	sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags)
+	if err != nil {
+		return nil, err
+	}
+
+	// Authenticate the new socket.
+	if err = s.socketLogin(sock); err != nil {
+		sock.Release()
+		return nil, err
+	}
+
+	// Keep track of the new socket, if necessary.
+	// Note that, as a special case, if the Eventual session was
+	// not refreshed (s.slaveSocket != nil), it means the developer
+	// asked to preserve an existing reserved socket, so we'll
+	// keep a master one around too before a Refresh happens.
+	if s.consistency != Eventual || s.slaveSocket != nil {
+		s.setSocket(sock)
+	}
+
+	// Switch over a Monotonic session to the master.
+	if !slaveOk && s.consistency == Monotonic {
+		s.slaveOk = false
+	}
+
+	return sock, nil
+}
+
+// setSocket binds socket to this session.
+func (s *Session) setSocket(socket *mongoSocket) {
+	info := socket.Acquire()
+	if info.Master {
+		if s.masterSocket != nil {
+			panic("setSocket(master) with existing master socket reserved")
+		}
+		s.masterSocket = socket
+	} else {
+		if s.slaveSocket != nil {
+			panic("setSocket(slave) with existing slave socket reserved")
+		}
+		s.slaveSocket = socket
+	}
+}
+
+// unsetSocket releases any slave and/or master sockets reserved.
+func (s *Session) unsetSocket() {
+	if s.masterSocket != nil {
+		s.masterSocket.Release()
+	}
+	if s.slaveSocket != nil {
+		s.slaveSocket.Release()
+	}
+	s.masterSocket = nil
+	s.slaveSocket = nil
+}
+
+func (iter *Iter) replyFunc() replyFunc {
+	return func(err error, op *replyOp, docNum int, docData []byte) {
+		iter.m.Lock()
+		iter.docsToReceive--
+		if err != nil {
+			iter.err = err
+			debugf("Iter %p received an error: %s", iter, err.Error())
+		} else if docNum == -1 {
+			debugf("Iter %p received no documents (cursor=%d).", iter, op.cursorId)
+			if op != nil && op.cursorId != 0 {
+				// It's a tailable cursor.
+				iter.op.cursorId = op.cursorId
+			} else {
+				iter.err = ErrNotFound
+			}
+		} else {
+			rdocs := int(op.replyDocs)
+			if docNum == 0 {
+				iter.docsToReceive += rdocs - 1
+				docsToProcess := iter.docData.Len() + rdocs
+				if iter.limit == 0 || int32(docsToProcess) < iter.limit {
+					iter.docsBeforeMore = docsToProcess - int(iter.prefetch*float64(rdocs))
+				} else {
+					iter.docsBeforeMore = -1
+				}
+				iter.op.cursorId = op.cursorId
+			}
+			// XXX Handle errors and flags.
+			debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, rdocs, op.cursorId)
+			iter.docData.Push(docData)
+		}
+		iter.gotReply.Broadcast()
+		iter.m.Unlock()
+	}
+}
+
+// writeQuery runs the given modifying operation, potentially followed up
+// by a getLastError command in case the session is in safe mode. The
+// LastError result is made available in lerr, and if lerr.Err is set it
+// will also be returned as err.
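+//
+// (Editorial sketch, not original documentation.) With safe mode enabled
+// via Session.SetSafe, the effect is roughly that of pairing every write
+// with a getLastError command on the same socket:
+//
+//    session.SetSafe(&Safe{})      // writes now run through this path
+//    err := collection.Insert(doc) // insert + getLastError round trip
+//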
+func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { + s := c.Database.Session + dbname := c.Database.Name + socket, err := s.acquireSocket(dbname == "local") + if err != nil { + return nil, err + } + defer socket.Release() + + s.m.RLock() + safeOp := s.safeOp + s.m.RUnlock() + + if safeOp == nil { + return nil, socket.Query(op) + } else { + var mutex sync.Mutex + var replyData []byte + var replyErr error + mutex.Lock() + query := *safeOp // Copy the data. + query.collection = dbname + ".$cmd" + query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + replyData = docData + replyErr = err + mutex.Unlock() + } + err = socket.Query(op, &query) + if err != nil { + return nil, err + } + mutex.Lock() // Wait. + if replyErr != nil { + return nil, replyErr // XXX TESTME + } + if hasErrMsg(replyData) { + // Looks like getLastError itself failed. + err = checkQueryError(query.collection, replyData) + if err != nil { + return nil, err + } + } + result := &LastError{} + bson.Unmarshal(replyData, &result) + debugf("Result from writing query: %#v", result) + if result.Err != "" { + return result, result + } + return result, nil + } + panic("unreachable") +} + +func hasErrMsg(d []byte) bool { + l := len(d) + for i := 0; i+8 < l; i++ { + if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' { + return true + } + } + return false +} diff --git a/vendor/labix.org/v2/mgo/session_test.go b/vendor/labix.org/v2/mgo/session_test.go new file mode 100644 index 0000000..e9f90f5 --- /dev/null +++ b/vendor/labix.org/v2/mgo/session_test.go @@ -0,0 +1,3260 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "flag" + "fmt" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + . 
"launchpad.net/gocheck" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "time" +) + +func (s *S) TestRunString(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + result := struct{ Ok int }{} + err = session.Run("ping", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, 1) +} + +func (s *S) TestRunValue(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + result := struct{ Ok int }{} + err = session.Run(M{"ping": 1}, &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, 1) +} + +func (s *S) TestPing(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Just ensure the nonce has been received. + result := struct{}{} + err = session.Run("ping", &result) + + mgo.ResetStats() + + err = session.Ping() + c.Assert(err, IsNil) + + // Pretty boring. + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 1) + c.Assert(stats.ReceivedOps, Equals, 1) +} + +func (s *S) TestURLSingle(c *C) { + session, err := mgo.Dial("mongodb://localhost:40001/") + c.Assert(err, IsNil) + defer session.Close() + + result := struct{ Ok int }{} + err = session.Run("ping", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, 1) +} + +func (s *S) TestURLMany(c *C) { + session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/") + c.Assert(err, IsNil) + defer session.Close() + + result := struct{ Ok int }{} + err = session.Run("ping", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, 1) +} + +func (s *S) TestURLParsing(c *C) { + urls := []string{ + "localhost:40001?foo=1&bar=2", + "localhost:40001?foo=1;bar=2", + } + for _, url := range urls { + session, err := mgo.Dial(url) + if session != nil { + session.Close() + } + c.Assert(err, ErrorMatches, "unsupported connection URL option: (foo=1|bar=2)") + } +} + +func (s *S) TestInsertFindOne(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1, "b": 2}) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 1, "b": 3}) + c.Assert(err, IsNil) + + result := struct{ A, B int }{} + + err = coll.Find(M{"a": 1}).Sort("b").One(&result) + c.Assert(err, IsNil) + c.Assert(result.A, Equals, 1) + c.Assert(result.B, Equals, 2) +} + +func (s *S) TestInsertFindOneNil(c *C) { + session, err := mgo.Dial("localhost:40002") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Find(nil).One(nil) + c.Assert(err, ErrorMatches, "unauthorized.*|not authorized.*") +} + +func (s *S) TestInsertFindOneMap(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1, "b": 2}) + c.Assert(err, IsNil) + result := make(M) + err = coll.Find(M{"a": 1}).One(result) + c.Assert(err, IsNil) + c.Assert(result["a"], Equals, 1) + c.Assert(result["b"], Equals, 2) +} + +func (s *S) TestInsertFindAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1, "b": 2}) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 3, "b": 4}) + c.Assert(err, IsNil) + + type R struct{ A, B int } + var result []R + + assertResult := func() { + c.Assert(len(result), Equals, 2) + c.Assert(result[0].A, Equals, 1) + 
c.Assert(result[0].B, Equals, 2) + c.Assert(result[1].A, Equals, 3) + c.Assert(result[1].B, Equals, 4) + } + + // nil slice + err = coll.Find(nil).Sort("a").All(&result) + c.Assert(err, IsNil) + assertResult() + + // Previously allocated slice + allocd := make([]R, 5) + result = allocd + err = coll.Find(nil).Sort("a").All(&result) + c.Assert(err, IsNil) + assertResult() + + // Ensure result is backed by the originally allocated array + c.Assert(&result[0], Equals, &allocd[0]) + + // Non-pointer slice error + f := func() { coll.Find(nil).All(result) } + c.Assert(f, Panics, "result argument must be a slice address") + + // Non-slice error + f = func() { coll.Find(nil).All(new(int)) } + c.Assert(f, Panics, "result argument must be a slice address") +} + +func (s *S) TestFindRef(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db1 := session.DB("db1") + db1col1 := db1.C("col1") + + db2 := session.DB("db2") + db2col1 := db2.C("col1") + + err = db1col1.Insert(M{"_id": 1, "n": 1}) + c.Assert(err, IsNil) + err = db1col1.Insert(M{"_id": 2, "n": 2}) + c.Assert(err, IsNil) + err = db2col1.Insert(M{"_id": 2, "n": 3}) + c.Assert(err, IsNil) + + result := struct{ N int }{} + + ref1 := &mgo.DBRef{Collection: "col1", Id: 1} + ref2 := &mgo.DBRef{Collection: "col1", Id: 2, Database: "db2"} + + err = db1.FindRef(ref1).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) + + err = db1.FindRef(ref2).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 3) + + err = db2.FindRef(ref1).One(&result) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = db2.FindRef(ref2).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 3) + + err = session.FindRef(ref2).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 3) + + f := func() { session.FindRef(ref1).One(&result) } + c.Assert(f, PanicMatches, "Can't resolve database for &mgo.DBRef{Collection:\"col1\", Id:1, Database:\"\"}") +} + +func (s *S) TestDatabaseAndCollectionNames(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db1 := session.DB("db1") + db1col1 := db1.C("col1") + db1col2 := db1.C("col2") + + db2 := session.DB("db2") + db2col1 := db2.C("col3") + + err = db1col1.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + err = db1col2.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + err = db2col1.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + if !reflect.DeepEqual(names, []string{"db1", "db2"}) { + // 2.4+ has "local" as well. 
+ c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) + } + + names, err = db1.CollectionNames() + c.Assert(err, IsNil) + c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"}) + + names, err = db2.CollectionNames() + c.Assert(err, IsNil) + c.Assert(names, DeepEquals, []string{"col3", "system.indexes"}) +} + +func (s *S) TestSelect(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + result := struct{ A, B int }{} + + err = coll.Find(M{"a": 1}).Select(M{"b": 1}).One(&result) + c.Assert(err, IsNil) + c.Assert(result.A, Equals, 0) + c.Assert(result.B, Equals, 2) +} + +func (s *S) TestInlineMap(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + var v, result1 struct { + A int + M map[string]int ",inline" + } + + v.A = 1 + v.M = map[string]int{"b": 2} + err = coll.Insert(v) + c.Assert(err, IsNil) + + noId := M{"_id": 0} + + err = coll.Find(nil).Select(noId).One(&result1) + c.Assert(err, IsNil) + c.Assert(result1.A, Equals, 1) + c.Assert(result1.M, DeepEquals, map[string]int{"b": 2}) + + var result2 M + err = coll.Find(nil).Select(noId).One(&result2) + c.Assert(err, IsNil) + c.Assert(result2, DeepEquals, M{"a": 1, "b": 2}) + +} + +func (s *S) TestUpdate(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"k": n, "n": n}) + c.Assert(err, IsNil) + } + + err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}}) + c.Assert(err, IsNil) + + result := make(M) + err = coll.Find(M{"k": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 43) + + err = coll.Update(M{"k": 47}, M{"k": 47, "n": 47}) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.Find(M{"k": 47}).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestUpdateId(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"_id": n, "n": n}) + c.Assert(err, IsNil) + } + + err = coll.UpdateId(42, M{"$inc": M{"n": 1}}) + c.Assert(err, IsNil) + + result := make(M) + err = coll.FindId(42).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 43) + + err = coll.UpdateId(47, M{"k": 47, "n": 47}) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.FindId(47).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestUpdateNil(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"k": 42, "n": 42}) + c.Assert(err, IsNil) + err = coll.Update(nil, M{"$inc": M{"n": 1}}) + c.Assert(err, IsNil) + + result := make(M) + err = coll.Find(M{"k": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 43) + + err = coll.Insert(M{"k": 45, "n": 45}) + c.Assert(err, IsNil) + _, err = coll.UpdateAll(nil, M{"$inc": M{"n": 1}}) + c.Assert(err, IsNil) + + err = coll.Find(M{"k": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 44) + err = coll.Find(M{"k": 45}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 46) + +} + +func (s *S) 
TestUpsert(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"k": n, "n": n}) + c.Assert(err, IsNil) + } + + info, err := coll.Upsert(M{"k": 42}, M{"k": 42, "n": 24}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.UpsertedId, IsNil) + + result := M{} + err = coll.Find(M{"k": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 24) + + // Insert with internally created id. + info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 0) + c.Assert(info.UpsertedId, NotNil) + + err = coll.Find(M{"k": 47}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 47) + + result = M{} + err = coll.Find(M{"_id": info.UpsertedId}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 47) + + // Insert with provided id. + info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 0) + if s.versionAtLeast(2, 6) { + c.Assert(info.UpsertedId, Equals, 48) + } else { + c.Assert(info.UpsertedId, IsNil) // Unfortunate, but that's what Mongo gave us. + } + + err = coll.Find(M{"k": 48}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 48) +} + +func (s *S) TestUpsertId(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"_id": n, "n": n}) + c.Assert(err, IsNil) + } + + info, err := coll.UpsertId(42, M{"n": 24}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.UpsertedId, IsNil) + + result := M{} + err = coll.FindId(42).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 24) + + info, err = coll.UpsertId(47, M{"_id": 47, "n": 47}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 0) + if s.versionAtLeast(2, 6) { + c.Assert(info.UpsertedId, Equals, 47) + } else { + c.Assert(info.UpsertedId, IsNil) + } + + err = coll.FindId(47).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 47) +} + +func (s *S) TestUpdateAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"k": n, "n": n}) + c.Assert(err, IsNil) + } + + info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 4) + + result := make(M) + err = coll.Find(M{"k": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 42) + + err = coll.Find(M{"k": 43}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 44) + + err = coll.Find(M{"k": 44}).One(result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 45) + + if !s.versionAtLeast(2, 6) { + // 2.6 made this invalid. 
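+		// (Editorial note: 2.6+ rejects multi-document updates whose
+		// update parameter is a replacement document rather than a set
+		// of update operators such as $set or $inc.)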
+ info, err = coll.UpdateAll(M{"k": 47}, M{"k": 47, "n": 47}) + c.Assert(err, Equals, nil) + c.Assert(info.Updated, Equals, 0) + } +} + +func (s *S) TestRemove(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + err = coll.Remove(M{"n": M{"$gt": 42}}) + c.Assert(err, IsNil) + + result := &struct{ N int }{} + err = coll.Find(M{"n": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 42) + + err = coll.Find(M{"n": 43}).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.Find(M{"n": 44}).One(result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 44) +} + +func (s *S) TestRemoveId(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) + c.Assert(err, IsNil) + + err = coll.RemoveId(41) + c.Assert(err, IsNil) + + c.Assert(coll.FindId(40).One(nil), IsNil) + c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) + c.Assert(coll.FindId(42).One(nil), IsNil) +} + +func (s *S) TestRemoveAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + info, err := coll.RemoveAll(M{"n": M{"$gt": 42}}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 0) + c.Assert(info.Removed, Equals, 4) + c.Assert(info.UpsertedId, IsNil) + + result := &struct{ N int }{} + err = coll.Find(M{"n": 42}).One(result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 42) + + err = coll.Find(M{"n": 43}).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.Find(M{"n": 44}).One(result) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestDropDatabase(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db1 := session.DB("db1") + db1.C("col").Insert(M{"_id": 1}) + + db2 := session.DB("db2") + db2.C("col").Insert(M{"_id": 1}) + + err = db1.DropDatabase() + c.Assert(err, IsNil) + + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + if !reflect.DeepEqual(names, []string{"db2"}) { + // 2.4+ has "local" as well. + c.Assert(names, DeepEquals, []string{"db2", "local"}) + } + + err = db2.DropDatabase() + c.Assert(err, IsNil) + + names, err = session.DatabaseNames() + c.Assert(err, IsNil) + if !reflect.DeepEqual(names, []string(nil)) { + // 2.4+ has "local" as well. 
+ c.Assert(names, DeepEquals, []string{"local"}) + } +} + +func (s *S) TestDropCollection(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("db1") + db.C("col1").Insert(M{"_id": 1}) + db.C("col2").Insert(M{"_id": 1}) + + err = db.C("col1").DropCollection() + c.Assert(err, IsNil) + + names, err := db.CollectionNames() + c.Assert(err, IsNil) + c.Assert(names, DeepEquals, []string{"col2", "system.indexes"}) + + err = db.C("col2").DropCollection() + c.Assert(err, IsNil) + + names, err = db.CollectionNames() + c.Assert(err, IsNil) + c.Assert(names, DeepEquals, []string{"system.indexes"}) +} + +func (s *S) TestCreateCollectionCapped(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + info := &mgo.CollectionInfo{ + Capped: true, + MaxBytes: 1024, + MaxDocs: 3, + } + err = coll.Create(info) + c.Assert(err, IsNil) + + ns := []int{1, 2, 3, 4, 5} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + n, err := coll.Find(nil).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) +} + +func (s *S) TestCreateCollectionNoIndex(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + info := &mgo.CollectionInfo{ + DisableIdIndex: true, + } + err = coll.Create(info) + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + indexes, err := coll.Indexes() + c.Assert(indexes, HasLen, 0) +} + +func (s *S) TestCreateCollectionForceIndex(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + info := &mgo.CollectionInfo{ + ForceIdIndex: true, + Capped: true, + MaxBytes: 1024, + } + err = coll.Create(info) + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + indexes, err := coll.Indexes() + c.Assert(indexes, HasLen, 1) +} + +func (s *S) TestIsDupValues(c *C) { + c.Assert(mgo.IsDup(nil), Equals, false) + c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false) + c.Assert(mgo.IsDup(&mgo.QueryError{Code: 1}), Equals, false) + c.Assert(mgo.IsDup(&mgo.LastError{Code: 11000}), Equals, true) + c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11000}), Equals, true) + c.Assert(mgo.IsDup(&mgo.LastError{Code: 11001}), Equals, true) + c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true) + c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true) + c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true) + lerr := &mgo.LastError{Code: 16460, Err:"error inserting 1 documents to shard ... 
caused by :: E11000 duplicate key error index: ..."} + c.Assert(mgo.IsDup(lerr), Equals, true) +} + +func (s *S) TestIsDupPrimary(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + err = coll.Insert(M{"_id": 1}) + c.Assert(err, ErrorMatches, ".*duplicate key error.*") + c.Assert(mgo.IsDup(err), Equals, true) +} + +func (s *S) TestIsDupUnique(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + index := mgo.Index{ + Key: []string{"a", "b"}, + Unique: true, + } + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(index) + c.Assert(err, IsNil) + + err = coll.Insert(M{"a": 1, "b": 1}) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 1, "b": 1}) + c.Assert(err, ErrorMatches, ".*duplicate key error.*") + c.Assert(mgo.IsDup(err), Equals, true) +} + +func (s *S) TestIsDupCapped(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + info := &mgo.CollectionInfo{ + ForceIdIndex: true, + Capped: true, + MaxBytes: 1024, + } + err = coll.Create(info) + c.Assert(err, IsNil) + + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + err = coll.Insert(M{"_id": 1}) + // The error was different for capped collections before 2.6. + c.Assert(err, ErrorMatches, ".*duplicate key.*") + // The issue is reduced by using IsDup. + c.Assert(mgo.IsDup(err), Equals, true) +} + +func (s *S) TestIsDupFindAndModify(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(mgo.Index{Key: []string{"n"}, Unique: true}) + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + err = coll.Insert(M{"n": 2}) + c.Assert(err, IsNil) + _, err = coll.Find(M{"n": 1}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, bson.M{}) + c.Assert(err, ErrorMatches, ".*duplicate key error.*") + c.Assert(mgo.IsDup(err), Equals, true) +} + +func (s *S) TestFindAndModify(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 42}) + + session.SetMode(mgo.Monotonic, true) + + result := M{} + info, err := coll.Find(M{"n": 42}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 42) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.Removed, Equals, 0) + c.Assert(info.UpsertedId, IsNil) + + result = M{} + info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 44) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.Removed, Equals, 0) + c.Assert(info.UpsertedId, IsNil) + + result = M{} + info, err = coll.Find(M{"n": 50}).Apply(mgo.Change{Upsert: true, Update: M{"n": 51, "o": 52}}, result) + c.Assert(err, IsNil) + c.Assert(result["n"], IsNil) + c.Assert(info.Updated, Equals, 0) + c.Assert(info.Removed, Equals, 0) + c.Assert(info.UpsertedId, NotNil) + + result = M{} + info, err = coll.Find(nil).Sort("-n").Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) + c.Assert(err, IsNil) + c.Assert(result["n"], Equals, 52) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.Removed, Equals, 0) + c.Assert(info.UpsertedId, 
IsNil) + + result = M{} + info, err = coll.Find(M{"n": 52}).Select(M{"o": 1}).Apply(mgo.Change{Remove: true}, result) + c.Assert(err, IsNil) + c.Assert(result["n"], IsNil) + c.Assert(result["o"], Equals, 52) + c.Assert(info.Updated, Equals, 0) + c.Assert(info.Removed, Equals, 1) + c.Assert(info.UpsertedId, IsNil) + + result = M{} + info, err = coll.Find(M{"n": 60}).Apply(mgo.Change{Remove: true}, result) + c.Assert(err, Equals, mgo.ErrNotFound) + c.Assert(len(result), Equals, 0) + c.Assert(info, IsNil) +} + +func (s *S) TestFindAndModifyBug997828(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": "not-a-number"}) + + result := make(M) + _, err = coll.Find(M{"n": "not-a-number"}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) + c.Assert(err, ErrorMatches, `(exception: )?Cannot apply \$inc .*`) + if s.versionAtLeast(2, 1) { + qerr, _ := err.(*mgo.QueryError) + c.Assert(qerr, NotNil, Commentf("err: %#v", err)) + if s.versionAtLeast(2, 6) { + // Oh, the dance of error codes. :-( + c.Assert(qerr.Code, Equals, 16837) + } else { + c.Assert(qerr.Code, Equals, 10140) + } + } else { + lerr, _ := err.(*mgo.LastError) + c.Assert(lerr, NotNil, Commentf("err: %#v", err)) + c.Assert(lerr.Code, Equals, 10140) + } +} + +func (s *S) TestCountCollection(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + n, err := coll.Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) +} + +func (s *S) TestCountQuery(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + n, err := coll.Find(M{"n": M{"$gt": 40}}).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) +} + +func (s *S) TestCountQuerySorted(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + n, err := coll.Find(M{"n": M{"$gt": 40}}).Sort("n").Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) +} + +func (s *S) TestCountSkipLimit(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + n, err := coll.Find(nil).Skip(1).Limit(3).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + + n, err = coll.Find(nil).Skip(1).Limit(5).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 4) +} + +func (s *S) TestQueryExplain(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + m := M{} + query := coll.Find(nil).Limit(2) + err = query.Explain(m) + c.Assert(err, IsNil) + c.Assert(m["cursor"], Equals, "BasicCursor") + c.Assert(m["nscanned"], Equals, 2) + c.Assert(m["n"], Equals, 2) + + n := 0 + var result M + iter 
:= query.Iter() + for iter.Next(&result) { + n++ + } + c.Assert(iter.Close(), IsNil) + c.Assert(n, Equals, 2) +} + +func (s *S) TestQueryHint(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.EnsureIndexKey("a") + + m := M{} + err = coll.Find(nil).Hint("a").Explain(m) + c.Assert(err, IsNil) + c.Assert(m["indexBounds"], NotNil) + c.Assert(m["indexBounds"].(M)["a"], NotNil) +} + +func (s *S) TestFindOneNotFound(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + result := struct{ A, B int }{} + err = coll.Find(M{"a": 1}).One(&result) + c.Assert(err, Equals, mgo.ErrNotFound) + c.Assert(err, ErrorMatches, "not found") + c.Assert(err == mgo.ErrNotFound, Equals, true) +} + +func (s *S) TestFindNil(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + result := struct{ N int }{} + + err = coll.Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) +} + +func (s *S) TestFindId(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"_id": 41, "n": 41}) + c.Assert(err, IsNil) + err = coll.Insert(M{"_id": 42, "n": 42}) + c.Assert(err, IsNil) + + result := struct{ N int }{} + + err = coll.FindId(42).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 42) +} + +func (s *S) TestFindIterAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter() + result := struct{ N int }{} + for i := 2; i < 7; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 1 { + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + + session.Refresh() // Release socket. + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP + c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. 
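+ // (Prefetch(0) turns off background prefetching and Batch(2) caps each reply at two documents, which is what makes the op and document counts asserted here deterministic.)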
+ c.Assert(stats.ReceivedDocs, Equals, 5) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestFindIterTwiceWithSameQuery(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for i := 40; i != 47; i++ { + coll.Insert(M{"n": i}) + } + + query := coll.Find(M{}).Sort("n") + + result1 := query.Skip(1).Iter() + result2 := query.Skip(2).Iter() + + result := struct{ N int }{} + ok := result2.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, 42) + ok = result1.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, 41) +} + +func (s *S) TestFindIterWithoutResults(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"n": 42}) + + iter := coll.Find(M{"n": 0}).Iter() + + result := struct{ N int }{} + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + c.Assert(result.N, Equals, 0) +} + +func (s *S) TestFindIterLimit(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3) + iter := query.Iter() + + result := struct{ N int }{} + for i := 2; i < 5; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + } + + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + + session.Refresh() // Release socket. + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP + c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP + c.Assert(stats.ReceivedDocs, Equals, 3) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestTooManyItemsLimitBug(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) + + mgo.SetDebug(false) + coll := session.DB("mydb").C("mycoll") + words := strings.Split("foo bar baz", " ") + for i := 0; i < 5; i++ { + words = append(words, words...) + } + doc := bson.D{{"words", words}} + inserts := 10000 + limit := 5000 + iters := 0 + c.Assert(inserts > limit, Equals, true) + for i := 0; i < inserts; i++ { + err := coll.Insert(&doc) + c.Assert(err, IsNil) + } + iter := coll.Find(nil).Limit(limit).Iter() + for iter.Next(&doc) { + if iters%100 == 0 { + c.Logf("Seen %d documents", iters) + } + iters++ + } + c.Assert(iter.Close(), IsNil) + c.Assert(iters, Equals, limit) +} + +func serverCursorsOpen(session *mgo.Session) int { + var result struct { + Cursors struct { + TotalOpen int `bson:"totalOpen"` + TimedOut int `bson:"timedOut"` + } + } + err := session.Run("serverStatus", &result) + if err != nil { + panic(err) + } + return result.Cursors.TotalOpen +} + +func (s *S) TestFindIterLimitWithMore(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // Insane amounts of logging otherwise due to the + // amount of data being shuffled.
+ mgo.SetDebug(false) + defer mgo.SetDebug(true) + + // Should amount to more than 4MB bson payload, + // the default limit per result chunk. + const total = 4096 + var d struct{ A [1024]byte } + docs := make([]interface{}, total) + for i := 0; i < total; i++ { + docs[i] = &d + } + err = coll.Insert(docs...) + c.Assert(err, IsNil) + + n, err := coll.Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, total) + + // First, try restricting to a single chunk with a negative limit. + nresults := 0 + iter := coll.Find(nil).Limit(-total).Iter() + var discard struct{} + for iter.Next(&discard) { + nresults++ + } + if nresults < total/2 || nresults >= total { + c.Fatalf("Bad result size with negative limit: %d", nresults) + } + + cursorsOpen := serverCursorsOpen(session) + + // Try again, with a positive limit. Should reach the end now, + // using multiple chunks. + nresults = 0 + iter = coll.Find(nil).Limit(total).Iter() + for iter.Next(&discard) { + nresults++ + } + c.Assert(nresults, Equals, total) + + // Ensure the cursor used is properly killed. + c.Assert(serverCursorsOpen(session), Equals, cursorsOpen) + + // Edge case, -MinInt == -MinInt. + nresults = 0 + iter = coll.Find(nil).Limit(math.MinInt32).Iter() + for iter.Next(&discard) { + nresults++ + } + if nresults < total/2 || nresults >= total { + c.Fatalf("Bad result size with MinInt32 limit: %d", nresults) + } +} + +func (s *S) TestFindIterLimitWithBatch(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + // Ping the database to ensure the nonce has been received already. + c.Assert(session.Ping(), IsNil) + + session.Refresh() // Release socket. + + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3).Batch(2) + iter := query.Iter() + result := struct{ N int }{} + for i := 2; i < 5; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + + session.Refresh() // Release socket. + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP + c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs + c.Assert(stats.ReceivedDocs, Equals, 3) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestFindIterSortWithBatch(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + // Without this, the logic above breaks because Mongo refuses to + // return a cursor with an in-memory sort. + coll.EnsureIndexKey("n") + + // Ping the database to ensure the nonce has been received already. + c.Assert(session.Ping(), IsNil) + + session.Refresh() // Release socket. 
+ + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$lte": 44}}).Sort("-n").Batch(2) + iter := query.Iter() + ns = []int{46, 45, 44, 43, 42, 41, 40} + result := struct{ N int }{} + for i := 2; i < len(ns); i++ { + c.Logf("i=%d", i) + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Close(), IsNil) + + session.Refresh() // Release socket. + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP + c.Assert(stats.ReceivedOps, Equals, 3) // and its REPLY_OPs + c.Assert(stats.ReceivedDocs, Equals, 5) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +// Test tailable cursors in a situation where Next has to sleep to +// respect the timeout requested on Tail. +func (s *S) TestFindTailTimeoutWithSleep(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + cresult := struct{ ErrMsg string }{} + + db := session.DB("mydb") + err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + c.Assert(err, IsNil) + c.Assert(cresult.ErrMsg, Equals, "") + coll := db.C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + timeout := 3 * time.Second + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) + iter := query.Tail(timeout) + + n := len(ns) + result := struct{ N int }{} + for i := 2; i != n; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { // The batch boundary. + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + mgo.ResetStats() + + // The following call to Next will block. + go func() { + // The internal AwaitData timing of MongoDB is around 2 seconds, + // so this should force mgo to sleep at least once by itself to + // respect the requested timeout. + time.Sleep(timeout + 5e8*time.Nanosecond) + session := session.New() + defer session.Close() + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"n": 47}) + }() + + c.Log("Will wait for Next with N=47...") + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, 47) + c.Log("Got Next with N=47!") + + // The following may break because it depends a bit on the internal + // timing used by MongoDB's AwaitData logic. 
If it does, the problem + // will be observed as more GET_MORE_OPs than predicted: + // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 5) + c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP + c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response + + c.Log("Will wait for a result which will never come...") + + started := time.Now() + ok = iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, true) + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + + c.Log("Will now reuse the timed out tail cursor...") + + coll.Insert(M{"n": 48}) + ok = iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Close(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, 48) +} + +// Test tailable cursors in a situation where Next never gets to sleep once +// to respect the timeout requested on Tail. +func (s *S) TestFindTailTimeoutNoSleep(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + cresult := struct{ ErrMsg string }{} + + db := session.DB("mydb") + err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + c.Assert(err, IsNil) + c.Assert(cresult.ErrMsg, Equals, "") + coll := db.C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + timeout := 1 * time.Second + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) + iter := query.Tail(timeout) + + n := len(ns) + result := struct{ N int }{} + for i := 2; i != n; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { // The batch boundary. + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + mgo.ResetStats() + + // The following call to Next will block. + go func() { + // The internal AwaitData timing of MongoDB is around 2 seconds, + // so this item should arrive within the AwaitData threshold. + time.Sleep(5e8) + session := session.New() + defer session.Close() + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"n": 47}) + }() + + c.Log("Will wait for Next with N=47...") + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, 47) + c.Log("Got Next with N=47!") + + // The following may break because it depends a bit on the internal + // timing used by MongoDB's AwaitData logic. 
If it does, the problem + // will be observed as more GET_MORE_OPs than predicted: + // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 4) + c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP + c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response + + c.Log("Will wait for a result which will never come...") + + started := time.Now() + ok = iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, true) + c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) + + c.Log("Will now reuse the timed out tail cursor...") + + coll.Insert(M{"n": 48}) + ok = iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Close(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, 48) +} + +// Test tailable cursors in a situation where Next never gets to sleep once +// to respect the timeout requested on Tail. +func (s *S) TestFindTailNoTimeout(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + cresult := struct{ ErrMsg string }{} + + db := session.DB("mydb") + err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + c.Assert(err, IsNil) + c.Assert(cresult.ErrMsg, Equals, "") + coll := db.C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) + iter := query.Tail(-1) + c.Assert(err, IsNil) + + n := len(ns) + result := struct{ N int }{} + for i := 2; i != n; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { // The batch boundary. + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + } + + mgo.ResetStats() + + // The following call to Next will block. + go func() { + time.Sleep(5e8) + session := session.New() + defer session.Close() + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"n": 47}) + }() + + c.Log("Will wait for Next with N=47...") + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) + c.Assert(iter.Timeout(), Equals, false) + c.Assert(result.N, Equals, 47) + c.Log("Got Next with N=47!") + + // The following may break because it depends a bit on the internal + // timing used by MongoDB's AwaitData logic. If it does, the problem + // will be observed as more GET_MORE_OPs than predicted: + // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 4) + c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP + c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response + + c.Log("Will wait for a result which will never come...") + + gotNext := make(chan bool) + go func() { + ok := iter.Next(&result) + gotNext <- ok + }() + + select { + case ok := <-gotNext: + c.Fatalf("Next returned: %v", ok) + case <-time.After(3e9): + // Good. Should still be sleeping at that point. + } + + // Closing the session should cause Next to return. 
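+ // Close tears down the socket the tailable cursor is blocked on, so the pending request fails and Next returns false with Err reporting the explicit close.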
+ session.Close() + + select { + case ok := <-gotNext: + c.Assert(ok, Equals, false) + c.Assert(iter.Err(), ErrorMatches, "Closed explicitly") + c.Assert(iter.Timeout(), Equals, false) + case <-time.After(1e9): + c.Fatal("Closing the session did not unblock Next") + } +} + +func (s *S) TestIterNextResetsResult(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{1, 2, 3} + for _, n := range ns { + coll.Insert(M{"n" + strconv.Itoa(n): n}) + } + + query := coll.Find(nil).Sort("$natural") + + i := 0 + var sresult *struct{ N1, N2, N3 int } + iter := query.Iter() + for iter.Next(&sresult) { + switch i { + case 0: + c.Assert(sresult.N1, Equals, 1) + c.Assert(sresult.N2+sresult.N3, Equals, 0) + case 1: + c.Assert(sresult.N2, Equals, 2) + c.Assert(sresult.N1+sresult.N3, Equals, 0) + case 2: + c.Assert(sresult.N3, Equals, 3) + c.Assert(sresult.N1+sresult.N2, Equals, 0) + } + i++ + } + c.Assert(iter.Close(), IsNil) + + i = 0 + var mresult M + iter = query.Iter() + for iter.Next(&mresult) { + delete(mresult, "_id") + switch i { + case 0: + c.Assert(mresult, DeepEquals, M{"n1": 1}) + case 1: + c.Assert(mresult, DeepEquals, M{"n2": 2}) + case 2: + c.Assert(mresult, DeepEquals, M{"n3": 3}) + } + i++ + } + c.Assert(iter.Close(), IsNil) + + i = 0 + var iresult interface{} + iter = query.Iter() + for iter.Next(&iresult) { + mresult, ok := iresult.(bson.M) + c.Assert(ok, Equals, true, Commentf("%#v", iresult)) + delete(mresult, "_id") + switch i { + case 0: + c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) + case 1: + c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) + case 2: + c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) + } + i++ + } + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestFindForOnIter(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) + iter := query.Iter() + + i := 2 + var result *struct{ N int } + err = iter.For(&result, func() error { + c.Assert(i < 7, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 1 { + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + i++ + return nil + }) + c.Assert(err, IsNil) + + session.Refresh() // Release socket. + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP + c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. + c.Assert(stats.ReceivedDocs, Equals, 5) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestFindFor(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + session.Refresh() // Release socket. + + mgo.ResetStats() + + query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) + + i := 2 + var result *struct{ N int } + err = query.For(&result, func() error { + c.Assert(i < 7, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 1 { + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, 2) + } + i++ + return nil + }) + c.Assert(err, IsNil) + + session.Refresh() // Release socket. 
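+ // (Query.For is the callback-style counterpart of Iter/Next: it unmarshals each document into the result pointer and stops as soon as the function returns a non-nil error, as TestFindForStopOnError below exercises.)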
+ + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP + c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. + c.Assert(stats.ReceivedDocs, Equals, 5) + c.Assert(stats.SocketsInUse, Equals, 0) +} + +func (s *S) TestFindForStopOnError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + query := coll.Find(M{"n": M{"$gte": 42}}) + i := 2 + var result *struct{ N int } + err = query.For(&result, func() error { + c.Assert(i < 4, Equals, true) + c.Assert(result.N, Equals, ns[i]) + if i == 3 { + return fmt.Errorf("stop!") + } + i++ + return nil + }) + c.Assert(err, ErrorMatches, "stop!") +} + +func (s *S) TestFindForResetsResult(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{1, 2, 3} + for _, n := range ns { + coll.Insert(M{"n" + strconv.Itoa(n): n}) + } + + query := coll.Find(nil).Sort("$natural") + + i := 0 + var sresult *struct{ N1, N2, N3 int } + err = query.For(&sresult, func() error { + switch i { + case 0: + c.Assert(sresult.N1, Equals, 1) + c.Assert(sresult.N2+sresult.N3, Equals, 0) + case 1: + c.Assert(sresult.N2, Equals, 2) + c.Assert(sresult.N1+sresult.N3, Equals, 0) + case 2: + c.Assert(sresult.N3, Equals, 3) + c.Assert(sresult.N1+sresult.N2, Equals, 0) + } + i++ + return nil + }) + c.Assert(err, IsNil) + + i = 0 + var mresult M + err = query.For(&mresult, func() error { + delete(mresult, "_id") + switch i { + case 0: + c.Assert(mresult, DeepEquals, M{"n1": 1}) + case 1: + c.Assert(mresult, DeepEquals, M{"n2": 2}) + case 2: + c.Assert(mresult, DeepEquals, M{"n3": 3}) + } + i++ + return nil + }) + c.Assert(err, IsNil) + + i = 0 + var iresult interface{} + err = query.For(&iresult, func() error { + mresult, ok := iresult.(bson.M) + c.Assert(ok, Equals, true, Commentf("%#v", iresult)) + delete(mresult, "_id") + switch i { + case 0: + c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) + case 1: + c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) + case 2: + c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) + } + i++ + return nil + }) + c.Assert(err, IsNil) +} + +func (s *S) TestFindIterSnapshot(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Insane amounts of logging otherwise due to the + // amount of data being shuffled. + mgo.SetDebug(false) + defer mgo.SetDebug(true) + + coll := session.DB("mydb").C("mycoll") + + var a [1024000]byte + + for n := 0; n < 10; n++ { + err := coll.Insert(M{"_id": n, "n": n, "a1": &a}) + c.Assert(err, IsNil) + } + + query := coll.Find(M{"n": M{"$gt": -1}}).Batch(2).Prefetch(0) + query.Snapshot() + iter := query.Iter() + + seen := map[int]bool{} + result := struct { + Id int "_id" + }{} + for iter.Next(&result) { + if len(seen) == 2 { + // Grow all entries so that they have to move. + // Backwards so that the order is inverted. 
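+ // ($snapshot makes the server walk the _id index, so a document relocated on disk by the growth below cannot be returned a second time.)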
+ for n := 10; n >= 0; n-- { + _, err := coll.Upsert(M{"_id": n}, M{"$set": M{"a2": &a}}) + c.Assert(err, IsNil) + } + } + if seen[result.Id] { + c.Fatalf("seen duplicated key: %d", result.Id) + } + seen[result.Id] = true + } + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestSort(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + coll.Insert(M{"a": 1, "b": 1}) + coll.Insert(M{"a": 2, "b": 2}) + coll.Insert(M{"a": 2, "b": 1}) + coll.Insert(M{"a": 0, "b": 1}) + coll.Insert(M{"a": 2, "b": 0}) + coll.Insert(M{"a": 0, "b": 2}) + coll.Insert(M{"a": 1, "b": 2}) + coll.Insert(M{"a": 0, "b": 0}) + coll.Insert(M{"a": 1, "b": 0}) + + query := coll.Find(M{}) + query.Sort("-a") // Should be ignored. + query.Sort("-b", "a") + iter := query.Iter() + + l := make([]int, 18) + r := struct{ A, B int }{} + for i := 0; i != len(l); i += 2 { + ok := iter.Next(&r) + c.Assert(ok, Equals, true) + c.Assert(err, IsNil) + l[i] = r.A + l[i+1] = r.B + } + + c.Assert(l, DeepEquals, []int{0, 2, 1, 2, 2, 2, 0, 1, 1, 1, 2, 1, 0, 0, 1, 0, 2, 0}) +} + +func (s *S) TestSortWithBadArgs(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + f1 := func() { coll.Find(nil).Sort("") } + f2 := func() { coll.Find(nil).Sort("+") } + f3 := func() { coll.Find(nil).Sort("foo", "-") } + + for _, f := range []func(){f1, f2, f3} { + c.Assert(f, PanicMatches, "Sort: empty field name") + } +} + +func (s *S) TestPrefetching(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + mgo.SetDebug(false) + docs := make([]interface{}, 800) + for i := 0; i != 600; i++ { + docs[i] = bson.D{{"n", i}} + } + coll.Insert(docs...) + + for testi := 0; testi < 5; testi++ { + mgo.ResetStats() + + var iter *mgo.Iter + var beforeMore int + + switch testi { + case 0: // The default session value. + session.SetBatch(100) + iter = coll.Find(M{}).Iter() + beforeMore = 75 + + case 2: // Changing the session value. + session.SetBatch(100) + session.SetPrefetch(0.27) + iter = coll.Find(M{}).Iter() + beforeMore = 73 + + case 1: // Changing via query methods. + iter = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter() + beforeMore = 73 + + case 3: // With prefetch on first document. + iter = coll.Find(M{}).Prefetch(1.0).Batch(100).Iter() + beforeMore = 0 + + case 4: // Without prefetch. + iter = coll.Find(M{}).Prefetch(0).Batch(100).Iter() + beforeMore = 100 + } + + pings := 0 + for batchi := 0; batchi < len(docs)/100-1; batchi++ { + c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi) + var result struct{ N int } + for i := 0; i < beforeMore; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) + } + beforeMore = 99 + c.Logf("Done iterating.") + + session.Run("ping", nil) // Roundtrip to settle down. + pings++ + + stats := mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*100+pings) + + c.Logf("Iterating over one more document on batch %d", batchi) + ok := iter.Next(&result) + c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) + c.Logf("Done iterating.") + + session.Run("ping", nil) // Roundtrip to settle down. 
+ pings++ + + stats = mgo.GetStats() + c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*100+pings) + } + } +} + +func (s *S) TestSafeSetting(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Check the default + safe := session.Safe() + c.Assert(safe.W, Equals, 0) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 0) + c.Assert(safe.FSync, Equals, false) + c.Assert(safe.J, Equals, false) + + // Tweak it + session.SetSafe(&mgo.Safe{W: 1, WTimeout: 2, FSync: true}) + safe = session.Safe() + c.Assert(safe.W, Equals, 1) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 2) + c.Assert(safe.FSync, Equals, true) + c.Assert(safe.J, Equals, false) + + // Reset it again. + session.SetSafe(&mgo.Safe{}) + safe = session.Safe() + c.Assert(safe.W, Equals, 0) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 0) + c.Assert(safe.FSync, Equals, false) + c.Assert(safe.J, Equals, false) + + // Ensure safety to something more conservative. + session.SetSafe(&mgo.Safe{W: 5, WTimeout: 6, J: true}) + safe = session.Safe() + c.Assert(safe.W, Equals, 5) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 6) + c.Assert(safe.FSync, Equals, false) + c.Assert(safe.J, Equals, true) + + // Ensure safety to something less conservative won't change it. + session.EnsureSafe(&mgo.Safe{W: 4, WTimeout: 7}) + safe = session.Safe() + c.Assert(safe.W, Equals, 5) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 6) + c.Assert(safe.FSync, Equals, false) + c.Assert(safe.J, Equals, true) + + // But to something more conservative will. + session.EnsureSafe(&mgo.Safe{W: 6, WTimeout: 4, FSync: true}) + safe = session.Safe() + c.Assert(safe.W, Equals, 6) + c.Assert(safe.WMode, Equals, "") + c.Assert(safe.WTimeout, Equals, 4) + c.Assert(safe.FSync, Equals, true) + c.Assert(safe.J, Equals, false) + + // Even more conservative. + session.EnsureSafe(&mgo.Safe{WMode: "majority", WTimeout: 2}) + safe = session.Safe() + c.Assert(safe.W, Equals, 0) + c.Assert(safe.WMode, Equals, "majority") + c.Assert(safe.WTimeout, Equals, 2) + c.Assert(safe.FSync, Equals, true) + c.Assert(safe.J, Equals, false) + + // WMode always overrides, whatever it is, but J doesn't. + session.EnsureSafe(&mgo.Safe{WMode: "something", J: true}) + safe = session.Safe() + c.Assert(safe.W, Equals, 0) + c.Assert(safe.WMode, Equals, "something") + c.Assert(safe.WTimeout, Equals, 2) + c.Assert(safe.FSync, Equals, true) + c.Assert(safe.J, Equals, false) + + // EnsureSafe with nil does nothing. + session.EnsureSafe(nil) + safe = session.Safe() + c.Assert(safe.W, Equals, 0) + c.Assert(safe.WMode, Equals, "something") + c.Assert(safe.WTimeout, Equals, 2) + c.Assert(safe.FSync, Equals, true) + c.Assert(safe.J, Equals, false) + + // Changing the safety of a cloned session doesn't touch the original. + clone := session.Clone() + defer clone.Close() + clone.EnsureSafe(&mgo.Safe{WMode: "foo"}) + safe = session.Safe() + c.Assert(safe.WMode, Equals, "something") +} + +func (s *S) TestSafeInsert(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // Insert an element with a predefined key. + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + mgo.ResetStats() + + // Session should be safe by default, so inserting it again must fail. 
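+ // In safe mode each write is followed by a getLastError round trip, which is how the error reaches the caller; the SentOps assertion below counts exactly that INSERT_OP + QUERY_OP pair.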
+ err = coll.Insert(M{"_id": 1}) + c.Assert(err, ErrorMatches, ".*E11000 duplicate.*") + c.Assert(err.(*mgo.LastError).Code, Equals, 11000) + + // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 2) + + mgo.ResetStats() + + // If we disable safety, though, it won't complain. + session.SetSafe(nil) + err = coll.Insert(M{"_id": 1}) + c.Assert(err, IsNil) + + // Must have sent a single operation this time (just the INSERT_OP) + stats = mgo.GetStats() + c.Assert(stats.SentOps, Equals, 1) +} + +func (s *S) TestSafeParameters(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // Tweak the safety parameters to something unachievable. + session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) + err = coll.Insert(M{"_id": 1}) + c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves") + if !s.versionAtLeast(2, 6) { + // 2.6 turned it into a query error. + c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) + } +} + +func (s *S) TestQueryErrorOne(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + result := struct { + Err string "$err" + }{} + + err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(&result) + c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") + c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") + if s.versionAtLeast(2, 6) { + // Oh, the dance of error codes. :-( + c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) + } else { + c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) + } + + // The result should be properly unmarshalled with QueryError + c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") +} + +func (s *S) TestQueryErrorNext(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + result := struct { + Err string "$err" + }{} + + iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() + + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + + err = iter.Close() + c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") + c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") + if s.versionAtLeast(2, 6) { + // Oh, the dance of error codes. 
:-( + c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) + } else { + c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) + } + c.Assert(iter.Err(), Equals, err) + + // The result should be properly unmarshalled with QueryError + c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") +} + +func (s *S) TestEnsureIndex(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + index1 := mgo.Index{ + Key: []string{"a"}, + Background: true, + } + + index2 := mgo.Index{ + Key: []string{"a", "-b"}, + Unique: true, + DropDups: true, + } + + // Obsolete: + index3 := mgo.Index{ + Key: []string{"@loc_old"}, + Min: -500, + Max: 500, + Bits: 32, + } + + index4 := mgo.Index{ + Key: []string{"$2d:loc"}, + Min: -500, + Max: 500, + Bits: 32, + } + + coll := session.DB("mydb").C("mycoll") + + for _, index := range []mgo.Index{index1, index2, index3, index4} { + err = coll.EnsureIndex(index) + c.Assert(err, IsNil) + } + + sysidx := session.DB("mydb").C("system.indexes") + + result1 := M{} + err = sysidx.Find(M{"name": "a_1"}).One(result1) + c.Assert(err, IsNil) + + result2 := M{} + err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) + c.Assert(err, IsNil) + + result3 := M{} + err = sysidx.Find(M{"name": "loc_old_2d"}).One(result3) + c.Assert(err, IsNil) + + result4 := M{} + err = sysidx.Find(M{"name": "loc_2d"}).One(result4) + c.Assert(err, IsNil) + + delete(result1, "v") + expected1 := M{ + "name": "a_1", + "key": M{"a": 1}, + "ns": "mydb.mycoll", + "background": true, + } + c.Assert(result1, DeepEquals, expected1) + + delete(result2, "v") + expected2 := M{ + "name": "a_1_b_-1", + "key": M{"a": 1, "b": -1}, + "ns": "mydb.mycoll", + "unique": true, + "dropDups": true, + } + c.Assert(result2, DeepEquals, expected2) + + delete(result3, "v") + expected3 := M{ + "name": "loc_old_2d", + "key": M{"loc_old": "2d"}, + "ns": "mydb.mycoll", + "min": -500, + "max": 500, + "bits": 32, + } + c.Assert(result3, DeepEquals, expected3) + + delete(result4, "v") + expected4 := M{ + "name": "loc_2d", + "key": M{"loc": "2d"}, + "ns": "mydb.mycoll", + "min": -500, + "max": 500, + "bits": 32, + } + c.Assert(result4, DeepEquals, expected4) + + // Ensure the index actually works for real. + err = coll.Insert(M{"a": 1, "b": 1}) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 1, "b": 1}) + c.Assert(err, ErrorMatches, ".*duplicate key error.*") + c.Assert(mgo.IsDup(err), Equals, true) +} + +func (s *S) TestEnsureIndexWithBadInfo(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(mgo.Index{}) + c.Assert(err, ErrorMatches, "invalid index key:.*") + + err = coll.EnsureIndex(mgo.Index{Key: []string{""}}) + c.Assert(err, ErrorMatches, "invalid index key:.*") +} + +func (s *S) TestEnsureIndexWithUnsafeSession(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetSafe(nil) + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Should fail since there are duplicated entries. 
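+ // (EnsureIndex builds the index over the documents that already exist, and the two identical entries inserted above cannot both satisfy the unique constraint.)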
+ index := mgo.Index{ + Key: []string{"a"}, + Unique: true, + } + + err = coll.EnsureIndex(index) + c.Assert(err, ErrorMatches, ".*duplicate key error.*") +} + +func (s *S) TestEnsureIndexKey(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndexKey("a", "-b") + c.Assert(err, IsNil) + + sysidx := session.DB("mydb").C("system.indexes") + + result1 := M{} + err = sysidx.Find(M{"name": "a_1"}).One(result1) + c.Assert(err, IsNil) + + result2 := M{} + err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) + c.Assert(err, IsNil) + + delete(result1, "v") + expected1 := M{ + "name": "a_1", + "key": M{"a": 1}, + "ns": "mydb.mycoll", + } + c.Assert(result1, DeepEquals, expected1) + + delete(result2, "v") + expected2 := M{ + "name": "a_1_b_-1", + "key": M{"a": 1, "b": -1}, + "ns": "mydb.mycoll", + } + c.Assert(result2, DeepEquals, expected2) +} + +func (s *S) TestEnsureIndexDropIndex(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndexKey("-b") + c.Assert(err, IsNil) + + err = coll.DropIndex("-b") + c.Assert(err, IsNil) + + sysidx := session.DB("mydb").C("system.indexes") + dummy := &struct{}{} + + err = sysidx.Find(M{"name": "a_1"}).One(dummy) + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "b_1"}).One(dummy) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndex("a") + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a_1"}).One(dummy) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndex("a") + c.Assert(err, ErrorMatches, "index not found.*") +} + +func (s *S) TestEnsureIndexCaching(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + mgo.ResetStats() + + // Second EnsureIndex should be cached and do nothing. + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 0) + + // Resetting the cache should make it contact the server again. + session.ResetIndexCache() + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps, Equals, 2) + + // Dropping the index should also drop the cached index key. + err = coll.DropIndex("a") + c.Assert(err, IsNil) + + mgo.ResetStats() + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps, Equals, 2) +} + +func (s *S) TestEnsureIndexGetIndexes(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("-b") + c.Assert(err, IsNil) + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + // Obsolete. 
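+ // (The "@field" form is the legacy spelling for a 2d geospatial key, as with "@loc_old" in TestEnsureIndex above; the "$2d:field" form used below is the current one.)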
+ err = coll.EnsureIndexKey("@c") + c.Assert(err, IsNil) + + err = coll.EnsureIndexKey("$2d:d") + c.Assert(err, IsNil) + + indexes, err := coll.Indexes() + c.Assert(err, IsNil) + + c.Assert(indexes[0].Name, Equals, "_id_") + c.Assert(indexes[1].Name, Equals, "a_1") + c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) + c.Assert(indexes[2].Name, Equals, "b_-1") + c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) + c.Assert(indexes[3].Name, Equals, "c_2d") + c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) + c.Assert(indexes[4].Name, Equals, "d_2d") + c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) +} + +func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) + c.Assert(err, IsNil) + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) + c.Assert(err, IsNil) + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: '2d'})"}}, nil) + c.Assert(err, IsNil) + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: -1, e: 1})"}}, nil) + c.Assert(err, IsNil) + + indexes, err := coll.Indexes() + c.Assert(err, IsNil) + + c.Assert(indexes[0].Name, Equals, "_id_") + c.Assert(indexes[1].Name, Equals, "a_1") + c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) + c.Assert(indexes[2].Name, Equals, "b_-1") + c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) + c.Assert(indexes[3].Name, Equals, "c_2d") + c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) + c.Assert(indexes[4].Name, Equals, "d_-1_e_1") + c.Assert(indexes[4].Key, DeepEquals, []string{"-d", "e"}) +} + +var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)") + +func (s *S) TestEnsureIndexExpireAfter(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetSafe(nil) + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1, "t": time.Now().Add(-120 * time.Second)}) + c.Assert(err, IsNil) + err = coll.Insert(M{"n": 2, "t": time.Now()}) + c.Assert(err, IsNil) + + // Expire entries one minute after their "t" timestamp.
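+ // (The server's TTL monitor only wakes up about once a minute, which is why the optional check below polls for up to 70 seconds.)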
+ index := mgo.Index{ + Key: []string{"t"}, + ExpireAfter: 1 * time.Minute, + } + + err = coll.EnsureIndex(index) + c.Assert(err, IsNil) + + indexes, err := coll.Indexes() + c.Assert(err, IsNil) + c.Assert(indexes[1].Name, Equals, "t_1") + c.Assert(indexes[1].ExpireAfter, Equals, 1*time.Minute) + + if *testTTL { + worked := false + stop := time.Now().Add(70 * time.Second) + for time.Now().Before(stop) { + n, err := coll.Count() + c.Assert(err, IsNil) + if n == 1 { + worked = true + break + } + c.Assert(n, Equals, 2) + c.Logf("Still has 2 entries...") + time.Sleep(1 * time.Second) + } + if !worked { + c.Fatalf("TTL index didn't work") + } + } +} + +func (s *S) TestDistinct(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + var result []int + err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result) + + sort.IntSlice(result).Sort() + c.Assert(result, DeepEquals, []int{3, 4, 6}) +} + +func (s *S) TestMapReduce(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + } + var result []struct { + Id int "_id" + Value int + } + + info, err := coll.Find(M{"n": M{"$gt": 2}}).MapReduce(job, &result) + c.Assert(err, IsNil) + c.Assert(info.InputCount, Equals, 4) + c.Assert(info.EmitCount, Equals, 4) + c.Assert(info.OutputCount, Equals, 3) + c.Assert(info.VerboseTime, IsNil) + + expected := map[int]int{3: 1, 4: 2, 6: 1} + for _, item := range result { + c.Logf("Item: %#v", &item) + c.Assert(item.Value, Equals, expected[item.Id]) + expected[item.Id] = -1 + } + + // Weak attempt of testing that Sort gets delivered. 
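+ // (Sorting the mapReduce input by an unindexed key is expected to be rejected server-side, so getting a *mgo.QueryError back is evidence the Sort option was actually sent with the command.)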
+ _, err = coll.Find(nil).Sort("-n").MapReduce(job, &result) + _, isQueryError := err.(*mgo.QueryError) + c.Assert(isQueryError, Equals, true) +} + +func (s *S) TestMapReduceFinalize(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1) }", + Reduce: "function(key, values) { return Array.sum(values) }", + Finalize: "function(key, count) { return {count: count} }", + } + var result []struct { + Id int "_id" + Value struct{ Count int } + } + _, err = coll.Find(nil).MapReduce(job, &result) + c.Assert(err, IsNil) + + expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} + for _, item := range result { + c.Logf("Item: %#v", &item) + c.Assert(item.Value.Count, Equals, expected[item.Id]) + expected[item.Id] = -1 + } +} + +func (s *S) TestMapReduceToCollection(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + Out: "mr", + } + + info, err := coll.Find(nil).MapReduce(job, nil) + c.Assert(err, IsNil) + c.Assert(info.InputCount, Equals, 7) + c.Assert(info.EmitCount, Equals, 7) + c.Assert(info.OutputCount, Equals, 5) + c.Assert(info.Collection, Equals, "mr") + c.Assert(info.Database, Equals, "mydb") + + expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} + var item *struct { + Id int "_id" + Value int + } + mr := session.DB("mydb").C("mr") + iter := mr.Find(nil).Iter() + for iter.Next(&item) { + c.Logf("Item: %#v", &item) + c.Assert(item.Value, Equals, expected[item.Id]) + expected[item.Id] = -1 + } + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestMapReduceToOtherDb(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}}, + } + + info, err := coll.Find(nil).MapReduce(job, nil) + c.Assert(err, IsNil) + c.Assert(info.InputCount, Equals, 7) + c.Assert(info.EmitCount, Equals, 7) + c.Assert(info.OutputCount, Equals, 5) + c.Assert(info.Collection, Equals, "mr") + c.Assert(info.Database, Equals, "otherdb") + + expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} + var item *struct { + Id int "_id" + Value int + } + mr := session.DB("otherdb").C("mr") + iter := mr.Find(nil).Iter() + for iter.Next(&item) { + c.Logf("Item: %#v", &item) + c.Assert(item.Value, Equals, expected[item.Id]) + expected[item.Id] = -1 + } + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestMapReduceOutOfOrder(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + Out: bson.M{"a": "a", "z": "z", "replace": "mr", "db": "otherdb", "b": "b", "y": "y"}, 
+ } + + info, err := coll.Find(nil).MapReduce(job, nil) + c.Assert(err, IsNil) + c.Assert(info.Collection, Equals, "mr") + c.Assert(info.Database, Equals, "otherdb") +} + +func (s *S) TestMapReduceScope(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + coll.Insert(M{"n": 1}) + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, x); }", + Reduce: "function(key, values) { return Array.sum(values); }", + Scope: M{"x": 42}, + } + + var result []bson.M + _, err = coll.Find(nil).MapReduce(job, &result) + c.Assert(len(result), Equals, 1) + c.Assert(result[0]["value"], Equals, 42.0) +} + +func (s *S) TestMapReduceVerbose(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for i := 0; i < 100; i++ { + err = coll.Insert(M{"n": i}) + c.Assert(err, IsNil) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + Verbose: true, + } + + info, err := coll.Find(nil).MapReduce(job, nil) + c.Assert(err, IsNil) + c.Assert(info.VerboseTime, NotNil) +} + +func (s *S) TestMapReduceLimit(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { + coll.Insert(M{"n": i}) + } + + job := &mgo.MapReduce{ + Map: "function() { emit(this.n, 1); }", + Reduce: "function(key, values) { return Array.sum(values); }", + } + + var result []bson.M + _, err = coll.Find(nil).Limit(3).MapReduce(job, &result) + c.Assert(err, IsNil) + c.Assert(len(result), Equals, 3) +} + +func (s *S) TestBuildInfo(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + info, err := session.BuildInfo() + c.Assert(err, IsNil) + + var v []int + for i, a := range strings.Split(info.Version, ".") { + for _, token := range []string{"-rc", "-pre"} { + if i == 2 && strings.Contains(a, token) { + a = a[:strings.Index(a, token)] + info.VersionArray[len(info.VersionArray)-1] = 0 + } + } + n, err := strconv.Atoi(a) + c.Assert(err, IsNil) + v = append(v, n) + } + for len(v) < 4 { + v = append(v, 0) + } + + c.Assert(info.VersionArray, DeepEquals, v) + c.Assert(info.GitVersion, Matches, "[a-z0-9]+") + c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") + if info.Bits != 32 && info.Bits != 64 { + c.Fatalf("info.Bits is %d", info.Bits) + } + if info.MaxObjectSize < 8192 { + c.Fatalf("info.MaxObjectSize seems too small: %d", info.MaxObjectSize) + } +} + +func (s *S) TestZeroTimeRoundtrip(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + var d struct{ T time.Time } + conn := session.DB("mydb").C("mycoll") + err = conn.Insert(d) + c.Assert(err, IsNil) + + var result bson.M + err = conn.Find(nil).One(&result) + c.Assert(err, IsNil) + t, isTime := result["t"].(time.Time) + c.Assert(isTime, Equals, true) + c.Assert(t, Equals, time.Time{}) +} + +func (s *S) TestFsyncLock(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + clone := session.Clone() + defer clone.Close() + + err = session.FsyncLock() + c.Assert(err, IsNil) + + done := make(chan time.Time) + go func() { + time.Sleep(3e9) + now := time.Now() + err := session.FsyncUnlock() + c.Check(err, IsNil) + done <- now + }() + + err = 
clone.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) + unlocked := time.Now() + unlocking := <-done + c.Assert(err, IsNil) + + c.Assert(unlocked.After(unlocking), Equals, true) + c.Assert(unlocked.Sub(unlocking) < 1e9, Equals, true) +} + +func (s *S) TestFsync(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + // Not much to do here. Just a smoke check. + err = session.Fsync(false) + c.Assert(err, IsNil) + err = session.Fsync(true) + c.Assert(err, IsNil) +} + +func (s *S) TestPipeIter(c *C) { + if !s.versionAtLeast(2, 1) { + c.Skip("Pipe only works on 2.1+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + iter := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).Iter() + result := struct{ N int }{} + for i := 2; i < 7; i++ { + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.N, Equals, ns[i]) + } + + c.Assert(iter.Next(&result), Equals, false) + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestPipeAll(c *C) { + if !s.versionAtLeast(2, 1) { + c.Skip("Pipe only works on 2.1+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + var result []struct{ N int } + err = coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).All(&result) + c.Assert(err, IsNil) + for i := 2; i < 7; i++ { + c.Assert(result[i-2].N, Equals, ns[i]) + } +} + +func (s *S) TestPipeOne(c *C) { + if !s.versionAtLeast(2, 1) { + c.Skip("Pipe only works on 2.1+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + result := struct{ A, B int }{} + + pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) + err = pipe.One(&result) + c.Assert(err, IsNil) + c.Assert(result.A, Equals, 1) + c.Assert(result.B, Equals, 3) + + pipe = coll.Pipe([]M{{"$match": M{"a": 2}}}) + err = pipe.One(&result) + c.Assert(err, Equals, mgo.ErrNotFound) +} + +func (s *S) TestBatch1Bug(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for i := 0; i < 3; i++ { + err := coll.Insert(M{"n": i}) + c.Assert(err, IsNil) + } + + var ns []struct{ N int } + err = coll.Find(nil).Batch(1).All(&ns) + c.Assert(err, IsNil) + c.Assert(len(ns), Equals, 3) + + session.SetBatch(1) + err = coll.Find(nil).All(&ns) + c.Assert(err, IsNil) + c.Assert(len(ns), Equals, 3) +} + +func (s *S) TestInterfaceIterBug(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + for i := 0; i < 3; i++ { + err := coll.Insert(M{"n": i}) + c.Assert(err, IsNil) + } + + var result interface{} + + i := 0 + iter := coll.Find(nil).Sort("n").Iter() + for iter.Next(&result) { + c.Assert(result.(bson.M)["n"], Equals, i) + i++ + } + c.Assert(iter.Close(), IsNil) +} + +func (s *S) TestFindIterCloseKillsCursor(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + cursors := serverCursorsOpen(session) + + coll := 
session.DB("mydb").C("mycoll") + ns := []int{40, 41, 42, 43, 44, 45, 46} + for _, n := range ns { + err = coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + iter := coll.Find(nil).Batch(2).Iter() + c.Assert(iter.Next(bson.M{}), Equals, true) + + c.Assert(iter.Close(), IsNil) + c.Assert(serverCursorsOpen(session), Equals, cursors) +} + +func (s *S) TestLogReplay(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + for i := 0; i < 5; i++ { + err = coll.Insert(M{"ts": time.Now()}) + c.Assert(err, IsNil) + } + + iter := coll.Find(nil).LogReplay().Iter() + if s.versionAtLeast(2, 6) { + // This used to fail in 2.4. Now it's just a smoke test. + c.Assert(iter.Err(), IsNil) + } else { + c.Assert(iter.Next(bson.M{}), Equals, false) + c.Assert(iter.Err(), ErrorMatches, "no ts field in query") + } +} + +func (s *S) TestSetCursorTimeout(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 42}) + + // This is just a smoke test. Won't wait 10 minutes for an actual timeout. + + session.SetCursorTimeout(0) + + var result struct{ N int } + iter := coll.Find(nil).Iter() + c.Assert(iter.Next(&result), Equals, true) + c.Assert(result.N, Equals, 42) + c.Assert(iter.Next(&result), Equals, false) +} diff --git a/vendor/labix.org/v2/mgo/socket.go b/vendor/labix.org/v2/mgo/socket.go new file mode 100644 index 0000000..97c2fd7 --- /dev/null +++ b/vendor/labix.org/v2/mgo/socket.go @@ -0,0 +1,673 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "errors" + "labix.org/v2/mgo/bson" + "net" + "sync" + "time" +) + +type replyFunc func(err error, reply *replyOp, docNum int, docData []byte) + +type mongoSocket struct { + sync.Mutex + server *mongoServer // nil when cached + conn net.Conn + timeout time.Duration + addr string // For debugging only. 
+ nextRequestId uint32 + replyFuncs map[uint32]replyFunc + references int + creds []Credential + logout []Credential + cachedNonce string + gotNonce sync.Cond + dead error + serverInfo *mongoServerInfo +} + +type queryOpFlags uint32 + +const ( + _ queryOpFlags = 1 << iota + flagTailable + flagSlaveOk + flagLogReplay + flagNoCursorTimeout + flagAwaitData +) + +type queryOp struct { + collection string + query interface{} + skip int32 + limit int32 + selector interface{} + flags queryOpFlags + replyFunc replyFunc + + options queryWrapper + hasOptions bool + serverTags []bson.D +} + +type queryWrapper struct { + Query interface{} "$query" + OrderBy interface{} "$orderby,omitempty" + Hint interface{} "$hint,omitempty" + Explain bool "$explain,omitempty" + Snapshot bool "$snapshot,omitempty" + ReadPreference bson.D "$readPreference,omitempty" +} + +func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { + if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos { + op.hasOptions = true + op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}} + } + if op.hasOptions { + if op.query == nil { + var empty bson.D + op.options.Query = empty + } else { + op.options.Query = op.query + } + debugf("final query is %#v\n", &op.options) + return &op.options + } + return op.query +} + +type getMoreOp struct { + collection string + limit int32 + cursorId int64 + replyFunc replyFunc +} + +type replyOp struct { + flags uint32 + cursorId int64 + firstDoc int32 + replyDocs int32 +} + +type insertOp struct { + collection string // "database.collection" + documents []interface{} // One or more documents to insert + flags uint32 +} + +type updateOp struct { + collection string // "database.collection" + selector interface{} + update interface{} + flags uint32 +} + +type deleteOp struct { + collection string // "database.collection" + selector interface{} + flags uint32 +} + +type killCursorsOp struct { + cursorIds []int64 +} + +type requestInfo struct { + bufferPos int + replyFunc replyFunc +} + +func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket { + socket := &mongoSocket{ + conn: conn, + addr: server.Addr, + server: server, + replyFuncs: make(map[uint32]replyFunc), + } + socket.gotNonce.L = &socket.Mutex + if err := socket.InitialAcquire(server.Info(), timeout); err != nil { + panic("newSocket: InitialAcquire returned error: " + err.Error()) + } + stats.socketsAlive(+1) + debugf("Socket %p to %s: initialized", socket, socket.addr) + socket.resetNonce() + go socket.readLoop() + return socket +} + +// Server returns the server that the socket is associated with. +// It returns nil while the socket is cached in its respective server. +func (socket *mongoSocket) Server() *mongoServer { + socket.Lock() + server := socket.server + socket.Unlock() + return server +} + +// ServerInfo returns details for the server at the time the socket +// was initially acquired. +func (socket *mongoSocket) ServerInfo() *mongoServerInfo { + socket.Lock() + serverInfo := socket.serverInfo + socket.Unlock() + return serverInfo +} + +// InitialAcquire obtains the first reference to the socket, either +// right after the connection is made or once a recycled socket is +// being put back in use. 
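The nextRequestId/replyFuncs pair above is a classic correlation map for a multiplexed connection: every outgoing op that expects a reply registers a callback under the request id stamped into its header, and the read loop dispatches each OP_REPLY by its responseTo field. A minimal standalone sketch of the pattern (names are illustrative, not mgo's):

```go
package main

import (
	"fmt"
	"sync"
)

// replyHandler mirrors the role of mgo's replyFunc: it runs when the
// response matching a previously sent request id arrives.
type replyHandler func(data string)

type conn struct {
	mu       sync.Mutex
	nextID   uint32
	handlers map[uint32]replyHandler
}

// send registers a handler under a fresh request id and returns the id
// that would be stamped into the outgoing message header.
func (c *conn) send(h replyHandler) uint32 {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.nextID++ // id 0 stays reserved for fire-and-forget ops
	c.handlers[c.nextID] = h
	return c.nextID
}

// dispatch looks up and removes the handler for a responseTo id,
// as the read loop does for each incoming reply.
func (c *conn) dispatch(responseTo uint32, data string) {
	c.mu.Lock()
	h := c.handlers[responseTo]
	delete(c.handlers, responseTo)
	c.mu.Unlock()
	if h != nil {
		h(data)
	}
}

func main() {
	c := &conn{handlers: make(map[uint32]replyHandler)}
	id := c.send(func(data string) { fmt.Println("got:", data) })
	c.dispatch(id, "reply for request") // prints "got: reply for request"
}
```

Keeping registration and dispatch under one mutex is what lets many goroutines share a single socket safely.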
+func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error { + socket.Lock() + if socket.references > 0 { + panic("Socket acquired out of cache with references") + } + if socket.dead != nil { + dead := socket.dead + socket.Unlock() + return dead + } + socket.references++ + socket.serverInfo = serverInfo + socket.timeout = timeout + stats.socketsInUse(+1) + stats.socketRefs(+1) + socket.Unlock() + return nil +} + +// Acquire obtains an additional reference to the socket. +// The socket will only be recycled when it's released as many +// times as it's been acquired. +func (socket *mongoSocket) Acquire() (info *mongoServerInfo) { + socket.Lock() + if socket.references == 0 { + panic("Socket got non-initial acquire with references == 0") + } + // We'll track references to dead sockets as well. + // Caller is still supposed to release the socket. + socket.references++ + stats.socketRefs(+1) + serverInfo := socket.serverInfo + socket.Unlock() + return serverInfo +} + +// Release decrements a socket reference. The socket will be +// recycled once its released as many times as it's been acquired. +func (socket *mongoSocket) Release() { + socket.Lock() + if socket.references == 0 { + panic("socket.Release() with references == 0") + } + socket.references-- + stats.socketRefs(-1) + if socket.references == 0 { + stats.socketsInUse(-1) + server := socket.server + socket.Unlock() + socket.LogoutAll() + // If the socket is dead server is nil. + if server != nil { + server.RecycleSocket(socket) + } + } else { + socket.Unlock() + } +} + +// SetTimeout changes the timeout used on socket operations. +func (socket *mongoSocket) SetTimeout(d time.Duration) { + socket.Lock() + socket.timeout = d + socket.Unlock() +} + +type deadlineType int + +const ( + readDeadline deadlineType = 1 + writeDeadline deadlineType = 2 +) + +func (socket *mongoSocket) updateDeadline(which deadlineType) { + var when time.Time + if socket.timeout > 0 { + when = time.Now().Add(socket.timeout) + } + whichstr := "" + switch which { + case readDeadline | writeDeadline: + whichstr = "read/write" + socket.conn.SetDeadline(when) + case readDeadline: + whichstr = "read" + socket.conn.SetReadDeadline(when) + case writeDeadline: + whichstr = "write" + socket.conn.SetWriteDeadline(when) + default: + panic("invalid parameter to updateDeadline") + } + debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when) +} + +// Close terminates the socket use. 
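InitialAcquire, Acquire, and Release implement plain reference counting with recycle-on-zero. Stripped of the locking, stats, and dead-socket handling, the lifecycle looks roughly like this (a sketch, not the driver's code):

```go
package main

import "fmt"

// pooled mimics the socket's acquire/release accounting: the resource
// returns to its pool only when released as many times as acquired.
type pooled struct {
	refs    int
	recycle func()
}

// InitialAcquire takes the first reference, e.g. when the resource
// comes out of a cache.
func (p *pooled) InitialAcquire() {
	if p.refs != 0 {
		panic("initial acquire on a resource still in use")
	}
	p.refs = 1
}

// Acquire takes an additional reference; it is only legal while at
// least one reference is already held.
func (p *pooled) Acquire() {
	if p.refs == 0 {
		panic("non-initial acquire with zero references")
	}
	p.refs++
}

// Release drops a reference and recycles on the last one.
func (p *pooled) Release() {
	if p.refs == 0 {
		panic("release with zero references")
	}
	p.refs--
	if p.refs == 0 {
		p.recycle()
	}
}

func main() {
	p := &pooled{recycle: func() { fmt.Println("recycled") }}
	p.InitialAcquire()
	p.Acquire()
	p.Release()
	p.Release() // prints "recycled"
}
```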
+func (socket *mongoSocket) Close() { + socket.kill(errors.New("Closed explicitly"), false) +} + +func (socket *mongoSocket) kill(err error, abend bool) { + socket.Lock() + if socket.dead != nil { + debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error()) + socket.Unlock() + return + } + logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend) + socket.dead = err + socket.conn.Close() + stats.socketsAlive(-1) + replyFuncs := socket.replyFuncs + socket.replyFuncs = make(map[uint32]replyFunc) + server := socket.server + socket.server = nil + socket.gotNonce.Broadcast() + socket.Unlock() + for _, replyFunc := range replyFuncs { + logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error()) + replyFunc(err, nil, -1, nil) + } + if abend { + server.AbendSocket(socket) + } +} + +func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) { + var wait, change sync.Mutex + var replyDone bool + var replyData []byte + var replyErr error + wait.Lock() + op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + change.Lock() + if !replyDone { + replyDone = true + replyErr = err + if err == nil { + replyData = docData + } + } + change.Unlock() + wait.Unlock() + } + err = socket.Query(op) + if err != nil { + return nil, err + } + wait.Lock() + change.Lock() + data = replyData + err = replyErr + change.Unlock() + return data, err +} + +func (socket *mongoSocket) Query(ops ...interface{}) (err error) { + + if lops := socket.flushLogout(); len(lops) > 0 { + ops = append(lops, ops...) + } + + buf := make([]byte, 0, 256) + + // Serialize operations synchronously to avoid interrupting + // other goroutines while we can't really be sending data. + // Also, record id positions so that we can compute request + // ids at once later with the lock already held. 
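SimpleQuery below blocks on a mutex that the reply callback unlocks; a sync.Mutex locked by one goroutine and unlocked by another is a documented, if unusual, one-shot signal in Go. The trick in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

// One-shot wait built from a Mutex, as SimpleQuery does: the requester
// locks, the reply callback unlocks, and a second Lock blocks until then.
func main() {
	var wait sync.Mutex
	var reply string

	wait.Lock() // held before any callback can fire
	callback := func(data string) {
		reply = data
		wait.Unlock() // wakes the waiter exactly once
	}

	go callback("reply payload") // stands in for the socket's read loop

	wait.Lock() // blocks until the callback has run
	fmt.Println("reply:", reply)
}
```

A channel would be the more idiomatic choice today, but the mutex version needs no allocation beyond the two stack values.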
+ requests := make([]requestInfo, len(ops)) + requestCount := 0 + + for _, op := range ops { + debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op) + start := len(buf) + var replyFunc replyFunc + switch op := op.(type) { + + case *updateOp: + buf = addHeader(buf, 2001) + buf = addInt32(buf, 0) // Reserved + buf = addCString(buf, op.collection) + buf = addInt32(buf, int32(op.flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) + buf, err = addBSON(buf, op.selector) + if err != nil { + return err + } + debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update) + buf, err = addBSON(buf, op.update) + if err != nil { + return err + } + + case *insertOp: + buf = addHeader(buf, 2002) + buf = addInt32(buf, int32(op.flags)) + buf = addCString(buf, op.collection) + for _, doc := range op.documents { + debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc) + buf, err = addBSON(buf, doc) + if err != nil { + return err + } + } + + case *queryOp: + buf = addHeader(buf, 2004) + buf = addInt32(buf, int32(op.flags)) + buf = addCString(buf, op.collection) + buf = addInt32(buf, op.skip) + buf = addInt32(buf, op.limit) + buf, err = addBSON(buf, op.finalQuery(socket)) + if err != nil { + return err + } + if op.selector != nil { + buf, err = addBSON(buf, op.selector) + if err != nil { + return err + } + } + replyFunc = op.replyFunc + + case *getMoreOp: + buf = addHeader(buf, 2005) + buf = addInt32(buf, 0) // Reserved + buf = addCString(buf, op.collection) + buf = addInt32(buf, op.limit) + buf = addInt64(buf, op.cursorId) + replyFunc = op.replyFunc + + case *deleteOp: + buf = addHeader(buf, 2006) + buf = addInt32(buf, 0) // Reserved + buf = addCString(buf, op.collection) + buf = addInt32(buf, int32(op.flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) + buf, err = addBSON(buf, op.selector) + if err != nil { + return err + } + + case *killCursorsOp: + buf = addHeader(buf, 2007) + buf = addInt32(buf, 0) // Reserved + buf = addInt32(buf, int32(len(op.cursorIds))) + for _, cursorId := range op.cursorIds { + buf = addInt64(buf, cursorId) + } + + default: + panic("internal error: unknown operation type") + } + + setInt32(buf, start, int32(len(buf)-start)) + + if replyFunc != nil { + request := &requests[requestCount] + request.replyFunc = replyFunc + request.bufferPos = start + requestCount++ + } + } + + // Buffer is ready for the pipe. Lock, allocate ids, and enqueue. + + socket.Lock() + if socket.dead != nil { + dead := socket.dead + socket.Unlock() + debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error()) + // XXX This seems necessary in case the session is closed concurrently + // with a query being performed, but it's not yet tested: + for i := 0; i != requestCount; i++ { + request := &requests[i] + if request.replyFunc != nil { + request.replyFunc(dead, nil, -1, nil) + } + } + return dead + } + + wasWaiting := len(socket.replyFuncs) > 0 + + // Reserve id 0 for requests which should have no responses. 
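Each serialized op starts with an empty header whose length field is backpatched once the body is known, via setInt32(buf, start, int32(len(buf)-start)). The pattern in isolation, using encoding/binary instead of the hand-rolled helpers (a sketch):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendFrame builds a length-prefixed frame with a placeholder that is
// patched after the payload is appended, mirroring the setInt32 call above.
func appendFrame(buf []byte, payload []byte) []byte {
	start := len(buf)
	buf = append(buf, 0, 0, 0, 0) // length placeholder
	buf = append(buf, payload...)
	binary.LittleEndian.PutUint32(buf[start:], uint32(len(buf)-start))
	return buf
}

func main() {
	buf := appendFrame(nil, []byte("hello"))
	buf = appendFrame(buf, []byte("world!"))
	fmt.Println(binary.LittleEndian.Uint32(buf)) // 9: 4-byte prefix + "hello"
	fmt.Printf("% x\n", buf)
}
```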
+ requestId := socket.nextRequestId + 1 + if requestId == 0 { + requestId++ + } + socket.nextRequestId = requestId + uint32(requestCount) + for i := 0; i != requestCount; i++ { + request := &requests[i] + setInt32(buf, request.bufferPos+4, int32(requestId)) + socket.replyFuncs[requestId] = request.replyFunc + requestId++ + } + + debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf)) + stats.sentOps(len(ops)) + + socket.updateDeadline(writeDeadline) + _, err = socket.conn.Write(buf) + if !wasWaiting && requestCount > 0 { + socket.updateDeadline(readDeadline) + } + socket.Unlock() + return err +} + +func fill(r net.Conn, b []byte) error { + l := len(b) + n, err := r.Read(b) + for n != l && err == nil { + var ni int + ni, err = r.Read(b[n:]) + n += ni + } + return err +} + +// Estimated minimum cost per socket: 1 goroutine + memory for the largest +// document ever seen. +func (socket *mongoSocket) readLoop() { + p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields + s := make([]byte, 4) + conn := socket.conn // No locking, conn never changes. + for { + // XXX Handle timeouts, , etc + err := fill(conn, p) + if err != nil { + socket.kill(err, true) + return + } + + totalLen := getInt32(p, 0) + responseTo := getInt32(p, 8) + opCode := getInt32(p, 12) + + // Don't use socket.server.Addr here. socket is not + // locked and socket.server may go away. + debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen) + + _ = totalLen + + if opCode != 1 { + socket.kill(errors.New("opcode != 1, corrupted data?"), true) + return + } + + reply := replyOp{ + flags: uint32(getInt32(p, 16)), + cursorId: getInt64(p, 20), + firstDoc: getInt32(p, 28), + replyDocs: getInt32(p, 32), + } + + stats.receivedOps(+1) + stats.receivedDocs(int(reply.replyDocs)) + + socket.Lock() + replyFunc, ok := socket.replyFuncs[uint32(responseTo)] + if ok { + delete(socket.replyFuncs, uint32(responseTo)) + } + socket.Unlock() + + if replyFunc != nil && reply.replyDocs == 0 { + replyFunc(nil, &reply, -1, nil) + } else { + for i := 0; i != int(reply.replyDocs); i++ { + err := fill(conn, s) + if err != nil { + if replyFunc != nil { + replyFunc(err, nil, -1, nil) + } + socket.kill(err, true) + return + } + + b := make([]byte, int(getInt32(s, 0))) + + // copy(b, s) in an efficient way. + b[0] = s[0] + b[1] = s[1] + b[2] = s[2] + b[3] = s[3] + + err = fill(conn, b[4:]) + if err != nil { + if replyFunc != nil { + replyFunc(err, nil, -1, nil) + } + socket.kill(err, true) + return + } + + if globalDebug && globalLogger != nil { + m := bson.M{} + if err := bson.Unmarshal(b, m); err == nil { + debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m) + } + } + + if replyFunc != nil { + replyFunc(nil, &reply, i, b) + } + + // XXX Do bound checking against totalLen. + } + } + + socket.Lock() + if len(socket.replyFuncs) == 0 { + // Nothing else to read for now. Disable deadline. + socket.conn.SetReadDeadline(time.Time{}) + } else { + socket.updateDeadline(readDeadline) + } + socket.Unlock() + + // XXX Do bound checking against totalLen. + } +} + +var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +func addHeader(b []byte, opcode int) []byte { + i := len(b) + b = append(b, emptyHeader...) + // Enough for current opcodes. 
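fill is functionally io.ReadFull: it loops until the buffer is full or an error occurs. The fixed-offset header parsing done in readLoop can be expressed with the standard library like this (offsets follow the 16-byte message header read above; an illustration, not the driver's code):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// parseReplyHeader reads a 16-byte wire header and decodes the fields
// readLoop cares about, at the same little-endian offsets.
func parseReplyHeader(r io.Reader) (totalLen, responseTo, opCode int32, err error) {
	p := make([]byte, 16)
	if _, err = io.ReadFull(r, p); err != nil { // plays the role of fill
		return
	}
	totalLen = int32(binary.LittleEndian.Uint32(p[0:]))
	responseTo = int32(binary.LittleEndian.Uint32(p[8:]))
	opCode = int32(binary.LittleEndian.Uint32(p[12:]))
	return
}

func main() {
	// Craft a fake header: totalLen=36, requestID=7, responseTo=1, opCode=1.
	h := make([]byte, 16)
	binary.LittleEndian.PutUint32(h[0:], 36)
	binary.LittleEndian.PutUint32(h[4:], 7)
	binary.LittleEndian.PutUint32(h[8:], 1)
	binary.LittleEndian.PutUint32(h[12:], 1)
	fmt.Println(parseReplyHeader(bytes.NewReader(h))) // 36 1 1 <nil>
}
```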
+ b[i+12] = byte(opcode) + b[i+13] = byte(opcode >> 8) + return b +} + +func addInt32(b []byte, i int32) []byte { + return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24)) +} + +func addInt64(b []byte, i int64) []byte { + return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), + byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) +} + +func addCString(b []byte, s string) []byte { + b = append(b, []byte(s)...) + b = append(b, 0) + return b +} + +func addBSON(b []byte, doc interface{}) ([]byte, error) { + if doc == nil { + return append(b, 5, 0, 0, 0, 0), nil + } + data, err := bson.Marshal(doc) + if err != nil { + return b, err + } + return append(b, data...), nil +} + +func setInt32(b []byte, pos int, i int32) { + b[pos] = byte(i) + b[pos+1] = byte(i >> 8) + b[pos+2] = byte(i >> 16) + b[pos+3] = byte(i >> 24) +} + +func getInt32(b []byte, pos int) int32 { + return (int32(b[pos+0])) | + (int32(b[pos+1]) << 8) | + (int32(b[pos+2]) << 16) | + (int32(b[pos+3]) << 24) +} + +func getInt64(b []byte, pos int) int64 { + return (int64(b[pos+0])) | + (int64(b[pos+1]) << 8) | + (int64(b[pos+2]) << 16) | + (int64(b[pos+3]) << 24) | + (int64(b[pos+4]) << 32) | + (int64(b[pos+5]) << 40) | + (int64(b[pos+6]) << 48) | + (int64(b[pos+7]) << 56) +} diff --git a/vendor/labix.org/v2/mgo/stats.go b/vendor/labix.org/v2/mgo/stats.go new file mode 100644 index 0000000..59723e6 --- /dev/null +++ b/vendor/labix.org/v2/mgo/stats.go @@ -0,0 +1,147 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
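The hand-rolled addInt64/getInt64 helpers above are byte-for-byte little-endian, identical to what encoding/binary produces. A quick round-trip check (standalone sketch):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const v = int64(-424242424242)

	// The same shift-and-truncate encoding addInt64 uses.
	hand := []byte{
		byte(v), byte(v >> 8), byte(v >> 16), byte(v >> 24),
		byte(v >> 32), byte(v >> 40), byte(v >> 48), byte(v >> 56),
	}

	std := make([]byte, 8)
	binary.LittleEndian.PutUint64(std, uint64(v))

	fmt.Println(string(hand) == string(std))                  // true
	fmt.Println(int64(binary.LittleEndian.Uint64(hand)) == v) // true
}
```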
+ +package mgo + +import ( + "sync" +) + +var stats *Stats +var statsMutex sync.Mutex + +func SetStats(enabled bool) { + statsMutex.Lock() + if enabled { + if stats == nil { + stats = &Stats{} + } + } else { + stats = nil + } + statsMutex.Unlock() +} + +func GetStats() (snapshot Stats) { + statsMutex.Lock() + snapshot = *stats + statsMutex.Unlock() + return +} + +func ResetStats() { + statsMutex.Lock() + debug("Resetting stats") + old := stats + stats = &Stats{} + // These are absolute values: + stats.Clusters = old.Clusters + stats.SocketsInUse = old.SocketsInUse + stats.SocketsAlive = old.SocketsAlive + stats.SocketRefs = old.SocketRefs + statsMutex.Unlock() + return +} + +type Stats struct { + Clusters int + MasterConns int + SlaveConns int + SentOps int + ReceivedOps int + ReceivedDocs int + SocketsAlive int + SocketsInUse int + SocketRefs int +} + +func (stats *Stats) cluster(delta int) { + if stats != nil { + statsMutex.Lock() + stats.Clusters += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) conn(delta int, master bool) { + if stats != nil { + statsMutex.Lock() + if master { + stats.MasterConns += delta + } else { + stats.SlaveConns += delta + } + statsMutex.Unlock() + } +} + +func (stats *Stats) sentOps(delta int) { + if stats != nil { + statsMutex.Lock() + stats.SentOps += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) receivedOps(delta int) { + if stats != nil { + statsMutex.Lock() + stats.ReceivedOps += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) receivedDocs(delta int) { + if stats != nil { + statsMutex.Lock() + stats.ReceivedDocs += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) socketsInUse(delta int) { + if stats != nil { + statsMutex.Lock() + stats.SocketsInUse += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) socketsAlive(delta int) { + if stats != nil { + statsMutex.Lock() + stats.SocketsAlive += delta + statsMutex.Unlock() + } +} + +func (stats *Stats) socketRefs(delta int) { + if stats != nil { + statsMutex.Lock() + stats.SocketRefs += delta + statsMutex.Unlock() + } +} diff --git a/vendor/labix.org/v2/mgo/suite_test.go b/vendor/labix.org/v2/mgo/suite_test.go new file mode 100644 index 0000000..a846c51 --- /dev/null +++ b/vendor/labix.org/v2/mgo/suite_test.go @@ -0,0 +1,240 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
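Note that GetStats dereferences the package-level stats pointer, so it assumes SetStats(true) was called first; the delta methods, by contrast, tolerate a nil receiver. An alternative race-free design using typed atomics (Go 1.19+; a sketch of the idea, not what the vendored code does):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Counters that can always be read safely, even while collection is
// toggled off, avoiding the nil-pointer hazard of a swapped global.
type Counters struct {
	SentOps     atomic.Int64
	ReceivedOps atomic.Int64
}

var enabled atomic.Bool
var counters Counters

func sentOps(delta int64) {
	if enabled.Load() {
		counters.SentOps.Add(delta)
	}
}

func main() {
	enabled.Store(true)
	sentOps(3)
	fmt.Println(counters.SentOps.Load()) // 3
}
```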
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + "errors" + "flag" + "fmt" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + . "launchpad.net/gocheck" + "net" + "os/exec" + "strconv" + "syscall" + + "testing" + "time" +) + +var fast = flag.Bool("fast", false, "Skip slow tests") + +type M bson.M + +type cLogger C + +func (c *cLogger) Output(calldepth int, s string) error { + ns := time.Now().UnixNano() + t := float64(ns%100e9) / 1e9 + ((*C)(c)).Logf("[LOG] %.05f %s", t, s) + return nil +} + +func TestAll(t *testing.T) { + TestingT(t) +} + +type S struct { + session *mgo.Session + stopped bool + build mgo.BuildInfo + frozen []string +} + +func (s *S) versionAtLeast(v ...int) bool { + for i := range v { + if i == len(s.build.VersionArray) { + return false + } + if s.build.VersionArray[i] < v[i] { + return false + } + } + return true +} + +var _ = Suite(&S{}) + +func (s *S) SetUpSuite(c *C) { + mgo.SetDebug(true) + mgo.SetStats(true) + s.StartAll() + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + s.build, err = session.BuildInfo() + c.Check(err, IsNil) + session.Close() +} + +func (s *S) SetUpTest(c *C) { + err := run("mongo --nodb testdb/dropall.js") + if err != nil { + panic(err.Error()) + } + mgo.SetLogger((*cLogger)(c)) + mgo.ResetStats() +} + +func (s *S) TearDownTest(c *C) { + if s.stopped { + s.StartAll() + } + for _, host := range s.frozen { + if host != "" { + s.Thaw(host) + } + } + var stats mgo.Stats + for i := 0; ; i++ { + stats = mgo.GetStats() + if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { + break + } + if i == 20 { + c.Fatal("Test left sockets in a dirty state") + } + c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) + time.Sleep(500 * time.Millisecond) + } + for i := 0; ; i++ { + stats = mgo.GetStats() + if stats.Clusters == 0 { + break + } + if i == 60 { + c.Fatal("Test left clusters alive") + } + c.Logf("Waiting for clusters to die: %d alive", stats.Clusters) + time.Sleep(1 * time.Second) + } +} + +func (s *S) Stop(host string) { + // Give a moment for slaves to sync and avoid getting rollback issues. 
+ time.Sleep(2 * time.Second) + err := run("cd _testdb && supervisorctl stop " + supvName(host)) + if err != nil { + panic(err) + } + s.stopped = true +} + +func (s *S) pid(host string) int { + output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput() + if err != nil { + panic(err) + } + pidstr := string(output[1 : len(output)-1]) + pid, err := strconv.Atoi(pidstr) + if err != nil { + panic("cannot convert pid to int: " + pidstr) + } + return pid +} + +func (s *S) Freeze(host string) { + err := syscall.Kill(s.pid(host), syscall.SIGSTOP) + if err != nil { + panic(err) + } + s.frozen = append(s.frozen, host) +} + +func (s *S) Thaw(host string) { + err := syscall.Kill(s.pid(host), syscall.SIGCONT) + if err != nil { + panic(err) + } + for i, frozen := range s.frozen { + if frozen == host { + s.frozen[i] = "" + } + } +} + +func (s *S) StartAll() { + // Restart any stopped nodes. + run("cd _testdb && supervisorctl start all") + err := run("cd testdb && mongo --nodb wait.js") + if err != nil { + panic(err) + } + s.stopped = false +} + +func run(command string) error { + output, err := exec.Command("/bin/sh", "-c", command).CombinedOutput() + if err != nil { + msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output)) + return errors.New(msg) + } + return nil +} + +var supvNames = map[string]string{ + "40001": "db1", + "40002": "db2", + "40011": "rs1a", + "40012": "rs1b", + "40013": "rs1c", + "40021": "rs2a", + "40022": "rs2b", + "40023": "rs2c", + "40031": "rs3a", + "40032": "rs3b", + "40033": "rs3c", + "40041": "rs4a", + "40101": "cfg1", + "40102": "cfg2", + "40103": "cfg3", + "40201": "s1", + "40202": "s2", + "40203": "s3", +} + +// supvName returns the supervisord name for the given host address. +func supvName(host string) string { + host, port, err := net.SplitHostPort(host) + if err != nil { + panic(err) + } + name, ok := supvNames[port] + if !ok { + panic("Unknown host: " + host) + } + return name +} + +func hostPort(host string) string { + _, port, err := net.SplitHostPort(host) + if err != nil { + panic(err) + } + return port +} diff --git a/vendor/labix.org/v2/mgo/testdb/dropall.js b/vendor/labix.org/v2/mgo/testdb/dropall.js new file mode 100644 index 0000000..ca12892 --- /dev/null +++ b/vendor/labix.org/v2/mgo/testdb/dropall.js @@ -0,0 +1,47 @@ + +var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] +var auth = [40002, 40103, 40203, 40031] + +for (var i in ports) { + var port = ports[i] + var server = "localhost:" + port + var mongo = new Mongo("localhost:" + port) + var admin = mongo.getDB("admin") + + for (var j in auth) { + if (auth[j] == port) { + admin.auth("root", "rapadura") + admin.system.users.find().forEach(function(u) { + if (u.user == "root" || u.user == "reader") { + return; + } + if (typeof admin.dropUser == "function") { + mongo.getDB(u.db).dropUser(u.user); + } else { + admin.removeUser(u.user); + } + }) + break + } + } + var result = admin.runCommand({"listDatabases": 1}) + // Why is the command returning undefined!? 
+ while (typeof result.databases == "undefined") { + print("dropall.js: listing databases of :" + port + " got:", result) + result = admin.runCommand({"listDatabases": 1}) + } + var dbs = result.databases + for (var j = 0; j != dbs.length; j++) { + var db = dbs[j] + switch (db.name) { + case "admin": + case "local": + case "config": + break + default: + mongo.getDB(db.name).dropDatabase() + } + } +} + +// vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/init.js b/vendor/labix.org/v2/mgo/testdb/init.js new file mode 100644 index 0000000..02a6c61 --- /dev/null +++ b/vendor/labix.org/v2/mgo/testdb/init.js @@ -0,0 +1,103 @@ +//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5} +var settings = {}; + +// We know the master of the first set (pri=1), but not of the second. +var rs1cfg = {_id: "rs1", + members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}}, + {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}}, + {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}], + settings: settings} +var rs2cfg = {_id: "rs2", + members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}, + {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}}, + {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}], + settings: settings} +var rs3cfg = {_id: "rs3", + members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}}, + {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}}, + {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}], + settings: settings} + +for (var i = 0; i != 60; i++) { + try { + db1 = new Mongo("127.0.0.1:40001").getDB("admin") + db2 = new Mongo("127.0.0.1:40002").getDB("admin") + rs1a = new Mongo("127.0.0.1:40011").getDB("admin") + rs2a = new Mongo("127.0.0.1:40021").getDB("admin") + rs3a = new Mongo("127.0.0.1:40031").getDB("admin") + break + } catch(err) { + print("Can't connect yet...") + } + sleep(1000) +} + +rs1a.runCommand({replSetInitiate: rs1cfg}) +rs2a.runCommand({replSetInitiate: rs2cfg}) +rs3a.runCommand({replSetInitiate: rs3cfg}) + +function configShards() { + cfg1 = new Mongo("127.0.0.1:40201").getDB("admin") + cfg1.runCommand({addshard: "127.0.0.1:40001"}) + cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"}) + + cfg2 = new Mongo("127.0.0.1:40202").getDB("admin") + cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"}) + + cfg3 = new Mongo("127.0.0.1:40203").getDB("admin") + cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"}) +} + +function configAuth() { + var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] + for (var i in addrs) { + var db = new Mongo(addrs[i]).getDB("admin") + var v = db.serverBuildInfo().versionArray + if (v < [2, 5]) { + db.addUser("root", "rapadura") + } else { + db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + } + db.auth("root", "rapadura") + if (v >= [2, 6]) { + db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) + } else if (v >= [2, 4]) { + db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) + } else { + db.addUser("reader", "rapadura", true) + } + } +} + +function countHealthy(rs) { + var status = rs.runCommand({replSetGetStatus: 1}) + var count = 0 + if (typeof status.members != "undefined") { + for (var i = 0; i != status.members.length; i++) { + var m = status.members[i] + if (m.health == 1 && (m.state == 1 || m.state == 2)) { + count += 1 + } + } + } + return count +} + +var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + 
rs3cfg.members.length + +for (var i = 0; i != 60; i++) { + var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) + print("Replica sets have", count, "healthy nodes.") + if (count == totalRSMembers) { + sleep(2000) + configShards() + configAuth() + quit(0) + } + sleep(1000) +} + +print("Replica sets didn't sync up properly.") +quit(12) + +// vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/setup.sh b/vendor/labix.org/v2/mgo/testdb/setup.sh new file mode 100755 index 0000000..ab841da --- /dev/null +++ b/vendor/labix.org/v2/mgo/testdb/setup.sh @@ -0,0 +1,54 @@ +#!/bin/sh -e + +start() { + mkdir _testdb + cd _testdb + mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 + ln -s ../testdb/supervisord.conf supervisord.conf + echo keyfile > keyfile + chmod 600 keyfile + echo "Running supervisord..." + supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) + COUNT=$(grep '^\[program' supervisord.conf | wc -l) + echo "Supervisord is up, starting $COUNT processes..." + for i in $(seq 10); do + RUNNING=$(supervisorctl status | grep RUNNING | wc -l) + echo "$RUNNING processes running..." + if [ x$COUNT = x$RUNNING ]; then + echo "Running setup.js with mongo..." + mongo --nodb ../testdb/init.js + exit 0 + fi + sleep 1 + done + echo "Failed to start all processes. Check out what's up at $PWD now!" + exit 1 +} + +stop() { + if [ -d _testdb ]; then + echo "Shutting down test cluster..." + (cd _testdb && supervisorctl shutdown) + rm -rf _testdb + fi +} + + +if [ ! -f suite_test.go ]; then + echo "This script must be run from within the source directory." + exit 1 +fi + +case "$1" in + + start) + start + ;; + + stop) + stop + ;; + +esac + +# vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/supervisord.conf b/vendor/labix.org/v2/mgo/testdb/supervisord.conf new file mode 100644 index 0000000..b0aca01 --- /dev/null +++ b/vendor/labix.org/v2/mgo/testdb/supervisord.conf @@ -0,0 +1,62 @@ +[supervisord] +logfile = %(here)s/supervisord.log +pidfile = %(here)s/supervisord.pid +directory = %(here)s +#nodaemon = true + +[inet_http_server] +port = 127.0.0.1:9001 + +[supervisorctl] +serverurl = http://127.0.0.1:9001 + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[program:db1] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 + +[program:db2] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth + +[program:rs1a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 +[program:rs1b] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012 +[program:rs1c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013 + +[program:rs2a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021 +[program:rs2b] +command = mongod --nohttpinterface --noprealloc --nojournal 
--smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022 +[program:rs2c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023 + +[program:rs3a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile +[program:rs3b] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile +[program:rs3c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile + +[program:rs4a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 + +[program:cfg1] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 + +[program:cfg2] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 + +[program:cfg3] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile + +[program:s1] +command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 + +[program:s2] +command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 + +[program:s3] +command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile diff --git a/vendor/labix.org/v2/mgo/testdb/wait.js b/vendor/labix.org/v2/mgo/testdb/wait.js new file mode 100644 index 0000000..de0d660 --- /dev/null +++ b/vendor/labix.org/v2/mgo/testdb/wait.js @@ -0,0 +1,58 @@ +// We know the master of the first set (pri=1), but not of the second. 
+var settings = {} +var rs1cfg = {_id: "rs1", + members: [{_id: 1, host: "127.0.0.1:40011", priority: 1}, + {_id: 2, host: "127.0.0.1:40012", priority: 0}, + {_id: 3, host: "127.0.0.1:40013", priority: 0}]} +var rs2cfg = {_id: "rs2", + members: [{_id: 1, host: "127.0.0.1:40021", priority: 1}, + {_id: 2, host: "127.0.0.1:40022", priority: 1}, + {_id: 3, host: "127.0.0.1:40023", priority: 0}]} +var rs3cfg = {_id: "rs3", + members: [{_id: 1, host: "127.0.0.1:40031", priority: 1}, + {_id: 2, host: "127.0.0.1:40032", priority: 1}, + {_id: 3, host: "127.0.0.1:40033", priority: 1}], + settings: settings} + +for (var i = 0; i != 60; i++) { + try { + rs1a = new Mongo("127.0.0.1:40011").getDB("admin") + rs2a = new Mongo("127.0.0.1:40021").getDB("admin") + rs3a = new Mongo("127.0.0.1:40031").getDB("admin") + rs3a.auth("root", "rapadura") + db1 = new Mongo("127.0.0.1:40001").getDB("admin") + db2 = new Mongo("127.0.0.1:40002").getDB("admin") + break + } catch(err) { + print("Can't connect yet...") + } + sleep(1000) +} + +function countHealthy(rs) { + var status = rs.runCommand({replSetGetStatus: 1}) + var count = 0 + if (typeof status.members != "undefined") { + for (var i = 0; i != status.members.length; i++) { + var m = status.members[i] + if (m.health == 1 && (m.state == 1 || m.state == 2)) { + count += 1 + } + } + } + return count +} + +var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length + +for (var i = 0; i != 60; i++) { + var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) + print("Replica sets have", count, "healthy nodes.") + if (count == totalRSMembers) { + quit(0) + } + sleep(1000) +} + +print("Replica sets didn't sync up properly.") +quit(12) diff --git a/vendor/labix.org/v2/mgo/txn/chaos.go b/vendor/labix.org/v2/mgo/txn/chaos.go new file mode 100644 index 0000000..a9258fa --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/chaos.go @@ -0,0 +1,68 @@ +package txn + +import ( + mrand "math/rand" + "time" +) + +var chaosEnabled = false +var chaosSetting Chaos + +// Chaos holds parameters for the failure injection mechanism. +type Chaos struct { + // KillChance is the 0.0 to 1.0 chance that a given checkpoint + // within the algorithm will raise an interruption that will + // stop the procedure. + KillChance float64 + + // SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint + // within the algorithm will be delayed by Slowdown before + // continuing. + SlowdownChance float64 + Slowdown time.Duration + + // If Breakpoint is set, the above settings will only affect the + // named breakpoint. + Breakpoint string +} + +// SetChaos sets the failure injection parameters to c. 
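A chaos checkpoint fires with probability p via the integer threshold mrand.Intn(1000) < int(p*1000). (In the slowdown branch as vendored, the threshold compares against int(kc*1000); given the SlowdownChance docs, sc is presumably intended.) The test in isolation:

```go
package main

import (
	"fmt"
	mrand "math/rand"
)

// maybeFail fires with probability p, using the same integer-threshold
// comparison as the chaos checkpoints.
func maybeFail(p float64) bool {
	return p > 0 && mrand.Intn(1000) < int(p*1000)
}

func main() {
	hits := 0
	for i := 0; i < 100000; i++ {
		if maybeFail(0.25) {
			hits++
		}
	}
	fmt.Printf("fired %.1f%% of the time\n", float64(hits)/1000) // ~25.0%
}
```

Note the resolution is 1/1000: any KillChance below 0.001 truncates to a threshold of 0 and never fires.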
+func SetChaos(c Chaos) { + chaosSetting = c + chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0 +} + +func chaos(bpname string) { + if !chaosEnabled { + return + } + switch chaosSetting.Breakpoint { + case "", bpname: + kc := chaosSetting.KillChance + if kc > 0 && mrand.Intn(1000) < int(kc*1000) { + panic(chaosError{}) + } + if bpname == "insert" { + return + } + sc := chaosSetting.SlowdownChance + if sc > 0 && mrand.Intn(1000) < int(kc*1000) { + time.Sleep(chaosSetting.Slowdown) + } + } +} + +type chaosError struct{} + +func (f *flusher) handleChaos(err *error) { + v := recover() + if v == nil { + return + } + if _, ok := v.(chaosError); ok { + f.debugf("Killed by chaos!") + *err = ErrChaos + return + } + panic(v) +} diff --git a/vendor/labix.org/v2/mgo/txn/debug.go b/vendor/labix.org/v2/mgo/txn/debug.go new file mode 100644 index 0000000..7f67f4e --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/debug.go @@ -0,0 +1,108 @@ +package txn + +import ( + "bytes" + "fmt" + "labix.org/v2/mgo/bson" + "sort" + "sync/atomic" +) + +var ( + debugEnabled bool + logger log_Logger +) + +type log_Logger interface { + Output(calldepth int, s string) error +} + +// Specify the *log.Logger where logged messages should be sent to. +func SetLogger(l log_Logger) { + logger = l +} + +// SetDebug enables or disables debugging. +func SetDebug(debug bool) { + debugEnabled = debug +} + +var ErrChaos = fmt.Errorf("interrupted by chaos") + +var debugId uint32 + +func debugPrefix() string { + d := atomic.AddUint32(&debugId, 1) - 1 + s := make([]byte, 0, 10) + for i := uint(0); i < 8; i++ { + s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf]) + if d>>(4*(i+1)) == 0 { + break + } + } + s = append(s, ')', ' ') + return string(s) +} + +func logf(format string, args ...interface{}) { + if logger != nil { + logger.Output(2, fmt.Sprintf(format, argsForLog(args)...)) + } +} + +func debugf(format string, args ...interface{}) { + if debugEnabled && logger != nil { + logger.Output(2, fmt.Sprintf(format, argsForLog(args)...)) + } +} + +func argsForLog(args []interface{}) []interface{} { + for i, arg := range args { + switch v := arg.(type) { + case bson.ObjectId: + args[i] = v.Hex() + case []bson.ObjectId: + lst := make([]string, len(v)) + for j, id := range v { + lst[j] = id.Hex() + } + args[i] = lst + case map[docKey][]bson.ObjectId: + buf := &bytes.Buffer{} + var dkeys docKeys + for dkey := range v { + dkeys = append(dkeys, dkey) + } + sort.Sort(dkeys) + for i, dkey := range dkeys { + if i > 0 { + buf.WriteByte(' ') + } + buf.WriteString(fmt.Sprintf("%v: {", dkey)) + for j, id := range v[dkey] { + if j > 0 { + buf.WriteByte(' ') + } + buf.WriteString(id.Hex()) + } + buf.WriteByte('}') + } + args[i] = buf.String() + case map[docKey][]int64: + buf := &bytes.Buffer{} + var dkeys docKeys + for dkey := range v { + dkeys = append(dkeys, dkey) + } + sort.Sort(dkeys) + for i, dkey := range dkeys { + if i > 0 { + buf.WriteByte(' ') + } + buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey])) + } + args[i] = buf.String() + } + } + return args +} diff --git a/vendor/labix.org/v2/mgo/txn/flusher.go b/vendor/labix.org/v2/mgo/txn/flusher.go new file mode 100644 index 0000000..846eefe --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/flusher.go @@ -0,0 +1,996 @@ +package txn + +import ( + "fmt" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + "sort" +) + +func flush(r *Runner, t *transaction) error { + f := &flusher{ + Runner: r, + goal: t, + goalKeys: make(map[docKey]bool), + queue: make(map[docKey][]token), + debugId: debugPrefix(), + } + 
for _, dkey := range f.goal.docKeys() { + f.goalKeys[dkey] = true + } + return f.run() +} + +type flusher struct { + *Runner + goal *transaction + goalKeys map[docKey]bool + queue map[docKey][]token + debugId string +} + +func (f *flusher) run() (err error) { + if chaosEnabled { + defer f.handleChaos(&err) + } + + f.debugf("Processing %s", f.goal) + seen := make(map[bson.ObjectId]*transaction) + if err := f.recurse(f.goal, seen); err != nil { + return err + } + if f.goal.done() { + return nil + } + + // Sparse workloads will generally be managed entirely by recurse. + // Getting here means one or more transactions have dependencies + // and perhaps cycles. + + // Build successors data for Tarjan's sort. Must consider + // that entries in txn-queue are not necessarily valid. + successors := make(map[bson.ObjectId][]bson.ObjectId) + ready := true + for _, dqueue := range f.queue { + NextPair: + for i := 0; i < len(dqueue); i++ { + pred := dqueue[i] + predid := pred.id() + predt := seen[predid] + if predt == nil || predt.Nonce != pred.nonce() { + continue + } + predsuccids, ok := successors[predid] + if !ok { + successors[predid] = nil + } + + for j := i + 1; j < len(dqueue); j++ { + succ := dqueue[j] + succid := succ.id() + succt := seen[succid] + if succt == nil || succt.Nonce != succ.nonce() { + continue + } + if _, ok := successors[succid]; !ok { + successors[succid] = nil + } + + // Found a valid pred/succ pair. + i = j - 1 + for _, predsuccid := range predsuccids { + if predsuccid == succid { + continue NextPair + } + } + successors[predid] = append(predsuccids, succid) + if succid == f.goal.Id { + // There are still pre-requisites to handle. + ready = false + } + continue NextPair + } + } + } + f.debugf("Queues: %v", f.queue) + f.debugf("Successors: %v", successors) + if ready { + f.debugf("Goal %s has no real pre-requisites", f.goal) + return f.advance(f.goal, nil, true) + } + + // Robert Tarjan's algorithm for detecting strongly-connected + // components is used for topological sorting and detecting + // cycles at once. The order in which transactions are applied + // in commonly affected documents must be a global agreement. 
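tarjanSort itself is not in this hunk; for reference, a compact recursive Tarjan SCC over string keys plays the same role of grouping mutually dependent transactions and emitting the groups in an order safe to apply (a sketch under that assumption):

```go
package main

import "fmt"

// tarjan returns the strongly connected components of a successors map,
// each cycle grouped into one component, in reverse topological order.
func tarjan(successors map[string][]string) [][]string {
	index := make(map[string]int)
	low := make(map[string]int)
	onStack := make(map[string]bool)
	var stack []string
	var sccs [][]string
	next := 0

	var strongconnect func(v string)
	strongconnect = func(v string) {
		index[v], low[v] = next, next
		next++
		stack = append(stack, v)
		onStack[v] = true
		for _, w := range successors[v] {
			if _, seen := index[w]; !seen {
				strongconnect(w)
				if low[w] < low[v] {
					low[v] = low[w]
				}
			} else if onStack[w] && index[w] < low[v] {
				low[v] = index[w]
			}
		}
		if low[v] == index[v] { // v roots a component; pop it off the stack
			var scc []string
			for {
				w := stack[len(stack)-1]
				stack = stack[:len(stack)-1]
				onStack[w] = false
				scc = append(scc, w)
				if w == v {
					break
				}
			}
			sccs = append(sccs, scc)
		}
	}
	for v := range successors {
		if _, seen := index[v]; !seen {
			strongconnect(v)
		}
	}
	return sccs
}

func main() {
	// a -> b -> c -> a form a cycle; d depends on the cycle.
	g := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}, "d": {"a"}}
	fmt.Println(tarjan(g)) // e.g. [[c b a] [d]]: the cycle is one component
}
```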
+ sorted := tarjanSort(successors) + if debugEnabled { + f.debugf("Tarjan output: %v", sorted) + } + pull := make(map[bson.ObjectId]*transaction) + for i := len(sorted) - 1; i >= 0; i-- { + scc := sorted[i] + f.debugf("Flushing %v", scc) + if len(scc) == 1 { + pull[scc[0]] = seen[scc[0]] + } + for _, id := range scc { + if err := f.advance(seen[id], pull, true); err != nil { + return err + } + } + if len(scc) > 1 { + for _, id := range scc { + pull[id] = seen[id] + } + } + } + return nil +} + +func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error { + seen[t.Id] = t + err := f.advance(t, nil, false) + if err != errPreReqs { + return err + } + for _, dkey := range t.docKeys() { + for _, dtt := range f.queue[dkey] { + id := dtt.id() + if seen[id] != nil { + continue + } + qt, err := f.load(id) + if err != nil { + return err + } + err = f.recurse(qt, seen) + if err != nil { + return err + } + } + } + return nil +} + +func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error { + for { + switch t.State { + case tpreparing, tprepared: + revnos, err := f.prepare(t, force) + if err != nil { + return err + } + if t.State != tprepared { + continue + } + if err = f.assert(t, revnos, pull); err != nil { + return err + } + if t.State != tprepared { + continue + } + if err = f.checkpoint(t, revnos); err != nil { + return err + } + case tapplying: + return f.apply(t, pull) + case taborting: + return f.abortOrReload(t, nil, pull) + case tapplied, taborted: + return nil + default: + panic(fmt.Errorf("transaction in unknown state: %q", t.State)) + } + } + panic("unreachable") +} + +type stash string + +const ( + stashStable stash = "" + stashInsert stash = "insert" + stashRemove stash = "remove" +) + +type txnInfo struct { + Queue []token `bson:"txn-queue"` + Revno int64 `bson:"txn-revno,omitempty"` + Insert bson.ObjectId `bson:"txn-insert,omitempty"` + Remove bson.ObjectId `bson:"txn-remove,omitempty"` +} + +type stashState string + +const ( + stashNew stashState = "" + stashInserting stashState = "inserting" +) + +var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}} + +var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false") + +// prepare injects t's id onto txn-queue for all affected documents +// and collects the current txn-queue and txn-revno values during +// the process. If the prepared txn-queue indicates that there are +// pre-requisite transactions to be applied and the force parameter +// is false, errPreReqs will be returned. Otherwise, the current +// tip revision numbers for all the documents are returned. +func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) { + if t.State != tpreparing { + return f.rescan(t, force) + } + f.debugf("Preparing %s", t) + + // Iterate in a stable way across all runners. This isn't + // strictly required, but reduces the chances of cycles. + dkeys := t.docKeys() + sort.Sort(dkeys) + + revno := make(map[docKey]int64) + info := txnInfo{} + tt := tokenFor(t) +NextDoc: + for _, dkey := range dkeys { + change := mgo.Change{ + Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}}, + ReturnNew: true, + } + c := f.tc.Database.C(dkey.C) + cquery := c.FindId(dkey.Id).Select(txnFields) + + RetryDoc: + change.Upsert = false + chaos("") + if _, err := cquery.Apply(change, &info); err == nil { + if info.Remove == "" { + // Fast path, unless workload is insert/remove heavy. 
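The enqueue step above rides on findAndModify: $addToSet pushes the token into txn-queue while ReturnNew hands back the resulting queue in the same round trip. A minimal standalone version against a scratch collection (hypothetical names; needs a mongod listening on localhost:27017 to actually run):

```go
package main

import (
	"fmt"

	"labix.org/v2/mgo"
	"labix.org/v2/mgo/bson"
)

func main() {
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()
	c := session.DB("test").C("docs")

	token := "4d65822107fcfd52_12345678" // illustrative "id_nonce" token
	change := mgo.Change{
		Update:    bson.M{"$addToSet": bson.M{"txn-queue": token}},
		ReturnNew: true, // read back the queue in the same round trip
	}
	var doc struct {
		Queue []string `bson:"txn-queue"`
	}
	if _, err := c.FindId("some-doc").Apply(change, &doc); err != nil {
		fmt.Println("apply failed:", err) // mgo.ErrNotFound if the doc is missing
		return
	}
	fmt.Println("queue after enqueue:", doc.Queue)
}
```

The ErrNotFound case is exactly what sends prepare down its stash-collection path above.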
+ revno[dkey] = info.Revno + f.queue[dkey] = info.Queue + f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) + continue NextDoc + } else { + // Handle remove in progress before preparing it. + if err := f.loadAndApply(info.Remove); err != nil { + return nil, err + } + goto RetryDoc + } + } else if err != mgo.ErrNotFound { + return nil, err + } + + // Document missing. Use stash collection. + change.Upsert = true + chaos("") + _, err := f.sc.FindId(dkey).Apply(change, &info) + if err != nil { + return nil, err + } + if info.Insert != "" { + // Handle insert in progress before preparing it. + if err := f.loadAndApply(info.Insert); err != nil { + return nil, err + } + goto RetryDoc + } + + // Must confirm stash is still in use and is the same one + // prepared, since applying a remove overwrites the stash. + docFound := false + stashFound := false + if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil { + docFound = true + } else if err != mgo.ErrNotFound { + return nil, err + } else if err = f.sc.FindId(dkey).One(&info); err == nil { + stashFound = true + if info.Revno == 0 { + // Missing revno in the stash only happens when it + // has been upserted, in which case it defaults to -1. + // Txn-inserted documents get revno -1 while in the stash + // for the first time, and -revno-1 == 2 when they go live. + info.Revno = -1 + } + } else if err != mgo.ErrNotFound { + return nil, err + } + + if docFound && info.Remove == "" || stashFound && info.Insert == "" { + for _, dtt := range info.Queue { + if dtt != tt { + continue + } + // Found tt properly prepared. + if stashFound { + f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue) + } else { + f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) + } + revno[dkey] = info.Revno + f.queue[dkey] = info.Queue + continue NextDoc + } + } + + // The stash wasn't valid and tt got overwriten. Try again. + f.unstashToken(tt, dkey) + goto RetryDoc + } + + // Save the prepared nonce onto t. + nonce := tt.nonce() + qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}} + udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}} + chaos("set-prepared") + err = f.tc.Update(qdoc, udoc) + if err == nil { + t.State = tprepared + t.Nonce = nonce + } else if err == mgo.ErrNotFound { + f.debugf("Can't save nonce of %s: LOST RACE", tt) + if err := f.reload(t); err != nil { + return nil, err + } else if t.State == tpreparing { + panic("can't save nonce yet transaction is still preparing") + } else if t.State != tprepared { + return t.Revnos, nil + } + tt = t.token() + } else if err != nil { + return nil, err + } + + prereqs, found := f.hasPreReqs(tt, dkeys) + if !found { + // Must only happen when reloading above. + return f.rescan(t, force) + } else if prereqs && !force { + f.debugf("Prepared queue with %s [has prereqs & not forced].", tt) + return nil, errPreReqs + } + for _, op := range t.Ops { + dkey := op.docKey() + revnos = append(revnos, revno[dkey]) + drevno := revno[dkey] + switch { + case op.Insert != nil && drevno < 0: + revno[dkey] = -drevno+1 + case op.Update != nil && drevno >= 0: + revno[dkey] = drevno+1 + case op.Remove && drevno >= 0: + revno[dkey] = -drevno-1 + } + } + if !prereqs { + f.debugf("Prepared queue with %s [no prereqs]. 
Revnos: %v", tt, revnos) + } else { + f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos) + } + return revnos, nil +} + +func (f *flusher) unstashToken(tt token, dkey docKey) error { + qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}} + udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}} + chaos("") + if err := f.sc.Update(qdoc, udoc); err == nil { + chaos("") + err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}}) + } else if err != mgo.ErrNotFound { + return err + } + return nil +} + +func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) { + f.debugf("Rescanning %s", t) + if t.State != tprepared { + panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State)) + } + + // Iterate in a stable way across all runners. This isn't + // strictly required, but reduces the chances of cycles. + dkeys := t.docKeys() + sort.Sort(dkeys) + + tt := t.token() + if !force { + prereqs, found := f.hasPreReqs(tt, dkeys) + if found && prereqs { + // Its state is already known. + return nil, errPreReqs + } + } + + revno := make(map[docKey]int64) + info := txnInfo{} + for _, dkey := range dkeys { + retry := 0 + + RetryDoc: + c := f.tc.Database.C(dkey.C) + if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound { + // Document is missing. Look in stash. + if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound { + // Stash also doesn't exist. Maybe someone applied it. + if err := f.reload(t); err != nil { + return nil, err + } else if t.State != tprepared { + return t.Revnos, err + } + // Not applying either. + retry++ + if retry < 3 { + // Retry since there might be an insert/remove race. + goto RetryDoc + } + // Neither the doc nor the stash seem to exist. + return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t) + } else if err != nil { + return nil, err + } + // Stash found. + if info.Insert != "" { + // Handle insert in progress before assuming ordering is good. + if err := f.loadAndApply(info.Insert); err != nil { + return nil, err + } + goto RetryDoc + } + if info.Revno == 0 { + // Missing revno in the stash means -1. + info.Revno = -1 + } + } else if err != nil { + return nil, err + } else if info.Remove != "" { + // Handle remove in progress before assuming ordering is good. + if err := f.loadAndApply(info.Remove); err != nil { + return nil, err + } + goto RetryDoc + } + revno[dkey] = info.Revno + + found := false + for _, id := range info.Queue { + if id == tt { + found = true + break + } + } + f.queue[dkey] = info.Queue + if !found { + // Previously set txn-queue was popped by someone. + // Transaction is being/has been applied elsewhere. 
+ f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue) + err := f.reload(t) + if t.State == tpreparing || t.State == tprepared { + panic("rescanned document misses transaction in queue") + } + return t.Revnos, err + } + } + + prereqs, found := f.hasPreReqs(tt, dkeys) + if !found { + panic("rescanning loop guarantees that this can't happen") + } else if prereqs && !force { + f.debugf("Rescanned queue with %s: has prereqs, not forced", tt) + return nil, errPreReqs + } + for _, op := range t.Ops { + dkey := op.docKey() + revnos = append(revnos, revno[dkey]) + if op.isChange() { + revno[dkey] += 1 + } + } + if !prereqs { + f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos) + } else { + f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos) + } + return revnos, nil +} + +func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) { + found = true +NextDoc: + for _, dkey := range dkeys { + for _, dtt := range f.queue[dkey] { + if dtt == tt { + continue NextDoc + } else if dtt.id() != tt.id() { + prereqs = true + } + } + found = false + } + return +} + +func (f *flusher) reload(t *transaction) error { + var newt transaction + query := f.tc.FindId(t.Id) + query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}}) + if err := query.One(&newt); err != nil { + return fmt.Errorf("failed to reload transaction: %v", err) + } + t.State = newt.State + t.Nonce = newt.Nonce + t.Revnos = newt.Revnos + f.debugf("Reloaded %s: %q", t, t.State) + return nil +} + +func (f *flusher) loadAndApply(id bson.ObjectId) error { + t, err := f.load(id) + if err != nil { + return err + } + return f.advance(t, nil, true) +} + +// assert verifies that all assertions in t match the content that t +// will be applied upon. If an assertion fails, the transaction state +// is changed to aborted. +func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error { + f.debugf("Asserting %s with revnos %v", t, revnos) + if t.State != tprepared { + panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State)) + } + qdoc := make(bson.D, 3) + revno := make(map[docKey]int64) + for i, op := range t.Ops { + dkey := op.docKey() + if _, ok := revno[dkey]; !ok { + revno[dkey] = revnos[i] + } + if op.Assert == nil { + continue + } + if op.Assert == DocMissing { + if revnos[i] >= 0 { + return f.abortOrReload(t, revnos, pull) + } + continue + } + if op.Insert != nil { + return fmt.Errorf("Insert can only Assert txn.DocMissing", op.Assert) + } + // if revnos[i] < 0 { abort }? + + qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id}) + if op.Assert != DocMissing { + var revnoq interface{} + if n := revno[dkey]; n == 0 { + revnoq = bson.D{{"$exists", false}} + } else { + revnoq = n + } + // XXX Add tt to the query here, once we're sure it's all working. + // Not having it increases the chances of breaking on bad logic. + qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq}) + if op.Assert != DocExists { + qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}}) + } + } + + c := f.tc.Database.C(op.C) + if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound { + // Assertion failed or someone else started applying. 
+ return f.abortOrReload(t, revnos, pull) + } else if err != nil { + return err + } + } + f.debugf("Asserting %s succeeded", t) + return nil +} + +func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) { + f.debugf("Aborting or reloading %s (was %q)", t, t.State) + if t.State == tprepared { + qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} + udoc := bson.D{{"$set", bson.D{{"s", taborting}}}} + chaos("set-aborting") + if err = f.tc.Update(qdoc, udoc); err == nil { + t.State = taborting + } else if err == mgo.ErrNotFound { + if err = f.reload(t); err != nil || t.State != taborting { + f.debugf("Won't abort %s. Reloaded state: %q", t, t.State) + return err + } + } else { + return err + } + } else if t.State != taborting { + panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State)) + } + + if len(revnos) > 0 { + if pull == nil { + pull = map[bson.ObjectId]*transaction{t.Id: t} + } + seen := make(map[docKey]bool) + for i, op := range t.Ops { + dkey := op.docKey() + if seen[op.docKey()] { + continue + } + seen[dkey] = true + + pullAll := tokensToPull(f.queue[dkey], pull, "") + if len(pullAll) == 0 { + continue + } + udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}} + chaos("") + if revnos[i] < 0 { + err = f.sc.UpdateId(dkey, udoc) + } else { + c := f.tc.Database.C(dkey.C) + err = c.UpdateId(dkey.Id, udoc) + } + if err != nil && err != mgo.ErrNotFound { + return err + } + } + } + udoc := bson.D{{"$set", bson.D{{"s", taborted}}}} + chaos("set-aborted") + if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound { + return err + } + t.State = taborted + f.debugf("Aborted %s", t) + return nil +} + +func (f *flusher) checkpoint(t *transaction, revnos []int64) error { + var debugRevnos map[docKey][]int64 + if debugEnabled { + debugRevnos = make(map[docKey][]int64) + for i, op := range t.Ops { + dkey := op.docKey() + debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i]) + } + f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos) + } + + // Save in t the txn-revno values the transaction must run on. + qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} + udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}} + chaos("set-applying") + err := f.tc.Update(qdoc, udoc) + if err == nil { + t.State = tapplying + t.Revnos = revnos + f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos) + } else if err == mgo.ErrNotFound { + f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos) + return f.reload(t) + } + return nil +} + +func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error { + f.debugf("Applying transaction %s", t) + if t.State != tapplying { + panic(fmt.Errorf("applying transaction in invalid state: %q", t.State)) + } + if pull == nil { + pull = map[bson.ObjectId]*transaction{t.Id: t} + } + + // Compute the operation in which t's id may be pulled + // out of txn-queue. That's on its last change, or the + // first assertion. + pullOp := make(map[docKey]int) + for i := range t.Ops { + op := &t.Ops[i] + dkey := op.docKey() + if _, ok := pullOp[dkey]; !ok || op.isChange() { + pullOp[dkey] = i + } + } + + logRevnos := append([]int64(nil), t.Revnos...) 
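+ // logRevnos records the revno each op leaves behind; together with + // logDoc below it builds the per-collection changelog entry written + // when a ChangeLog collection is set ("d" holds ids, "r" revnos).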
+ logDoc := bson.D{{"_id", t.Id}} + + tt := tokenFor(t) + for i := range t.Ops { + op := &t.Ops[i] + dkey := op.docKey() + dqueue := f.queue[dkey] + revno := t.Revnos[i] + + var opName string + if debugEnabled { + opName = op.name() + f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno) + } + + c := f.tc.Database.C(op.C) + + qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}} + if op.Insert != nil { + qdoc[0].Value = dkey + if revno == -1 { + qdoc[1].Value = bson.D{{"$exists", false}} + } + } else if revno == 0 { + // There's no document with revno 0. The only way to see it is + // when an existent document participates in a transaction the + // first time. Txn-inserted documents get revno -1 while in the + // stash for the first time, and -revno+1 == 2 when they go live. + qdoc[1].Value = bson.D{{"$exists", false}} + } + + dontPull := tt + isPullOp := pullOp[dkey] == i + if isPullOp { + dontPull = "" + } + pullAll := tokensToPull(dqueue, pull, dontPull) + + var d bson.D + var outcome string + var err error + switch { + case op.Update != nil: + if revno < 0 { + err = mgo.ErrNotFound + f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed") + } else { + newRevno := revno + 1 + logRevnos[i] = newRevno + if d, err = objToDoc(op.Update); err != nil { + return err + } + if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil { + return err + } + if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil { + return err + } + chaos("") + err = c.Update(qdoc, d) + } + case op.Remove: + if revno < 0 { + err = mgo.ErrNotFound + } else { + newRevno := -revno - 1 + logRevnos[i] = newRevno + nonce := newNonce() + stash := txnInfo{} + change := mgo.Change{ + Update: bson.D{{"$push", bson.D{{"n", nonce}}}}, + Upsert: true, + ReturnNew: true, + } + if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil { + return err + } + change = mgo.Change{ + Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}}, + ReturnNew: true, + } + var info txnInfo + if _, err = c.Find(qdoc).Apply(change, &info); err == nil { + // The document still exists so the stash previously + // observed was either out of date or necessarily + // contained the token being applied. + f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue) + updated := false + if !hasToken(stash.Queue, tt) { + var set, unset bson.D + if revno == 0 { + // Missing revno in stash means -1.
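+ // Keep that implicit: carry the queue over and unset + // txn-revno rather than storing the -1 explicitly.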
+ set = bson.D{{"txn-queue", info.Queue}} + unset = bson.D{{"n", 1}, {"txn-revno", 1}} + } else { + set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}} + unset = bson.D{{"n", 1}} + } + qdoc := bson.D{{"_id", dkey}, {"n", nonce}} + udoc := bson.D{{"$set", set}, {"$unset", unset}} + if err = f.sc.Update(qdoc, udoc); err == nil { + updated = true + } else if err != mgo.ErrNotFound { + return err + } + } + if updated { + f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue) + } else { + f.debugf("Stash for document %v was up-to-date", dkey) + } + err = c.Remove(qdoc) + } + } + case op.Insert != nil: + if revno >= 0 { + err = mgo.ErrNotFound + } else { + newRevno := -revno + 1 + logRevnos[i] = newRevno + if d, err = objToDoc(op.Insert); err != nil { + return err + } + change := mgo.Change{ + Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}}, + ReturnNew: true, + } + chaos("") + var info txnInfo + if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil { + f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue) + d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}}) + // Unlikely yet unfortunate race in here if this gets seriously + // delayed. If someone inserts+removes meanwhile, this will + // reinsert, and there's no way to avoid that while keeping the + // collection clean or compromising sharding. applyOps can solve + // the former, but it can't shard (SERVER-1439). + chaos("insert") + err = c.Insert(d) + if err == nil || mgo.IsDup(err) { + if err == nil { + f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue) + } else { + f.debugf("Document %v already existed", dkey) + } + chaos("") + if err = f.sc.Remove(qdoc); err == nil { + f.debugf("Stash for document %v removed", dkey) + } + } + if pullOp[dkey] == i && len(pullAll) > 0 { + _ = f.sc.UpdateId(dkey, bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}) + } + } + } + case op.Assert != nil: + // TODO pullAll if pullOp[dkey] == i + } + if err == nil { + outcome = "DONE" + } else if err == mgo.ErrNotFound || mgo.IsDup(err) { + outcome = "MISS" + err = nil + } else { + outcome = err.Error() + } + if debugEnabled { + f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome) + } + if err != nil { + return err + } + + if f.lc != nil && op.isChange() { + // Add change to the log document. + var dr bson.D + for li := range logDoc { + elem := &logDoc[li] + if elem.Name == op.C { + dr = elem.Value.(bson.D) + break + } + } + if dr == nil { + logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}}) + dr = logDoc[len(logDoc)-1].Value.(bson.D) + } + dr[0].Value = append(dr[0].Value.([]interface{}), op.Id) + dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i]) + } + } + t.State = tapplied + + if f.lc != nil { + // Insert log document into the changelog collection. + f.debugf("Inserting %s into change log", t) + err := f.lc.Insert(logDoc) + if err != nil && !mgo.IsDup(err) { + return err + } + } + + // It's been applied, so errors are ignored here. It's fine for someone + // else to win the race and mark it as applied, and it's also fine for + // it to remain pending until a later point when someone will perceive + // it has been applied and mark it at such. 
+ f.debugf("Marking %s as applied", t) + chaos("set-applied") + f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}}) + return nil +} + +func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token { + var result []token + for j := len(dqueue) - 1; j >= 0; j-- { + dtt := dqueue[j] + if dt, ok := pull[dtt.id()]; ok { + if dt.Nonce == dtt.nonce() { + // It's valid and is being pulled out, so everything + // preceding it must have been handled already. + if dtt == dontPull { + // Not time to pull this one out yet. + j-- + } + result = append(result, dqueue[:j+1]...) + break + } + // It was handled before and this is a leftover invalid + // nonce in the queue. Cherry-pick it out. + result = append(result, dtt) + } + } + return result +} + +func objToDoc(obj interface{}) (d bson.D, err error) { + data, err := bson.Marshal(obj) + if err != nil { + return nil, err + } + err = bson.Unmarshal(data, &d) + if err != nil { + return nil, err + } + return d, err +} + +func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) { + for i := range doc { + elem := &doc[i] + if elem.Name != key { + continue + } + if old, ok := elem.Value.(bson.D); ok { + elem.Value = append(old, add...) + return doc, nil + } else { + return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value) + } + } + return append(doc, bson.DocElem{key, add}), nil +} + +func setInDoc(doc bson.D, set bson.D) bson.D { + dlen := len(doc) +NextS: + for s := range set { + sname := set[s].Name + for d := 0; d < dlen; d++ { + if doc[d].Name == sname { + doc[d].Value = set[s].Value + continue NextS + } + } + doc = append(doc, set[s]) + } + return doc +} + +func hasToken(tokens []token, tt token) bool { + for _, ttt := range tokens { + if ttt == tt { + return true + } + } + return false +} + +func (f *flusher) debugf(format string, args ...interface{}) { + if !debugEnabled { + return + } + debugf(f.debugId+format, args...) +} diff --git a/vendor/labix.org/v2/mgo/txn/mgo_test.go b/vendor/labix.org/v2/mgo/txn/mgo_test.go new file mode 100644 index 0000000..ce5d9d0 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/mgo_test.go @@ -0,0 +1,101 @@ +package txn_test + +import ( + "bytes" + "labix.org/v2/mgo" + . "launchpad.net/gocheck" + "os/exec" + "time" +) + +// ---------------------------------------------------------------------------- +// The mgo test suite + +type MgoSuite struct { + output bytes.Buffer + server *exec.Cmd + session *mgo.Session +} + +var mgoaddr = "127.0.0.1:50017" + +func (s *MgoSuite) SetUpSuite(c *C) { + //mgo.SetDebug(true) + mgo.SetStats(true) + dbdir := c.MkDir() + args := []string{ + "--dbpath", dbdir, + "--bind_ip", "127.0.0.1", + "--port", "50017", + "--nssize", "1", + "--noprealloc", + "--smallfiles", + "--nojournal", + "-vvvvv", + } + s.server = exec.Command("mongod", args...) 
+ s.server.Stdout = &s.output + s.server.Stderr = &s.output + err := s.server.Start() + if err != nil { + panic(err) + } +} + +func (s *MgoSuite) TearDownSuite(c *C) { + s.server.Process.Kill() + s.server.Process.Wait() +} + +func (s *MgoSuite) SetUpTest(c *C) { + err := DropAll(mgoaddr) + if err != nil { + panic(err) + } + mgo.SetLogger(c) + mgo.ResetStats() + + s.session, err = mgo.Dial(mgoaddr) + c.Assert(err, IsNil) +} + +func (s *MgoSuite) TearDownTest(c *C) { + if s.session != nil { + s.session.Close() + } + for i := 0; ; i++ { + stats := mgo.GetStats() + if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { + break + } + if i == 20 { + c.Fatal("Test left sockets in a dirty state") + } + c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) + time.Sleep(500 * time.Millisecond) + } +} + +func DropAll(mongourl string) (err error) { + session, err := mgo.Dial(mongourl) + if err != nil { + return err + } + defer session.Close() + + names, err := session.DatabaseNames() + if err != nil { + return err + } + for _, name := range names { + switch name { + case "admin", "local", "config": + default: + err = session.DB(name).DropDatabase() + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/labix.org/v2/mgo/txn/sim_test.go b/vendor/labix.org/v2/mgo/txn/sim_test.go new file mode 100644 index 0000000..312eed9 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/sim_test.go @@ -0,0 +1,389 @@ +package txn_test + +import ( + "flag" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + "labix.org/v2/mgo/txn" + . "launchpad.net/gocheck" + "math/rand" + "time" +) + +var ( + duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation") + seed = flag.Int64("seed", 0, "seed for rand") +) + +type params struct { + killChance float64 + slowdownChance float64 + slowdown time.Duration + + unsafe bool + workers int + accounts int + changeHalf bool + reinsertCopy bool + reinsertZeroed bool + changelog bool + + changes int +} + +func (s *S) TestSim1Worker(c *C) { + simulate(c, params{ + workers: 1, + accounts: 4, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSim4WorkersDense(c *C) { + simulate(c, params{ + workers: 4, + accounts: 2, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSim4WorkersSparse(c *C) { + simulate(c, params{ + workers: 4, + accounts: 10, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimHalf1Worker(c *C) { + simulate(c, params{ + workers: 1, + accounts: 4, + changeHalf: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimHalf4WorkersDense(c *C) { + simulate(c, params{ + workers: 4, + accounts: 2, + changeHalf: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimHalf4WorkersSparse(c *C) { + simulate(c, params{ + workers: 4, + accounts: 10, + changeHalf: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimReinsertCopy1Worker(c *C) { + simulate(c, params{ + workers: 1, + accounts: 10, + reinsertCopy: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimReinsertCopy4Workers(c *C) { + simulate(c, params{ + workers: 4, + accounts: 10, + reinsertCopy: true, + killChance: 0.01, + 
slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimReinsertZeroed1Worker(c *C) { + simulate(c, params{ + workers: 1, + accounts: 10, + reinsertZeroed: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimReinsertZeroed4Workers(c *C) { + simulate(c, params{ + workers: 4, + accounts: 10, + reinsertZeroed: true, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + }) +} + +func (s *S) TestSimChangeLog(c *C) { + simulate(c, params{ + workers: 4, + accounts: 10, + killChance: 0.01, + slowdownChance: 0.3, + slowdown: 100 * time.Millisecond, + changelog: true, + }) +} + + +type balanceChange struct { + id bson.ObjectId + origin int + target int + amount int +} + +func simulate(c *C, params params) { + seed := *seed + if seed == 0 { + seed = time.Now().UnixNano() + } + rand.Seed(seed) + c.Logf("Seed: %v", seed) + + txn.SetChaos(txn.Chaos{ + KillChance: params.killChance, + SlowdownChance: params.slowdownChance, + Slowdown: params.slowdown, + }) + defer txn.SetChaos(txn.Chaos{}) + + session, err := mgo.Dial(mgoaddr) + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("test") + tc := db.C("tc") + + runner := txn.NewRunner(tc) + + tclog := db.C("tc.log") + if params.changelog { + info := mgo.CollectionInfo{ + Capped: true, + MaxBytes: 1000000, + } + err := tclog.Create(&info) + c.Assert(err, IsNil) + runner.ChangeLog(tclog) + } + + accounts := db.C("accounts") + for i := 0; i < params.accounts; i++ { + err := accounts.Insert(M{"_id": i, "balance": 300}) + c.Assert(err, IsNil) + } + var stop time.Time + if params.changes <= 0 { + stop = time.Now().Add(*duration) + } + + max := params.accounts + if params.reinsertCopy || params.reinsertZeroed { + max = int(float64(params.accounts) * 1.5) + } + + changes := make(chan balanceChange, 1024) + + //session.SetMode(mgo.Eventual, true) + for i := 0; i < params.workers; i++ { + go func() { + n := 0 + for { + if n > 0 && n == params.changes { + break + } + if !stop.IsZero() && time.Now().After(stop) { + break + } + + change := balanceChange{ + id: bson.NewObjectId(), + origin: rand.Intn(max), + target: rand.Intn(max), + amount: 100, + } + + var old Account + var oldExists bool + if params.reinsertCopy || params.reinsertZeroed { + if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound { + c.Check(err, IsNil) + change.amount = old.Balance + oldExists = true + } + } + + var ops []txn.Op + switch { + case params.reinsertCopy && oldExists: + ops = []txn.Op{{ + C: "accounts", + Id: change.origin, + Assert: M{"balance": change.amount}, + Remove: true, + }, { + C: "accounts", + Id: change.target, + Assert: txn.DocMissing, + Insert: M{"balance": change.amount}, + }} + case params.reinsertZeroed && oldExists: + ops = []txn.Op{{ + C: "accounts", + Id: change.target, + Assert: txn.DocMissing, + Insert: M{"balance": 0}, + }, { + C: "accounts", + Id: change.origin, + Assert: M{"balance": change.amount}, + Remove: true, + }, { + C: "accounts", + Id: change.target, + Assert: txn.DocExists, + Update: M{"$inc": M{"balance": change.amount}}, + }} + case params.changeHalf: + ops = []txn.Op{{ + C: "accounts", + Id: change.origin, + Assert: M{"balance": M{"$gte": change.amount}}, + Update: M{"$inc": M{"balance": -change.amount / 2}}, + }, { + C: "accounts", + Id: change.target, + Assert: txn.DocExists, + Update: M{"$inc": M{"balance": change.amount / 2}}, + }, { + C: "accounts", + Id: change.origin, + Update: M{"$inc": 
M{"balance": -change.amount / 2}}, + }, { + C: "accounts", + Id: change.target, + Update: M{"$inc": M{"balance": change.amount / 2}}, + }} + default: + ops = []txn.Op{{ + C: "accounts", + Id: change.origin, + Assert: M{"balance": M{"$gte": change.amount}}, + Update: M{"$inc": M{"balance": -change.amount}}, + }, { + C: "accounts", + Id: change.target, + Assert: txn.DocExists, + Update: M{"$inc": M{"balance": change.amount}}, + }} + } + + err = runner.Run(ops, change.id, nil) + if err != nil && err != txn.ErrAborted && err != txn.ErrChaos { + c.Check(err, IsNil) + } + n++ + changes <- change + } + changes <- balanceChange{} + }() + } + + alive := params.workers + changeLog := make([]balanceChange, 0, 1024) + for alive > 0 { + change := <-changes + if change.id == "" { + alive-- + } else { + changeLog = append(changeLog, change) + } + } + c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted.")) + + txn.SetChaos(txn.Chaos{}) + err = runner.ResumeAll() + c.Assert(err, IsNil) + + n, err := accounts.Count() + c.Check(err, IsNil) + c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed.")) + + n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count() + c.Check(err, IsNil) + c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n)) + + globalBalance := 0 + iter := accounts.Find(nil).Iter() + account := Account{} + for iter.Next(&account) { + globalBalance += account.Balance + } + c.Check(iter.Close(), IsNil) + c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant.")) + + // Compute and verify the exact final state of all accounts. + balance := make(map[int]int) + for i := 0; i < params.accounts; i++ { + balance[i] += 300 + } + var applied, aborted int + for _, change := range changeLog { + err := runner.Resume(change.id) + if err == txn.ErrAborted { + aborted++ + continue + } else if err != nil { + c.Fatalf("resuming %s failed: %v", change.id, err) + } + balance[change.origin] -= change.amount + balance[change.target] += change.amount + applied++ + } + iter = accounts.Find(nil).Iter() + for iter.Next(&account) { + c.Assert(account.Balance, Equals, balance[account.Id]) + } + c.Check(iter.Close(), IsNil) + c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted) + + if params.changelog { + n, err := tclog.Count() + c.Assert(err, IsNil) + // Check if the capped collection is full. + dummy := make([]byte, 1024) + tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy}) + m, err := tclog.Count() + c.Assert(err, IsNil) + if m == n+1 { + // Wasn't full, so it must have seen it all. + c.Assert(err, IsNil) + c.Assert(n, Equals, applied) + } + } +} diff --git a/vendor/labix.org/v2/mgo/txn/tarjan.go b/vendor/labix.org/v2/mgo/txn/tarjan.go new file mode 100644 index 0000000..a5ac0b0 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/tarjan.go @@ -0,0 +1,96 @@ +package txn + +import ( + "labix.org/v2/mgo/bson" + "sort" +) + +func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId { + // http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + data := &tarjanData{ + successors: successors, + nodes: make([]tarjanNode, 0, len(successors)), + index: make(map[bson.ObjectId]int, len(successors)), + } + + // Sort all nodes to stabilize the logic. 
+ var all []string + for id := range successors { + all = append(all, string(id)) + } + sort.Strings(all) + for _, strid := range all { + id := bson.ObjectId(strid) + if _, seen := data.index[id]; !seen { + data.strongConnect(id) + } + } + return data.output +} + +type tarjanData struct { + successors map[bson.ObjectId][]bson.ObjectId + output [][]bson.ObjectId + + nodes []tarjanNode + stack []bson.ObjectId + index map[bson.ObjectId]int +} + +type tarjanNode struct { + lowlink int + stacked bool +} + +type idList []bson.ObjectId + +func (l idList) Len() int { return len(l) } +func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l idList) Less(i, j int) bool { return l[i] < l[j] } + +func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode { + index := len(data.nodes) + data.index[id] = index + data.stack = append(data.stack, id) + data.nodes = append(data.nodes, tarjanNode{index, true}) + node := &data.nodes[index] + + // Sort to stabilize the algorithm. + succids := idList(data.successors[id]) + sort.Sort(succids) + for _, succid := range succids { + succindex, seen := data.index[succid] + if !seen { + succnode := data.strongConnect(succid) + if succnode.lowlink < node.lowlink { + node.lowlink = succnode.lowlink + } + } else if data.nodes[succindex].stacked { + // Part of the current strongly-connected component. + if succindex < node.lowlink { + node.lowlink = succindex + } + } + } + + if node.lowlink == index { + // Root node; pop stack and output new + // strongly-connected component. + var scc []bson.ObjectId + i := len(data.stack) - 1 + for { + stackid := data.stack[i] + stackindex := data.index[stackid] + data.nodes[stackindex].stacked = false + scc = append(scc, stackid) + if stackindex == index { + break + } + i-- + } + data.stack = data.stack[:i] + data.output = append(data.output, scc) + } + + return node +} diff --git a/vendor/labix.org/v2/mgo/txn/tarjan_test.go b/vendor/labix.org/v2/mgo/txn/tarjan_test.go new file mode 100644 index 0000000..c422605 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/tarjan_test.go @@ -0,0 +1,44 @@ +package txn + +import ( + "fmt" + "labix.org/v2/mgo/bson" + . "launchpad.net/gocheck" +) + +type TarjanSuite struct{} + +var _ = Suite(TarjanSuite{}) + +func bid(n int) bson.ObjectId { + return bson.ObjectId(fmt.Sprintf("%024d", n)) +} + +func bids(ns ...int) (ids []bson.ObjectId) { + for _, n := range ns { + ids = append(ids, bid(n)) + } + return +} + +func (TarjanSuite) TestExample(c *C) { + successors := map[bson.ObjectId][]bson.ObjectId{ + bid(1): bids(2), + bid(2): bids(1, 5), + bid(3): bids(4), + bid(4): bids(3, 5), + bid(5): bids(6), + bid(6): bids(7), + bid(7): bids(8), + bid(8): bids(6, 9), + bid(9): bids(), + } + + c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{ + bids(9), + bids(8, 7, 6), + bids(5), + bids(2, 1), + bids(4, 3), + }) +} diff --git a/vendor/labix.org/v2/mgo/txn/txn.go b/vendor/labix.org/v2/mgo/txn/txn.go new file mode 100644 index 0000000..e798e65 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/txn.go @@ -0,0 +1,518 @@ +// The txn package implements support for multi-document transactions. 
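+//
+// As a rough usage sketch (collection and field names here are
+// illustrative, and the usual mgo and mgo/bson imports are assumed),
+// a transfer between two account documents could be run as:
+//
+//	runner := txn.NewRunner(db.C("txns"))
+//	ops := []txn.Op{{
+//		C:      "accounts",
+//		Id:     "aram",
+//		Assert: bson.M{"balance": bson.M{"$gte": 100}},
+//		Update: bson.M{"$inc": bson.M{"balance": -100}},
+//	}, {
+//		C:      "accounts",
+//		Id:     "ben",
+//		Assert: txn.DocExists,
+//		Update: bson.M{"$inc": bson.M{"balance": 100}},
+//	}}
+//	err := runner.Run(ops, "", nil) // err == ErrAborted if an Assert fails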
+// +// For details check the following blog post: +// +// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb +// +package txn + +import ( + "encoding/binary" + "fmt" + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + "reflect" + "sort" + "sync" + + crand "crypto/rand" + mrand "math/rand" +) + +type state int + +const ( + tpreparing state = 1 // One or more documents not prepared + tprepared state = 2 // Prepared but not yet ready to run + taborting state = 3 // Assertions failed, cleaning up + tapplying state = 4 // Changes are in progress + taborted state = 5 // Pre-conditions failed, nothing done + tapplied state = 6 // All changes applied +) + +func (s state) String() string { + switch s { + case tpreparing: + return "preparing" + case tprepared: + return "prepared" + case taborting: + return "aborting" + case tapplying: + return "applying" + case taborted: + return "aborted" + case tapplied: + return "applied" + } + panic(fmt.Errorf("unknown state: %d", s)) +} + +var rand *mrand.Rand +var randmu sync.Mutex + +func init() { + var seed int64 + err := binary.Read(crand.Reader, binary.BigEndian, &seed) + if err != nil { + panic(err) + } + rand = mrand.New(mrand.NewSource(seed)) +} + +type transaction struct { + Id bson.ObjectId `bson:"_id"` + State state `bson:"s"` + Info interface{} `bson:"i,omitempty"` + Ops []Op `bson:"o"` + Nonce string `bson:"n,omitempty"` + Revnos []int64 `bson:"r,omitempty"` + + docKeysCached docKeys +} + +func (t *transaction) String() string { + if t.Nonce == "" { + return t.Id.Hex() + } + return string(t.token()) +} + +func (t *transaction) done() bool { + return t.State == tapplied || t.State == taborted +} + +func (t *transaction) token() token { + if t.Nonce == "" { + panic("transaction has no nonce") + } + return tokenFor(t) +} + +func (t *transaction) docKeys() docKeys { + if t.docKeysCached != nil { + return t.docKeysCached + } + dkeys := make(docKeys, 0, len(t.Ops)) +NextOp: + for _, op := range t.Ops { + dkey := op.docKey() + for i := range dkeys { + if dkey == dkeys[i] { + continue NextOp + } + } + dkeys = append(dkeys, dkey) + } + sort.Sort(dkeys) + t.docKeysCached = dkeys + return dkeys +} + +// tokenFor returns a unique transaction token that +// is composed of t's id and a nonce. If t already has +// a nonce assigned to it, it will be used, otherwise +// a new nonce will be generated. +func tokenFor(t *transaction) token { + nonce := t.Nonce + if nonce == "" { + nonce = newNonce() + } + return token(t.Id.Hex() + "_" + nonce) +} + +func newNonce() string { + randmu.Lock() + r := rand.Uint32() + randmu.Unlock() + n := make([]byte, 8) + for i := uint(0); i < 8; i++ { + n[i] = "0123456789abcdef"[(r>>(4*i))&0xf] + } + return string(n) +} + +type token string + +func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) } +func (tt token) nonce() string { return string(tt[25:]) } + +// Op represents an operation on a single document that may be +// applied as part of a transaction with other operations. +type Op struct { + // C and Id identify the collection and document this operation + // refers to. Id is matched against the "_id" document field. + C string `bson:"c"` + Id interface{} `bson:"d"` + + // Assert optionally holds a query document that is used to + // test the operation document at the time the transaction is + // going to be applied. The assertions for all operations in + // a transaction are tested before any changes take place, + // and the transaction is entirely aborted if any of them + // fails.
This is also the only way to prevent a transaction + // from being applied (the transaction continues despite + // the outcome of Insert, Update, and Remove). + Assert interface{} `bson:"a,omitempty"` + + // The Insert, Update and Remove fields describe the mutation + // intended by the operation. At most one of them may be set + // per operation. If none are set, Assert must be set and the + // operation becomes a read-only test. + // + // Insert holds the document to be inserted at the time the + // transaction is applied. The Id field will be inserted + // into the document automatically as its _id field. The + // transaction will continue even if the document already + // exists. Use Assert with txn.DocMissing if the insertion is + // required. + // + // Update holds the update document to be applied at the time + // the transaction is applied. The transaction will continue + // even if a document with Id is missing. Use Assert to + // test for the document presence or its contents. + // + // Remove indicates whether to remove the document with Id. + // The transaction continues even if the document doesn't yet + // exist at the time the transaction is applied. Use Assert + // with txn.DocExists to make sure it will be removed. + Insert interface{} `bson:"i,omitempty"` + Update interface{} `bson:"u,omitempty"` + Remove bool `bson:"r,omitempty"` +} + +func (op *Op) isChange() bool { + return op.Update != nil || op.Insert != nil || op.Remove +} + +func (op *Op) docKey() docKey { + return docKey{op.C, op.Id} +} + +func (op *Op) name() string { + switch { + case op.Update != nil: + return "update" + case op.Insert != nil: + return "insert" + case op.Remove: + return "remove" + case op.Assert != nil: + return "assert" + } + return "none" +} + +const ( + // DocExists and DocMissing may be used on an operation's + // Assert value to assert that the document with the given + // Id exists or does not exist, respectively. + DocExists = "d+" + DocMissing = "d-" +) + +// A Runner applies operations as part of a transaction onto any number +// of collections within a database. See the Run method for details. +type Runner struct { + tc *mgo.Collection // txns + sc *mgo.Collection // stash + lc *mgo.Collection // log +} + +// NewRunner returns a new transaction runner that uses tc to hold its +// transactions. +// +// Multiple transaction collections may exist in a single database, but +// all collections that are touched by operations in a given transaction +// collection must be handled exclusively by it. +// +// A second collection with the same name as tc but suffixed by ".stash" +// will be used for implementing the transactional behavior of insert +// and remove operations. +func NewRunner(tc *mgo.Collection) *Runner { + return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil} +} + +var ErrAborted = fmt.Errorf("transaction aborted") + +// Run creates a new transaction with ops and runs it immediately. +// The id parameter specifies the transaction id, and may be written +// down ahead of time to later verify the success of the change and +// resume it, when the procedure is interrupted for any reason. If +// empty, a random id will be generated. +// The info parameter, if not nil, is included under the "i" +// field of the transaction document. +// +// Operations across documents are not atomically applied, but are +// guaranteed to be eventually all applied in the order provided or +// all aborted, as long as the affected documents are only modified +// through transactions.
If documents are simultaneously modified +// by transactions and out of transactions the behavior is undefined. +// +// If Run returns no errors, all operations were applied successfully. +// If it returns ErrAborted, one or more operations can't be applied +// and the transaction was entirely aborted with no changes performed. +// Otherwise, if the transaction is interrupted while running for any +// reason, it may be resumed explicitly or by attempting to apply +// another transaction on any of the documents targeted by ops, as +// long as the interruption was made after the transaction document +// itself was inserted. Call Resume with the obtained transaction id +// to confirm whether the transaction was applied or not. +// +// Any number of transactions may be run concurrently, with one +// runner or many. +func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) { + const efmt = "error in transaction op %d: %s" + for i := range ops { + op := &ops[i] + if op.C == "" || op.Id == nil { + return fmt.Errorf(efmt, i, "C or Id missing") + } + changes := 0 + if op.Insert != nil { + changes++ + } + if op.Update != nil { + changes++ + } + if op.Remove { + changes++ + } + if changes > 1 { + return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set") + } + if changes == 0 && op.Assert == nil { + return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set") + } + } + if id == "" { + id = bson.NewObjectId() + } + + // Insert transaction sooner rather than later, to stay on the safer side. + t := transaction{ + Id: id, + Ops: ops, + State: tpreparing, + Info: info, + } + if err = r.tc.Insert(&t); err != nil { + return err + } + if err = flush(r, &t); err != nil { + return err + } + if t.State == taborted { + return ErrAborted + } else if t.State != tapplied { + panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) + } + return nil +} + +// ResumeAll resumes all pending transactions. All ErrAborted errors +// from individual transactions are ignored. +func (r *Runner) ResumeAll() (err error) { + debugf("Resuming all unfinished transactions") + iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter() + var t transaction + for iter.Next(&t) { + if t.State == tapplied || t.State == taborted { + continue + } + debugf("Resuming %s from %q", t.Id, t.State) + if err := flush(r, &t); err != nil { + return err + } + if !t.done() { + panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) + } + } + return nil +} + +// Resume resumes the transaction with id. It returns an error if the +// transaction is not found. Otherwise, it has the same semantics as +// the Run method after the transaction is inserted. +func (r *Runner) Resume(id bson.ObjectId) (err error) { + t, err := r.load(id) + if err != nil { + return err + } + if !t.done() { + debugf("Resuming %s from %q", t, t.State) + if err := flush(r, t); err != nil { + return err + } + } + if t.State == taborted { + return ErrAborted + } else if t.State != tapplied { + panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State)) + } + return nil +} + +// ChangeLog enables logging of changes to the given collection +// every time a transaction that modifies content is done being +// applied. +// +// Saved documents are in the format: +// +// {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}} +// +// The document revision is the value of the txn-revno field after +// the change has been applied.
Negative values indicate the document +// was not present in the collection. Revisions will not change when +// updates or removes are applied to missing documents or inserts are +// attempted when the document isn't present. +func (r *Runner) ChangeLog(logc *mgo.Collection) { + r.lc = logc +} + +// PurgeMissing removes from collections any state that refers to transaction +// documents that for whatever reason have been lost from the system (removed +// by accident or lost in a hard crash, for example). +// +// This method should very rarely be needed, if at all, and should never be +// used during the normal operation of an application. Its purpose is to put +// a system that has seen unavoidable corruption back in a working state. +func (r *Runner) PurgeMissing(collections ...string) error { + type M map[string]interface{} + type S []interface{} + pipeline := []M{ + {"$project": M{"_id": 1, "txn-queue": 1}}, + {"$unwind": "$txn-queue"}, + {"$sort": M{"_id": 1, "txn-queue": 1}}, + //{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}}, + } + + type TRef struct { + DocId interface{} "_id" + TxnId string "txn-queue" + } + + found := make(map[bson.ObjectId]bool) + colls := make(map[string]bool) + + sort.Strings(collections) + for _, collection := range collections { + c := r.tc.Database.C(collection) + iter := c.Pipe(pipeline).Iter() + var tref TRef + for iter.Next(&tref) { + txnId := bson.ObjectIdHex(tref.TxnId[:24]) + if found[txnId] { + continue + } + if r.tc.FindId(txnId).One(nil) == nil { + found[txnId] = true + continue + } + logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId) + err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + if err != nil { + return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) + } + } + colls[collection] = true + } + + type StashTRef struct { + Id docKey "_id" + TxnId string "txn-queue" + } + + iter := r.sc.Pipe(pipeline).Iter() + var stref StashTRef + for iter.Next(&stref) { + txnId := bson.ObjectIdHex(stref.TxnId[:24]) + if found[txnId] { + continue + } + if r.tc.FindId(txnId).One(nil) == nil { + found[txnId] = true + continue + } + logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId) + err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + if err != nil { + return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) + } + } + + return nil +} + +func (r *Runner) load(id bson.ObjectId) (*transaction, error) { + var t transaction + err := r.tc.FindId(id).One(&t) + if err == mgo.ErrNotFound { + return nil, fmt.Errorf("cannot find transaction %s", id) + } else if err != nil { + return nil, err + } + return &t, nil +} + +type docKey struct { + C string + Id interface{} +} + +type docKeys []docKey + +func (ks docKeys) Len() int { return len(ks) } +func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] } +func (ks docKeys) Less(i, j int) bool { + a, b := ks[i], ks[j] + if a.C != b.C { + return a.C < b.C + } + av, an := valueNature(a.Id) + bv, bn := valueNature(b.Id) + if an != bn { + return an < bn + } + switch an { + case natureString: + return av.(string) < bv.(string) + case natureInt: + return av.(int64) < bv.(int64) + case natureFloat: + return av.(float64) < bv.(float64) + case natureBool: + return !av.(bool) && bv.(bool) + } + panic("unreachable") +} + +type typeNature 
int + +const ( + // The order of these values matters. Transactions + // from applications using different ordering will + // be incompatible with each other. + _ typeNature = iota + natureString + natureInt + natureFloat + natureBool +) + +func valueNature(v interface{}) (value interface{}, nature typeNature) { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.String: + return rv.String(), natureString + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), natureInt + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(rv.Uint()), natureInt + case reflect.Float32, reflect.Float64: + return rv.Float(), natureFloat + case reflect.Bool: + return rv.Bool(), natureBool + } + panic("document id type unsupported by txn: " + rv.Kind().String()) +} diff --git a/vendor/labix.org/v2/mgo/txn/txn_test.go b/vendor/labix.org/v2/mgo/txn/txn_test.go new file mode 100644 index 0000000..b716783 --- /dev/null +++ b/vendor/labix.org/v2/mgo/txn/txn_test.go @@ -0,0 +1,521 @@ +package txn_test + +import ( + "labix.org/v2/mgo" + "labix.org/v2/mgo/bson" + "labix.org/v2/mgo/txn" + . "launchpad.net/gocheck" + "testing" +) + +func TestAll(t *testing.T) { + TestingT(t) +} + +type S struct { + MgoSuite + + db *mgo.Database + tc, sc *mgo.Collection + accounts *mgo.Collection + runner *txn.Runner +} + +var _ = Suite(&S{}) + +type M map[string]interface{} + +func (s *S) SetUpTest(c *C) { + txn.SetChaos(txn.Chaos{}) + txn.SetLogger(c) + txn.SetDebug(true) + s.MgoSuite.SetUpTest(c) + + s.db = s.session.DB("test") + s.tc = s.db.C("tc") + s.sc = s.db.C("tc.stash") + s.accounts = s.db.C("accounts") + s.runner = txn.NewRunner(s.tc) +} + +type Account struct { + Id int `bson:"_id"` + Balance int +} + +func (s *S) TestDocExists(c *C) { + err := s.accounts.Insert(M{"_id": 0, "balance": 300}) + c.Assert(err, IsNil) + + exists := []txn.Op{{ + C: "accounts", + Id: 0, + Assert: txn.DocExists, + }} + missing := []txn.Op{{ + C: "accounts", + Id: 0, + Assert: txn.DocMissing, + }} + + err = s.runner.Run(exists, "", nil) + c.Assert(err, IsNil) + err = s.runner.Run(missing, "", nil) + c.Assert(err, Equals, txn.ErrAborted) + + err = s.accounts.RemoveId(0) + c.Assert(err, IsNil) + + err = s.runner.Run(exists, "", nil) + c.Assert(err, Equals, txn.ErrAborted) + err = s.runner.Run(missing, "", nil) + c.Assert(err, IsNil) +} + +func (s *S) TestInsert(c *C) { + err := s.accounts.Insert(M{"_id": 0, "balance": 300}) + c.Assert(err, IsNil) + + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Insert: M{"balance": 200}, + }} + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) + + ops[0].Id = 1 + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + err = s.accounts.FindId(1).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 200) +} + +func (s *S) TestRemove(c *C) { + err := s.accounts.Insert(M{"_id": 0, "balance": 300}) + c.Assert(err, IsNil) + + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Remove: true, + }} + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + err = s.accounts.FindId(0).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) +} + +func (s *S) TestUpdate(c *C) { + var err error + err = s.accounts.Insert(M{"_id": 0, "balance": 200}) + c.Assert(err, IsNil) + err = s.accounts.Insert(M{"_id": 1, "balance": 200}) + 
c.Assert(err, IsNil) + + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }} + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) + + ops[0].Id = 1 + + err = s.accounts.FindId(1).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 200) +} + +func (s *S) TestInsertUpdate(c *C) { + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Insert: M{"_id": 0, "balance": 200}, + }, { + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }} + + err := s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 400) +} + +func (s *S) TestUpdateInsert(c *C) { + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }, { + C: "accounts", + Id: 0, + Insert: M{"_id": 0, "balance": 200}, + }} + + err := s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 200) + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) +} + +func (s *S) TestInsertRemoveInsert(c *C) { + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Insert: M{"_id": 0, "balance": 200}, + }, { + C: "accounts", + Id: 0, + Remove: true, + }, { + C: "accounts", + Id: 0, + Insert: M{"_id": 0, "balance": 300}, + }} + + err := s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) +} + +func (s *S) TestQueueStashing(c *C) { + txn.SetChaos(txn.Chaos{ + KillChance: 1, + Breakpoint: "set-applying", + }) + + opses := [][]txn.Op{{{ + C: "accounts", + Id: 0, + Insert: M{"balance": 100}, + }}, {{ + C: "accounts", + Id: 0, + Remove: true, + }}, {{ + C: "accounts", + Id: 0, + Insert: M{"balance": 200}, + }}, {{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }}} + + var last bson.ObjectId + for _, ops := range opses { + last = bson.NewObjectId() + err := s.runner.Run(ops, last, nil) + c.Assert(err, Equals, txn.ErrChaos) + } + + txn.SetChaos(txn.Chaos{}) + err := s.runner.Resume(last) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 300) +} + +func (s *S) TestInfo(c *C) { + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Assert: txn.DocMissing, + }} + + id := bson.NewObjectId() + err := s.runner.Run(ops, id, M{"n": 42}) + c.Assert(err, IsNil) + + var t struct{ I struct{ N int } } + err = s.tc.FindId(id).One(&t) + c.Assert(err, IsNil) + c.Assert(t.I.N, Equals, 42) +} + +func (s *S) TestErrors(c *C) { + doc := bson.M{"foo": 1} + tests := []txn.Op{{ + C: "c", + Id: 0, + }, { + C: "c", + Id: 0, + Insert: doc, + Remove: true, + }, { + C: "c", + Id: 0, + Insert: doc, + Update: doc, + }, { + C: "c", + Id: 0, + Update: doc, + Remove: true, + }, { + C: "c", + Assert: doc, + }, { + Id: 0, + Assert: doc, + }} + + txn.SetChaos(txn.Chaos{KillChance: 1.0}) + for _, op := range tests { + 
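+ // With KillChance 1, chaos would kill any run that survived + // validation, so these cases exercise only Run's up-front op + // checks without mutating any collection.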
c.Logf("op: %v", op) + err := s.runner.Run([]txn.Op{op}, "", nil) + c.Assert(err, ErrorMatches, "error in transaction op 0: .*") + } +} + +func (s *S) TestAssertNestedOr(c *C) { + // Assert uses $or internally. Ensure nesting works. + err := s.accounts.Insert(M{"_id": 0, "balance": 300}) + c.Assert(err, IsNil) + + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}}, + Update: bson.D{{"$inc", bson.D{{"balance", 100}}}}, + }} + + err = s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var account Account + err = s.accounts.FindId(0).One(&account) + c.Assert(err, IsNil) + c.Assert(account.Balance, Equals, 400) +} + +func (s *S) TestVerifyFieldOrdering(c *C) { + // Used to have a map in certain operations, which means + // the ordering of fields would be messed up. + fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}} + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Insert: fields, + }} + + err := s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + var d bson.D + err = s.accounts.FindId(0).One(&d) + c.Assert(err, IsNil) + + var filtered bson.D + for _, e := range d { + switch e.Name { + case "a", "b", "c": + filtered = append(filtered, e) + } + } + c.Assert(filtered, DeepEquals, fields) +} + +func (s *S) TestChangeLog(c *C) { + chglog := s.db.C("chglog") + s.runner.ChangeLog(chglog) + + ops := []txn.Op{{ + C: "debts", + Id: 0, + Assert: txn.DocMissing, + }, { + C: "accounts", + Id: 0, + Insert: M{"balance": 300}, + }, { + C: "accounts", + Id: 1, + Insert: M{"balance": 300}, + }, { + C: "people", + Id: "joe", + Insert: M{"accounts": []int64{0, 1}}, + }} + id := bson.NewObjectId() + err := s.runner.Run(ops, id, nil) + c.Assert(err, IsNil) + + type IdList []interface{} + type Log struct { + Docs IdList "d" + Revnos []int64 "r" + } + var m map[string]*Log + err = chglog.FindId(id).One(&m) + c.Assert(err, IsNil) + + c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}}) + c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}}) + c.Assert(m["debts"], IsNil) + + ops = []txn.Op{{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }, { + C: "accounts", + Id: 1, + Update: M{"$inc": M{"balance": 100}}, + }} + id = bson.NewObjectId() + err = s.runner.Run(ops, id, nil) + c.Assert(err, IsNil) + + m = nil + err = chglog.FindId(id).One(&m) + c.Assert(err, IsNil) + + c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}}) + c.Assert(m["people"], IsNil) + + ops = []txn.Op{{ + C: "accounts", + Id: 0, + Remove: true, + }, { + C: "people", + Id: "joe", + Remove: true, + }} + id = bson.NewObjectId() + err = s.runner.Run(ops, id, nil) + c.Assert(err, IsNil) + + m = nil + err = chglog.FindId(id).One(&m) + c.Assert(err, IsNil) + + c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}}) + c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}}) +} + +func (s *S) TestPurgeMissing(c *C) { + txn.SetChaos(txn.Chaos{ + KillChance: 1, + Breakpoint: "set-applying", + }) + + err := s.accounts.Insert(M{"_id": 0, "balance": 100}) + c.Assert(err, IsNil) + err = s.accounts.Insert(M{"_id": 1, "balance": 100}) + c.Assert(err, IsNil) + + ops1 := []txn.Op{{ + C: "accounts", + Id: 3, + Insert: M{"balance": 100}, + }} + + ops2 := []txn.Op{{ + C: "accounts", + Id: 0, + Remove: true, + }, { + C: "accounts", + Id: 1, + Update: M{"$inc": M{"balance": 100}}, + }, { + C: "accounts", + Id: 2, + Insert: M{"balance": 100}, + }} + + err = s.runner.Run(ops1, "", nil) + c.Assert(err, Equals, 
txn.ErrChaos) + + last := bson.NewObjectId() + err = s.runner.Run(ops2, last, nil) + c.Assert(err, Equals, txn.ErrChaos) + err = s.tc.RemoveId(last) + c.Assert(err, IsNil) + + txn.SetChaos(txn.Chaos{}) + err = s.runner.ResumeAll() + c.Assert(err, IsNil) + + err = s.runner.Run(ops2, "", nil) + c.Assert(err, ErrorMatches, "cannot find transaction .*") + + err = s.runner.PurgeMissing("accounts") + c.Assert(err, IsNil) + + err = s.runner.Run(ops2, "", nil) + c.Assert(err, IsNil) + + expect := []struct{ Id, Balance int }{ + {0, -1}, + {1, 200}, + {2, 100}, + {3, 100}, + } + var got Account + for _, want := range expect { + err = s.accounts.FindId(want.Id).One(&got) + if want.Balance == -1 { + if err != mgo.ErrNotFound { + c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err) + } + } else if err != nil { + c.Errorf("Account %d should have balance of %d, but find got err=%#v", want.Id, want.Balance, err) + } else if got.Balance != want.Balance { + c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance) + } + } +} diff --git a/vendor/launchpad.net/goyaml/.bzr/README b/vendor/launchpad.net/goyaml/.bzr/README new file mode 100644 index 0000000..f82dc1c --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/README @@ -0,0 +1,3 @@ +This is a Bazaar control directory. +Do not change any files in this directory. +See http://bazaar.canonical.com/ for more information about Bazaar. diff --git a/vendor/launchpad.net/goyaml/.bzr/branch-format b/vendor/launchpad.net/goyaml/.bzr/branch-format new file mode 100644 index 0000000..9eb09b7 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/branch-format @@ -0,0 +1 @@ +Bazaar-NG meta directory, format 1 diff --git a/vendor/launchpad.net/goyaml/.bzr/branch/branch.conf b/vendor/launchpad.net/goyaml/.bzr/branch/branch.conf new file mode 100644 index 0000000..dae50f0 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/branch/branch.conf @@ -0,0 +1 @@ +parent_location = http://bazaar.launchpad.net/~goyaml/goyaml/trunk/ diff --git a/vendor/launchpad.net/goyaml/.bzr/branch/format b/vendor/launchpad.net/goyaml/.bzr/branch/format new file mode 100644 index 0000000..dc392f4 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/branch/format @@ -0,0 +1 @@ +Bazaar Branch Format 7 (needs bzr 1.6) diff --git a/vendor/launchpad.net/goyaml/.bzr/branch/last-revision b/vendor/launchpad.net/goyaml/.bzr/branch/last-revision new file mode 100644 index 0000000..5128c99 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/branch/last-revision @@ -0,0 +1 @@ +51 gustavo@niemeyer.net-20140305200416-7gh64vkcckre5mob diff --git a/vendor/launchpad.net/goyaml/.bzr/checkout/conflicts b/vendor/launchpad.net/goyaml/.bzr/checkout/conflicts new file mode 100644 index 0000000..0dc2d3a --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/checkout/conflicts @@ -0,0 +1 @@ +BZR conflict list format 1 diff --git a/vendor/launchpad.net/goyaml/.bzr/checkout/dirstate b/vendor/launchpad.net/goyaml/.bzr/checkout/dirstate new file mode 100644 index 0000000..748ed30 Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/checkout/dirstate differ diff --git a/vendor/launchpad.net/goyaml/.bzr/checkout/format b/vendor/launchpad.net/goyaml/.bzr/checkout/format new file mode 100644 index 0000000..e0261c7 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/checkout/format @@ -0,0 +1 @@ +Bazaar Working Tree Format 6 (bzr 1.14) diff --git a/vendor/launchpad.net/goyaml/.bzr/checkout/views b/vendor/launchpad.net/goyaml/.bzr/checkout/views new file mode 100644 index
0000000..e69de29 diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/format b/vendor/launchpad.net/goyaml/.bzr/repository/format new file mode 100644 index 0000000..b200528 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/repository/format @@ -0,0 +1 @@ +Bazaar repository format 2a (needs bzr 1.16 or later) diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.cix b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.cix new file mode 100644 index 0000000..580a717 Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.cix differ diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.iix b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.iix new file mode 100644 index 0000000..a945349 Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.iix differ diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.rix b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.rix new file mode 100644 index 0000000..a1fb02a Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.rix differ diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.six b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.six new file mode 100644 index 0000000..a2afde6 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.six @@ -0,0 +1,5 @@ +B+Tree Graph Index 2 +node_ref_lists=0 +key_elements=1 +len=0 +row_lengths= diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.tix b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.tix new file mode 100644 index 0000000..687ad37 Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/repository/indices/dfefc85269662e7e438e0069b6349695.tix differ diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/pack-names b/vendor/launchpad.net/goyaml/.bzr/repository/pack-names new file mode 100644 index 0000000..f17a210 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzr/repository/pack-names @@ -0,0 +1,6 @@ +B+Tree Graph Index 2 +node_ref_lists=0 +key_elements=1 +len=1 +row_lengths=1 +x Pj R8PYXظߓ۝uΡ R05AX0F)04̤ϮW[Z \ No newline at end of file diff --git a/vendor/launchpad.net/goyaml/.bzr/repository/packs/dfefc85269662e7e438e0069b6349695.pack b/vendor/launchpad.net/goyaml/.bzr/repository/packs/dfefc85269662e7e438e0069b6349695.pack new file mode 100644 index 0000000..5fec255 Binary files /dev/null and b/vendor/launchpad.net/goyaml/.bzr/repository/packs/dfefc85269662e7e438e0069b6349695.pack differ diff --git a/vendor/launchpad.net/goyaml/.bzrignore b/vendor/launchpad.net/goyaml/.bzrignore new file mode 100644 index 0000000..52c98ab --- /dev/null +++ b/vendor/launchpad.net/goyaml/.bzrignore @@ -0,0 +1,14 @@ +[568].out +_* +*.cgo*.* + +yaml-*/stamp-h1 +yaml-*/Makefile +yaml-*/*/Makefile +yaml-*/libtool +yaml-*/config* +yaml-*/*/*.lo +yaml-*/*/*.la +yaml-*/*/.libs +yaml-*/*/.deps +yaml-*/tests/* diff --git a/vendor/launchpad.net/goyaml/.lbox b/vendor/launchpad.net/goyaml/.lbox new file mode 100644 index 0000000..90ebc09 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.lbox @@ -0,0 
+1 @@ +propose -cr -for=lp:goyaml diff --git a/vendor/launchpad.net/goyaml/.lbox.check b/vendor/launchpad.net/goyaml/.lbox.check new file mode 100755 index 0000000..b4211c2 --- /dev/null +++ b/vendor/launchpad.net/goyaml/.lbox.check @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e + +BADFMT=`find * -name '*.go' | xargs gofmt -l` +if [ -n "$BADFMT" ]; then + BADFMT=`echo "$BADFMT" | sed "s/^/ /"` + printf 'gofmt is sad:\n\n%s\n' "$BADFMT" + exit 1 +fi + +VERSION=`go version | awk '{print $3}'` +if [ "$VERSION" = 'devel' ]; then + go tool vet \ + -methods \ + -printf \ + -rangeloops \ + -printfuncs 'ErrorContextf:1,notFoundf:0,badReqErrorf:0,Commitf:0,Snapshotf:0,Debugf:0' \ + . +fi diff --git a/vendor/launchpad.net/goyaml/LICENSE b/vendor/launchpad.net/goyaml/LICENSE new file mode 100644 index 0000000..53320c3 --- /dev/null +++ b/vendor/launchpad.net/goyaml/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version.
+ + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. 
+ + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/launchpad.net/goyaml/LICENSE.libyaml b/vendor/launchpad.net/goyaml/LICENSE.libyaml new file mode 100644 index 0000000..050ced2 --- /dev/null +++ b/vendor/launchpad.net/goyaml/LICENSE.libyaml @@ -0,0 +1,19 @@ +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/launchpad.net/goyaml/Makefile b/vendor/launchpad.net/goyaml/Makefile new file mode 100644 index 0000000..873d53b --- /dev/null +++ b/vendor/launchpad.net/goyaml/Makefile @@ -0,0 +1,39 @@ +include $(GOROOT)/src/Make.inc + +YAML=yaml-0.1.3 +LIBYAML=$(PWD)/$(YAML)/src/.libs/libyaml.a + +TARG=launchpad.net/goyaml + +GOFILES=\ + goyaml.go\ + resolve.go\ + +CGOFILES=\ + decode.go\ + encode.go\ + +CGO_OFILES+=\ + helpers.o\ + api.o\ + scanner.o\ + reader.o\ + parser.o\ + writer.o\ + emitter.o\ + +GOFMT=gofmt + +BADFMT:=$(shell $(GOFMT) -l $(GOFILES) $(CGOFILES) $(wildcard *_test.go)) + +all: package +gofmt: $(BADFMT) + @for F in $(BADFMT); do $(GOFMT) -w $$F && echo $$F; done + +include $(GOROOT)/src/Make.pkg + +ifneq ($(BADFMT),) +ifneq ($(MAKECMDGOALS),gofmt) +$(warning WARNING: make gofmt: $(BADFMT)) +endif +endif diff --git a/vendor/launchpad.net/goyaml/apic.go b/vendor/launchpad.net/goyaml/apic.go new file mode 100644 index 0000000..29be91d --- /dev/null +++ b/vendor/launchpad.net/goyaml/apic.go @@ -0,0 +1,742 @@ +package goyaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
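+// As with yaml_parser_set_input_string above, the input source may be set
+// only once per parser; the read_handler guard below panics on a second
+// attempt.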
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected.
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
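+// plain_implicit and quoted_implicit follow the libyaml convention: they
+// record whether the tag may be left implicit when the scalar is rendered
+// in plain style or in a quoted style, respectively.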
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object.
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/launchpad.net/goyaml/decode.go b/vendor/launchpad.net/goyaml/decode.go new file mode 100644 index 0000000..6586be4 --- /dev/null +++ b/vendor/launchpad.net/goyaml/decode.go @@ -0,0 +1,473 @@ +package goyaml + +import ( + "reflect" + "strconv" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
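+//
+// p.skip pulls the next event from the libyaml-style parser above
+// (panicking if asked to read past STREAM-END), and p.parse dispatches on
+// the event type to build document, mapping, sequence, scalar and alias
+// nodes.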
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("Failed to initialize YAML parser") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + panic("Attempted to go past the end of stream. Corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "Unknown problem parsing YAML content" + } + panic(where + msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("Attempted to parse unknown event: " + + strconv.Itoa(int(p.event.typ))) + } + panic("Unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("Expected end of document event but got " + + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value.
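+//
+// Decoding walks the node tree built above, filling the target value via
+// reflection; anchors seen along the way are tracked in d.aliases so that
+// self-referencing aliases fail instead of looping. Types can hook into
+// decoding by implementing the Setter interface consumed in d.setter below.
+// A minimal sketch (Color is a hypothetical type; the SetYAML signature
+// matches the tests in this patch):
+//
+//     type Color struct{ name string }
+//
+//     func (c *Color) SetYAML(tag string, value interface{}) bool {
+//         s, ok := value.(string)
+//         if ok {
+//             c.name = s
+//         }
+//         return ok
+//     }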
+ +type decoder struct { + doc *node + aliases map[string]bool +} + +func newDecoder() *decoder { + d := &decoder{} + d.aliases = make(map[string]bool) + return d +} + +// d.setter deals with setters and pointer dereferencing and initialization. +// +// It's a slightly convoluted case to handle properly: +// +// - nil pointers should be initialized, unless being set to nil +// - we don't yet know the value that SetYAML() will be called with. +// - we can't separate pointer deref/init and setter checking, because +// a setter may be found while going down a pointer chain. +// +// Thus, here is how it takes care of it: +// +// - out is provided as a pointer, so that it can be replaced. +// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null +// - when a setter is found, *out=interface{}, and a set() function is +// returned to call SetYAML() with the value of *out once it's defined. +// +func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) { + again := true + for again { + again = false + setter, _ := (*out).Interface().(Setter) + if tag != "!!null" || setter != nil { + if pv := (*out); pv.Kind() == reflect.Ptr { + if pv.IsNil() { + *out = reflect.New(pv.Type().Elem()).Elem() + pv.Set((*out).Addr()) + } else { + *out = pv.Elem() + } + setter, _ = pv.Interface().(Setter) + again = true + } + } + if setter != nil { + var arg interface{} + *out = reflect.ValueOf(&arg).Elem() + return func() { + *good = setter.SetYAML(tag, arg) + } + } + } + return nil +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + good = d.document(n, out) + case scalarNode: + good = d.scalar(n, out) + case aliasNode: + good = d.alias(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + panic("Unknown anchor '" + n.value + "' referenced") + } + if d.aliases[n.value] { + panic("Anchor '" + n.value + "' value contains itself") + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if set := d.setter(tag, &out, &good); set != nil { + defer set() + } + } + switch out.Kind() { + case reflect.String: + if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case float64: + if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + switch resolved.(type) { + case nil: + out.Set(reflect.Zero(out.Type())) + good = true + default: + if out.Type().Elem() == reflect.TypeOf(resolved) { + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + if set := d.setter("!!seq", &out, &good); set != nil { + defer set() + } + var iface reflect.Value + if out.Kind() == reflect.Interface { + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, 0)) + } + + if out.Kind() != reflect.Slice { + return false + } + et := out.Type().Elem() + + l := len(n.children) + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Set(reflect.Append(out, e)) + } + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + if set := d.setter("!!map", &out, &good); set != nil { + defer set() + } + if out.Kind() == reflect.Struct { + return d.mappingStruct(n, out) + } + + if out.Kind() == reflect.Interface { + // No type hints. Will have to use a generic map. + iface := out + out = settableValueOf(make(map[interface{}]interface{})) + iface.Set(out) + } + + if out.Kind() != reflect.Map { + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + for i := 0; i < l; i += 2 { + if !d.unmarshal(n.children[i], name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } + } + return true +} diff --git a/vendor/launchpad.net/goyaml/decode_test.go b/vendor/launchpad.net/goyaml/decode_test.go new file mode 100644 index 0000000..83696b5 --- /dev/null +++ b/vendor/launchpad.net/goyaml/decode_test.go @@ -0,0 +1,519 @@ +package goyaml_test + +import ( + . 
"launchpad.net/gocheck" + "launchpad.net/goyaml" + "math" + "reflect" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. + + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + 
map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. + { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. 
+ { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, +} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for i, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + t := reflect.ValueOf(item.value).Type() + v := reflect.New(t) + value = v.Interface() + default: + pt := reflect.ValueOf(item.value).Type() + pv := reflect.New(pt.Elem()) + value = pv.Interface() + } + err := goyaml.Unmarshal([]byte(item.data), value) + c.Assert(err, IsNil, Commentf("Item #%d", i)) + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i)) + } else { + c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i)) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := goyaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"}, + {"v: [A,", "YAML error: line 1: did not find expected node content"}, + {"v:\n- [A,", "YAML error: line 2: did not find expected node content"}, + {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"}, + {"value: -", "YAML error: block sequence entries are not allowed in this context"}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := goyaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var setterTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var setterResult = map[int]bool{} + +type typeWithSetter struct { + tag string + value interface{} +} + +func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) { + o.tag = tag + o.value = value + if i, ok := value.(int); ok { + if result, ok := setterResult[i]; ok { + return result + } + } + return true +} + +type typeWithSetterField struct { + Field *typeWithSetter "_" +} + +func (s 
*S) TestUnmarshalWithSetter(c *C) { + for _, item := range setterTests { + obj := &typeWithSetterField{} + err := goyaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, + Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.tag, Equals, item.tag) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { + obj := &typeWithSetter{} + err := goyaml.Unmarshal([]byte(setterTests[0].data), obj) + c.Assert(err, IsNil) + c.Assert(obj.tag, Equals, setterTests[0].tag) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true) + c.Assert(value["_"], DeepEquals, setterTests[0].value) +} + +func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) { + setterResult[2] = false + setterResult[4] = false + defer func() { + delete(setterResult, 2) + delete(setterResult, 4) + }() + + m := map[string]*typeWithSetter{} + data := "{abc: 1, def: 2, ghi: 3, jkl: 4}" + err := goyaml.Unmarshal([]byte(data), m) + c.Assert(err, IsNil) + c.Assert(m["abc"], NotNil) + c.Assert(m["def"], IsNil) + c.Assert(m["ghi"], NotNil) + c.Assert(m["jkl"], IsNil) + + c.Assert(m["abc"].value, Equals, 1) + c.Assert(m["ghi"].value, Equals, 3) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = goyaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// goyaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// goyaml.Marshal(&v) +// } +//} diff --git a/vendor/launchpad.net/goyaml/emitterc.go b/vendor/launchpad.net/goyaml/emitterc.go new file mode 100644 index 0000000..49c7f46 --- /dev/null +++ b/vendor/launchpad.net/goyaml/emitterc.go @@ -0,0 +1,1682 @@ +package goyaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
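+// The byte at s[*i] may start a multi-byte UTF-8 sequence; width() reports
+// its length and the fallthrough chain below copies each of its bytes.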
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
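+	// Even so, the copy insulates the emitter from a caller that mutates the
+	// directive's handle or prefix slices after appending it.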
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
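The dispatcher above treats the emitter as a pushdown automaton: emitter.state names the construct currently expected, and the emitter.states slice is a stack of continuations that nested sequences and mappings push and pop (the same append/truncate idiom recurs throughout this file). A toy sketch of that shape, with illustrative names only; the first real state, STREAM-START, is handled next:

```go
package main

import "fmt"

// A toy pushdown dispatcher in the same shape as yaml_emitter_state_machine:
// the current state decides how to handle an event, and nested constructs
// push the state to resume once the construct ends.
type state int

const (
	expectValue state = iota
	expectListItem
	done
)

type machine struct {
	state  state
	states []state // continuation stack
}

func (m *machine) push(s state) { m.states = append(m.states, s) }

func (m *machine) pop() {
	m.state = m.states[len(m.states)-1]
	m.states = m.states[:len(m.states)-1]
}

func (m *machine) handle(ev string) {
	switch m.state {
	case expectValue:
		if ev == "list-start" {
			m.push(done) // after the list, the document is complete
			m.state = expectListItem
			return
		}
		fmt.Println("scalar:", ev)
		m.state = done
	case expectListItem:
		if ev == "list-end" {
			m.pop() // resume whatever expected this list as its value
			return
		}
		fmt.Println("item:", ev)
	}
}

func main() {
	m := &machine{state: expectValue}
	for _, ev := range []string{"list-start", "a", "b", "list-end"} {
		m.handle(ev)
	}
	fmt.Println("final state is done:", m.state == done)
}
```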
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid.
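yaml_emitter_analyze_tag above shortens a verbose tag into a registered handle plus suffix when a %TAG directive's prefix matches, falling back to the !<...> form written by yaml_emitter_process_tag. The same decision in plain Go, as an illustrative sketch with hypothetical names; scalar analysis follows:

```go
package main

import (
	"fmt"
	"strings"
)

// The handle/prefix shortening that yaml_emitter_analyze_tag performs:
// if a %TAG directive maps a handle to a prefix of the full tag, emit
// handle + suffix instead of the verbose "!<...>" form.
type tagDirective struct{ handle, prefix string }

func shorten(tag string, dirs []tagDirective) string {
	for _, d := range dirs {
		if strings.HasPrefix(tag, d.prefix) {
			return d.handle + tag[len(d.prefix):]
		}
	}
	return "!<" + tag + ">"
}

func main() {
	dirs := []tagDirective{{"!e!", "tag:example.com,2014:"}}
	fmt.Println(shorten("tag:example.com,2014:widget", dirs)) // !e!widget
	fmt.Println(shorten("tag:other.org:thing", dirs))         // !<tag:other.org:thing>
}
```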
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
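The flag-to-style mapping above explains a test expectation that appears later in this diff: any line break in a scalar sets line_breaks, which clears both plain styles, so a value like "B\nC" in a sequence is emitted single-quoted, and the single-quoted writer doubles the break so YAML folding does not collapse the newline into a space (hence the 'B\n\n C' form in marshalTests). A usage example under the vendored import path:

```go
package main

import (
	"fmt"

	"launchpad.net/goyaml"
)

// The embedded newline forces single quotes and a doubled line break;
// compare the 'B\n\n C' expectation in encode_test.go in this diff.
func main() {
	data, err := goyaml.Marshal(map[string][]string{"v": {"A", "B\nC"}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", data)
}
```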
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + 
emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + 
} + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/launchpad.net/goyaml/encode.go b/vendor/launchpad.net/goyaml/encode.go new file mode 100644 index 0000000..b228a10 --- /dev/null +++ b/vendor/launchpad.net/goyaml/encode.go @@ -0,0 +1,221 @@ +package goyaml + +import ( + "reflect" + "sort" + "strconv" +) + +type encoder struct { + emitter 
yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "Unknown problem generating YAML content" + } + panic(msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + var value interface{} + if getter, ok := in.Interface().(Getter); ok { + tag, value = getter.GetYAML() + if value == nil { + e.nilv() + return + } + in = reflect.ValueOf(value) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("Can't marshal type yet: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + 
e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + if rtag, _ := resolve("", s); rtag != "!!str" { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + if !implicit { + style = yaml_PLAIN_SCALAR_STYLE + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/launchpad.net/goyaml/encode_test.go b/vendor/launchpad.net/goyaml/encode_test.go new file mode 100644 index 0000000..2887466 --- /dev/null +++ b/vendor/launchpad.net/goyaml/encode_test.go @@ -0,0 +1,379 @@ +package goyaml_test + +import ( + "fmt" + . 
"launchpad.net/gocheck" + "launchpad.net/goyaml" + "math" + "strconv" + "strings" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- 'B\n\n C'\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X int } "a,omitempty" + B int "b,omitempty" + }{nil, 0}, + "{}\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, +} + +func (s *S) TestMarshal(c *C) { + for _, item := range marshalTests { + data, err := goyaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string +}{ + { + &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + `Duplicated key 'b' in struct struct \{ B int; .*`, + }, +} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + _, err := goyaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } +} + +var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"} + +var getterTests = []struct { + data, tag string + value interface{} +}{ + {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}}, + {"_: 10\n", "", 10}, + {"_: null\n", "", nil}, + {"_: !foo BAR!\n", "!foo", "BAR!"}, + {"_: !foo 1\n", "!foo", "1"}, + {"_: !foo '\"1\"'\n", "!foo", "\"1\""}, + {"_: !foo 1.1\n", "!foo", 1.1}, + {"_: !foo 1\n", "!foo", 1}, + {"_: !foo 1\n", "!foo", uint(1)}, + {"_: !foo true\n", "!foo", true}, + {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}}, + {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}}, + {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest}, +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = goyaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = goyaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +type typeWithGetter struct { + tag string + value interface{} +} + +func (o typeWithGetter) GetYAML() (tag string, value interface{}) { + return o.tag, o.value +} + +type typeWithGetterField struct { + Field typeWithGetter "_" +} + +func (s *S) TestMashalWithGetter(c *C) { + for _, item := range getterTests { + obj := &typeWithGetterField{} + obj.Field.tag = item.tag + obj.Field.value = 
item.value + data, err := goyaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) { + obj := &typeWithGetter{} + obj.tag = "" + obj.value = map[string]string{"hello": "world!"} + data, err := goyaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := goyaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/vendor/launchpad.net/goyaml/goyaml.go b/vendor/launchpad.net/goyaml/goyaml.go new file mode 100644 index 0000000..6bbce6d --- /dev/null +++ b/vendor/launchpad.net/goyaml/goyaml.go @@ -0,0 +1,307 @@ +// Package goyaml implements YAML support for the Go language. +// +// WARNING: You are using an out of date import path. Please update your code and import the following instead: +// +// gonuts.org/v1/yaml +// +// The package name has changed from "goyaml" to "yaml", but the package API has not changed. +// +package goyaml + +import ( + "errors" + "fmt" + "reflect" + "runtime" + "strings" + "sync" +) + +func handleErr(err *error) { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } else if _, ok := r.(*reflect.ValueError); ok { + panic(r) + } else if _, ok := r.(externalPanic); ok { + panic(r) + } else if s, ok := r.(string); ok { + *err = errors.New("YAML error: " + s) + } else if e, ok := r.(error); ok { + *err = e + } else { + panic(r) + } + } +} + +// Objects implementing the goyaml.Setter interface will receive the YAML +// tag and value via the SetYAML method during unmarshaling, rather than +// being implicitly assigned by the goyaml machinery. If setting the value +// works, the method should return true. If it returns false, the given +// value will be omitted from maps and slices. +type Setter interface { + SetYAML(tag string, value interface{}) bool +} + +// Objects implementing the goyaml.Getter interface will get the GetYAML() +// method called when goyaml is requested to marshal the given value, and +// the result of this method will be marshaled in place of the actual object. +type Getter interface { + GetYAML() (tag string, value interface{}) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the object pointed by out. +// +// Maps, pointers to structs and ints, etc, may all be used as out values. +// If an internal pointer within a struct is not initialized, goyaml +// will initialize it if necessary for unmarshalling the provided data, +// but the struct provided as out must not be a nil pointer.
+// +// The type of the decoded values and the type of out will be considered, +// and Unmarshal() will do the best possible job to unmarshal values +// appropriately. It is NOT considered an error, though, to skip values +// because they are not available in the decoded YAML, or if they are not +// compatible with the out value. To ensure something was properly +// unmarshaled use a map or compare against the previous value for the +// field (usually the zero value). +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and will be unmarshalled using the field +// name lowercased by default. When custom field names are desired, the +// tag value may be used to tweak the name. Everything before the first +// comma in the field tag will be used as the name. The values following +// the comma are used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// goyaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + d.unmarshal(node, reflect.ValueOf(out)) + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps, pointers to structs and ints, etc, may all be used as the in value. +// +// In the case of struct values, only exported fields will be serialized. +// The lowercased field name is used as the key for each exported field, +// but this behavior may be changed using the respective field tag. +// The tag may also contain flags to tweak the marshalling behavior for +// the field. Conflicting names result in a runtime error. The tag format +// accepted is: +// +// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the struct it's applied to, so its fields +// are processed as if they were part of the outer +// struct. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// goyaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// goyaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from gobson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none.
+ InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +type externalPanic string + +func (e externalPanic) String() string { + return string(e) +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) + panic(externalPanic(msg)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + //case reflect.Map: + // if inlineMap >= 0 { + // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + // } + // if field.Type.Key() != reflect.TypeOf("") { + // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + // } + // inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
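+ // finfo.Inline now holds the chain of field indexes from the + // outer struct down to the promoted field, in the form used by + // reflect's FieldByIndex.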
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //panic("Option ,inline needs a struct value or map field") + panic("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + } + return false +} diff --git a/vendor/launchpad.net/goyaml/parserc.go b/vendor/launchpad.net/goyaml/parserc.go new file mode 100644 index 0000000..0fdfa4e --- /dev/null +++ b/vendor/launchpad.net/goyaml/parserc.go @@ -0,0 +1,1096 @@ +package goyaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). 
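+// Consuming a token only advances the queue head; the next peek_token call +// fetches more tokens from the scanner once the queue runs dry.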
+func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return 
yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end.
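+ // Only a STREAM-END token can be left at this point: emit the + // matching event and move the parser into its terminal state.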
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
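+ // The tag now holds the directive prefix plus the suffix, so a + // handle such as "!!" with suffix "str" resolves to + // "tag:yaml.org,2002:str" under the default directives.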
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
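+// An empty scalar stands in for content the grammar allows but the input +// omits, such as the missing value in "key:".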
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/launchpad.net/goyaml/readerc.go b/vendor/launchpad.net/goyaml/readerc.go new file mode 100644 index 0000000..c732935 --- /dev/null +++ b/vendor/launchpad.net/goyaml/readerc.go @@ -0,0 +1,391 @@ +package goyaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
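+// A leading BOM, when present, is consumed by yaml_parser_determine_encoding +// below; a stream without one is assumed to be UTF-8.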
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Returns true on success, false on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we have enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less than the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning.
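+ // (Reslicing to the full capacity exposes writable space to the decoder; + // the buffer is trimmed back to buffer_len once decoding stops.)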
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + } + buffer_len += width + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/launchpad.net/goyaml/resolve.go b/vendor/launchpad.net/goyaml/resolve.go new file mode 100644 index 0000000..a31a533 --- /dev/null +++ b/vendor/launchpad.net/goyaml/resolve.go @@ -0,0 +1,155 @@ +package goyaml + +import ( + "math" + "strconv" + "strings" +) + +// TODO: merge, timestamps, base 60 floats, omap. + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + t[int('<')] = '<' // Merge + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, "!!bool", []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, "!!bool", []string{"true", "True", "TRUE"}}, + {true, "!!bool", []string{"on", "On", "ON"}}, + {false, "!!bool", []string{"n", "N", "no", "No", "NO"}}, + {false, "!!bool", []string{"false", "False", "FALSE"}}, + {false, "!!bool", []string{"off", "Off", "OFF"}}, + {nil, "!!null", []string{"~", "null", "Null", "NULL"}}, + {math.NaN(), "!!float", []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", "!!str", "!!bool", "!!int", "!!float", "!!null": + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + if tag != "" && tag != rtag { + panic("Can't decode " + rtag + " '" + in + "' as a " + tag) + } + }() + + if in == "" { + return "!!null", nil + } + + c := resolveTable[in[0]] + if c == 0 { + // It's a string for sure. Nothing to do. + return "!!str", in + } + + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + switch c { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return "!!float", floatv + } + // XXX Handle base 60 floats here (WTF!) + + case 'D', 'S': + // Int, float, or timestamp. 
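+ // YAML 1.1 allows '_' as a digit separator (e.g. 1_000), so any + // underscores are stripped before the strconv parsers run.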
+ for i := 0; i != len(in); i++ { + if in[i] == '_' { + in = strings.Replace(in, "_", "", -1) + break + } + } + intv, err := strconv.ParseInt(in, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return "!!int", int(intv) + } else { + return "!!int", intv + } + } + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return "!!float", floatv + } + if strings.HasPrefix(in, "0b") { + intv, err := strconv.ParseInt(in[2:], 2, 64) + if err == nil { + return "!!int", int(intv) + } + } else if strings.HasPrefix(in, "-0b") { + intv, err := strconv.ParseInt(in[3:], 2, 64) + if err == nil { + return "!!int", -int(intv) + } + } + // XXX Handle timestamps here. + + case '<': + // XXX Handle merge (<<) here. + + default: + panic("resolveTable item not yet handled: " + + string([]byte{c}) + " (with " + in + ")") + } + return "!!str", in +} diff --git a/vendor/launchpad.net/goyaml/scannerc.go b/vendor/launchpad.net/goyaml/scannerc.go new file mode 100644 index 0000000..943a597 --- /dev/null +++ b/vendor/launchpad.net/goyaml/scannerc.go @@ -0,0 +1,2710 @@ +package goyaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive than it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided into two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// Parser transforms the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descent parser (or +// LL(1) parser, as it is usually called). +// +// Actually, there are two issues of Scanning that might be called "clever"; the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in detail. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// FLOW-MAPPING-START # '{' +// FLOW-MAPPING-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml!
tag:yaml.org,2002: +// --- +// +// The corresponding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tags, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrates the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1.
A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. 
Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. 
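+//
+// For illustration, the line break forms handled below are:
+//
+//      CR LF  (\r\n)                        -> '\n'
+//      CR     (\r)  or  LF  (\n)            -> '\n'
+//      NEL    (\xC2\x85)                    -> '\n'
+//      LS/PS  (\xE2\x80\xA8, \xE2\x80\xA9)  -> copied through unchanged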
+func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. 
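+//
+// A minimal sketch of driving the scanner (illustrative only, assuming the
+// yaml_parser_initialize and yaml_parser_set_input_string helpers defined
+// elsewhere in this package):
+//
+//      var parser yaml_parser_t
+//      if !yaml_parser_initialize(&parser) {
+//              panic("failed to initialize the parser")
+//      }
+//      yaml_parser_set_input_string(&parser, []byte("key: value"))
+//
+//      var token yaml_token_t
+//      for {
+//              if !yaml_parser_scan(&parser, &token) {
+//                      break // Scanner error; details are in parser.problem.
+//              }
+//              fmt.Println(token.typ)
+//              if token.typ == yaml_STREAM_END_TOKEN {
+//                      break
+//              }
+//      }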
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' 
{ + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. 
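+			// For instance, in the input
+			//
+			//      a: 1
+			//      b
+			//      c: 3
+			//
+			// the potential key "b" is required (it sits at the mapping's
+			// indentation column) but goes stale once the scanner reaches
+			// the next line, so the error below is reported.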
+ if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each intendation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. 
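+	// For illustration: given
+	//
+	//      a:
+	//        b:
+	//          c: 1
+	//      d: 2
+	//
+	// reaching 'd' at column 0 pops the levels recorded at columns 4 and 2,
+	// emitting one BLOCK-END token for each popped level.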
+ if parser.flow_level > 0 { + return true + } + + // Loop through the intendation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. 
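+	// For example, a flow collection may itself serve as a mapping key:
+	//
+	//      [1, 2]: some value
+	//
+	// Saving the position of '[' or '{' lets the scanner insert the KEY
+	// token in front of the collection once the following ':' is found.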
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. 
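+	// This is the explicit key indicator, as in:
+	//
+	//      ? a complex key
+	//      : its value
+	//
+	// In the block context a '?' may also open a new mapping, so the
+	// roll_indent call below may first emit BLOCK-MAPPING-START.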
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. 
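+	// For example, in
+	//
+	//      !!str 42: value
+	//
+	// the tag opens the key, so its position is saved before it is scanned.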
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. 
+ } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found uknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. 
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
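+	// For the directive "%TAG !yaml! tag:yaml.org,2002:", the handle
+	// scanned above is "!yaml!" and the prefix scanned below is
+	// "tag:yaml.org,2002:".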
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
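+			// To illustrate the accepted spellings and the resulting
+			// handle/suffix pairs:
+			//
+			//      !<tag:yaml.org,2002:str>  handle ""    suffix "tag:yaml.org,2002:str"
+			//      !!str                     handle "!!"  suffix "str"
+			//      !local                    handle "!"   suffix "local"
+			//      !                         handle ""    suffix "!"  (via the swap below)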
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the intendation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + + // Get the intendation level and eat the indicator. 
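+		// For illustration, valid block scalar headers include:
+		//
+		//      |       literal, default (clip) chomping
+		//      >-      folded, strip trailing line breaks
+		//      |+2     literal, keep trailing breaks, content indented
+		//              two columns past the parent indentation
+		//      >2-     folded, the same indicators in the opposite order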
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the intendation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following intendation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan intendation spaces and line breaks for a block scalar. Determine the +// intendation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the intendation spaces and line breaks. + max_indent := 0 + for { + // Eat the intendation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the intendation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an intendation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
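+					// (Surrogate halves #xD800-#xDFFF and values above
+					// #x10FFFF are not Unicode scalar values; everything else
+					// is UTF-8 encoded by hand below: 1 byte for <= #x7F,
+					// 2 bytes for <= #x7FF, 3 bytes for <= #xFFFF, 4 otherwise.)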
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&&
+				parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+			if parser.flow_level > 0 &&
+				parser.buffer[parser.buffer_pos] == ':' &&
+				!is_blankz(parser.buffer, parser.buffer_pos+1) {
+				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+					start_mark, "found unexpected ':'")
+				return false
+			}
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
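+	// (If the scalar ended right after consuming line breaks, the scanner
+	// is at the start of a line again, so a simple key may begin here.)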
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/launchpad.net/goyaml/sorter.go b/vendor/launchpad.net/goyaml/sorter.go new file mode 100644 index 0000000..e1ee85f --- /dev/null +++ b/vendor/launchpad.net/goyaml/sorter.go @@ -0,0 +1,104 @@ +package goyaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/launchpad.net/goyaml/suite_test.go b/vendor/launchpad.net/goyaml/suite_test.go new file mode 100644 index 0000000..963ab0b --- /dev/null +++ b/vendor/launchpad.net/goyaml/suite_test.go @@ -0,0 +1,12 @@ +package goyaml_test + +import ( + . "launchpad.net/gocheck" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/vendor/launchpad.net/goyaml/writerc.go b/vendor/launchpad.net/goyaml/writerc.go new file mode 100644 index 0000000..4809bfb --- /dev/null +++ b/vendor/launchpad.net/goyaml/writerc.go @@ -0,0 +1,89 @@ +package goyaml + +// Set the writer error and return false. 
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/launchpad.net/goyaml/yamlh.go b/vendor/launchpad.net/goyaml/yamlh.go new file mode 100644 index 0000000..1fea365 --- /dev/null +++ b/vendor/launchpad.net/goyaml/yamlh.go @@ -0,0 +1,712 @@ +package goyaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). 
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data       A pointer to application data specified by
+//                     yaml_parser_set_input().
+// [out]    buffer     The buffer to write the data from the source.
+// [in]     size       The size of the buffer.
+// [out]    size_read  The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+	encoding yaml_encoding_t // The input encoding.
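+
+	// Note: input flows from raw_buffer to buffer. Raw bytes are read
+	// into raw_buffer and decoded into UTF-8 in buffer; unread counts
+	// the decoded characters that the scanner has not yet consumed.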
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing?
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data    A pointer to application data specified by
+//                  yaml_emitter_set_output().
+// [in]     buffer  The buffer with bytes to be written.
+// [in]     size    The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
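+//
+// Roughly, the flow is: events are queued in 'events', analyzed into
+// anchor_data/tag_data/scalar_data, rendered into 'buffer', recoded into
+// 'raw_buffer' when the output encoding is not UTF-8, and finally passed
+// to 'write_handler' (see yaml_emitter_flush in writerc.go).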
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // Has the stream already been opened?
+	closed bool // Has the stream already been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/launchpad.net/goyaml/yamlprivateh.go b/vendor/launchpad.net/goyaml/yamlprivateh.go
new file mode 100644
index 0000000..1c0b23d
--- /dev/null
+++ b/vendor/launchpad.net/goyaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package goyaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
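+// (The 'z' suffix in these helpers means "or NUL": is_breakz, is_spacez
+// and is_blankz also accept the NUL byte that terminates the buffer.)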
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}
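+
+// Illustrative examples (not part of the original libyaml port) of what
+// width reports for the leading byte of a UTF-8 sequence:
+//
+//	width('a')  == 1 // 0xxxxxxx: ASCII
+//	width(0xC3) == 2 // 110xxxxx: e.g. first byte of 'é' (0xC3 0xA9)
+//	width(0xE2) == 3 // 1110xxxx: e.g. first byte of '€' (0xE2 0x82 0xAC)
+//	width(0xF0) == 4 // 11110xxx: e.g. first byte of U+1D11E (0xF0 0x9D 0x84 0x9E)
+//	width(0x80) == 0 // 10xxxxxx: a continuation byte, not a leading byte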