vendoring and caldav

This commit is contained in:
Vincent Batts 2018-04-01 11:08:20 -04:00
parent a06aa900b7
commit fd9092e5ab
96 changed files with 14832 additions and 5 deletions

61
Gopkg.lock generated Normal file

@@ -0,0 +1,61 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/beevik/etree"
packages = ["."]
revision = "15a30b44cfd6c5a16a7ddfe271bf146aaf2d3195"
version = "v1.0.0"
[[projects]]
name = "github.com/laurent22/ical-go"
packages = ["ical"]
revision = "4811ac5553eae5fed7cd5d7a9024727f1311b2a2"
version = "v0.1.0"
[[projects]]
name = "github.com/samedi/caldav-go"
packages = [
".",
"data",
"errs",
"files",
"global",
"handlers",
"ixml",
"lib"
]
revision = "4d45d84038c6026578b403ceff7d3081dcd84efd"
version = "v3.0.0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish"
]
revision = "12892e8c234f4fe6f6803f052061de9057903bb2"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"webdav",
"webdav/internal/xml"
]
revision = "b68f30494add4df6bd8ef5e82803f308e7f7c59c"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "65e0b2109be1138b841139e4bf266a60b62dc0e84d2ab6dc9785ea94b766074c"
solver-name = "gps-cdcl"
solver-version = 1

42
Gopkg.toml Normal file

@@ -0,0 +1,42 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/abbot/go-http-auth"
version = "0.4.0"
[[constraint]]
name = "github.com/samedi/caldav-go"
version = "3.0.0"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[prune]
go-tests = true
unused-packages = true

22
main.go

@@ -1,6 +1,7 @@
package main
import (
"context"
"flag"
"fmt"
"log"
@@ -11,6 +12,7 @@ import (
"time"
"github.com/abbot/go-http-auth"
caldav "github.com/samedi/caldav-go"
"golang.org/x/net/webdav"
)
@@ -19,6 +21,7 @@ var (
flCert = flag.String("cert", "", "server SSL cert (both -cert and -key must be present to use SSL). See `go run $(go env GOROOT)/src/crypto/tls/generate_cert.go -h` to generate development cert/key")
flKey = flag.String("key", "", "server SSL key")
flHtpasswd = flag.String("htpasswd", "", "htpasswd file for auth (must be present to use auth) See htpasswd(1) to create this file.")
flCalDav = flag.String("caldav", "", "local path to store caldav data ('' means no caldav is served)")
)
func main() {
@@ -60,10 +63,19 @@ func main() {
authHandlerFunc := func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
h.ServeHTTP(w, &r.Request)
}
if *flCalDav != "" {
http.HandleFunc("/caldav", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
caldav.RequestHandler(w, &r.Request)
}))
}
http.HandleFunc("/", authenticator.Wrap(authHandlerFunc))
} else {
log.Println("WARNING: connections are not authenticated. STRONGLY consider using -htpasswd.")
if *flCalDav != "" {
http.HandleFunc("/caldav", caldav.RequestHandler)
}
http.Handle("/", h)
}
addr := fmt.Sprintf(":%d", *flPort)
@@ -85,23 +97,23 @@ type passThroughFS struct {
root string
}
func (ptfs *passThroughFS) Mkdir(name string, perm os.FileMode) error {
func (ptfs *passThroughFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
// TODO(vbatts) check for escaping the root directory
return os.Mkdir(filepath.Join(ptfs.root, name), perm)
}
func (ptfs *passThroughFS) OpenFile(name string, flag int, perm os.FileMode) (webdav.File, error) {
func (ptfs *passThroughFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
// TODO(vbatts) check for escaping the root directory
return os.OpenFile(filepath.Join(ptfs.root, name), flag, perm)
}
func (ptfs *passThroughFS) RemoveAll(name string) error {
func (ptfs *passThroughFS) RemoveAll(ctx context.Context, name string) error {
// TODO(vbatts) check for escaping the root directory
return os.RemoveAll(filepath.Join(ptfs.root, name))
}
func (ptfs *passThroughFS) Rename(oldName, newName string) error {
func (ptfs *passThroughFS) Rename(ctx context.Context, oldName, newName string) error {
// TODO(vbatts) check for escaping the root directory
return os.Rename(filepath.Join(ptfs.root, oldName), filepath.Join(ptfs.root, newName))
}
func (ptfs *passThroughFS) Stat(name string) (os.FileInfo, error) {
func (ptfs *passThroughFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
// TODO(vbatts) check for escaping the root directory
return os.Stat(filepath.Join(ptfs.root, name))
}
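The hunk above adds a ctx context.Context parameter to each passThroughFS method to match the context-aware webdav.FileSystem interface in golang.org/x/net/webdav. The TODOs about escaping the root directory remain open; the following is a minimal sketch of one possible check (the resolveUnderRoot helper is hypothetical and not part of this commit):

```go
package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
)

// resolveUnderRoot is a hypothetical helper sketching the root-escape check
// the TODOs refer to: treat the WebDAV name as a rooted slash path, clean it,
// join it onto root, and refuse anything that would land outside root.
func resolveUnderRoot(root, name string) (string, error) {
	p := filepath.Join(root, filepath.FromSlash(path.Clean("/"+name)))
	cleanRoot := filepath.Clean(root)
	if p != cleanRoot && !strings.HasPrefix(p, cleanRoot+string(filepath.Separator)) {
		return "", os.ErrPermission
	}
	return p, nil
}

func main() {
	// "../etc/passwd" is cleaned to "/etc/passwd" before joining, so the
	// result stays under the root instead of escaping it.
	fmt.Println(resolveUnderRoot("/srv/dav", "../etc/passwd"))
}
```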

5
vendor/github.com/abbot/go-http-auth/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
*~
*.a
*.6
*.out
_testmain.go

178
vendor/github.com/abbot/go-http-auth/LICENSE generated vendored Normal file

@@ -0,0 +1,178 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

12
vendor/github.com/abbot/go-http-auth/Makefile generated vendored Normal file

@@ -0,0 +1,12 @@
include $(GOROOT)/src/Make.inc
TARG=auth_digest
GOFILES=\
auth.go\
digest.go\
basic.go\
misc.go\
md5crypt.go\
users.go\
include $(GOROOT)/src/Make.pkg

71
vendor/github.com/abbot/go-http-auth/README.md generated vendored Normal file

@@ -0,0 +1,71 @@
HTTP Authentication implementation in Go
========================================
This is an implementation of HTTP Basic and HTTP Digest authentication
in the Go language. It is designed as a simple wrapper for
http.HandlerFunc functions.
Features
--------
* Supports HTTP Basic and HTTP Digest authentication.
* Supports htpasswd and htdigest formatted files.
* Automatic reloading of password files.
* Pluggable interface for user/password storage.
* Supports MD5, SHA1 and BCrypt for Basic authentication password storage.
* Configurable Digest nonce cache size with expiration.
* Wrapper for legacy http handlers (http.HandlerFunc interface)
Example usage
-------------
This is a complete working example for Basic auth:
package main
import (
"fmt"
"net/http"
auth "github.com/abbot/go-http-auth"
)
func Secret(user, realm string) string {
if user == "john" {
// password is "hello"
return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
}
return ""
}
func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
fmt.Fprintf(w, "<html><body><h1>Hello, %s!</h1></body></html>", r.Username)
}
func main() {
authenticator := auth.NewBasicAuthenticator("example.com", Secret)
http.HandleFunc("/", authenticator.Wrap(handle))
http.ListenAndServe(":8080", nil)
}
See more examples in the "examples" directory.
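For comparison, here is a minimal Digest-auth sketch built from the same package's NewDigestAuthenticator and HtdigestFileProvider (shown in digest.go and users.go below). The htdigest file name and realm match the test.htdigest fixture included in this commit:

```go
package main

import (
	"fmt"
	"net/http"

	auth "github.com/abbot/go-http-auth"
)

func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
	fmt.Fprintf(w, "<html><body><h1>Hello, %s!</h1></body></html>", r.Username)
}

func main() {
	// test.htdigest holds lines of the form "user:realm:HA1".
	secrets := auth.HtdigestFileProvider("test.htdigest")
	authenticator := auth.NewDigestAuthenticator("example.com", secrets)
	http.HandleFunc("/", authenticator.Wrap(handle))
	http.ListenAndServe(":8080", nil)
}
```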
Legal
-----
This module is developed under Apache 2.0 license, and can be used for
open and proprietary projects.
Copyright 2012-2013 Lev Shamardin
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file or any other part of this project except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.

109
vendor/github.com/abbot/go-http-auth/auth.go generated vendored Normal file

@@ -0,0 +1,109 @@
// Package auth is an implementation of HTTP Basic and HTTP Digest authentication.
package auth
import (
"net/http"
"golang.org/x/net/context"
)
/*
Request handlers must take AuthenticatedRequest instead of http.Request
*/
type AuthenticatedRequest struct {
http.Request
/*
Authenticated user name. Current API implies that Username is
never empty, which means that authentication is always done
before calling the request handler.
*/
Username string
}
/*
AuthenticatedHandlerFunc is like http.HandlerFunc, but takes
AuthenticatedRequest instead of http.Request
*/
type AuthenticatedHandlerFunc func(http.ResponseWriter, *AuthenticatedRequest)
/*
Authenticator wraps an AuthenticatedHandlerFunc with
authentication-checking code.
Typical Authenticator usage is something like:
authenticator := SomeAuthenticator(...)
http.HandleFunc("/", authenticator(my_handler))
Authenticator wrapper checks the user authentication and calls the
wrapped function only after authentication has succeeded. Otherwise,
it returns a handler which initiates the authentication procedure.
*/
type Authenticator func(AuthenticatedHandlerFunc) http.HandlerFunc
// Info contains authentication information for the request.
type Info struct {
// Authenticated is set to true when request was authenticated
// successfully, i.e. username and password passed in request did
// pass the check.
Authenticated bool
// Username contains a user name passed in the request when
// Authenticated is true. Its value is undefined if Authenticated
// is false.
Username string
// ResponseHeaders contains extra headers that must be set by server
// when sending back HTTP response.
ResponseHeaders http.Header
}
// UpdateHeaders updates headers with this Info's ResponseHeaders. It is
// safe to call this function on nil Info.
func (i *Info) UpdateHeaders(headers http.Header) {
if i == nil {
return
}
for k, values := range i.ResponseHeaders {
for _, v := range values {
headers.Add(k, v)
}
}
}
type key int // used for context keys
var infoKey key = 0
type AuthenticatorInterface interface {
// NewContext returns a new context carrying authentication
// information extracted from the request.
NewContext(ctx context.Context, r *http.Request) context.Context
// Wrap returns an http.HandlerFunc which wraps
// AuthenticatedHandlerFunc with this authenticator's
// authentication checks.
Wrap(AuthenticatedHandlerFunc) http.HandlerFunc
}
// FromContext returns authentication information from the context or
// nil if no such information is present.
func FromContext(ctx context.Context) *Info {
info, ok := ctx.Value(infoKey).(*Info)
if !ok {
return nil
}
return info
}
// AuthUsernameHeader is the header set by JustCheck functions. It
// contains an authenticated username (if authentication was
// successful).
const AuthUsernameHeader = "X-Authenticated-Username"
func JustCheck(auth AuthenticatorInterface, wrapped http.HandlerFunc) http.HandlerFunc {
return auth.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
ar.Header.Set(AuthUsernameHeader, ar.Username)
wrapped(w, &ar.Request)
})
}
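A short sketch of the context half of this API (not part of the vendored file): NewContext stores an *Info in the request context and FromContext reads it back, so a plain http.Handler can branch on authentication state. r.Context() satisfies the vendored golang.org/x/net/context.Context interface on Go 1.7+, and the test.htpasswd path refers to the fixture included below.

```go
package main

import (
	"fmt"
	"net/http"

	auth "github.com/abbot/go-http-auth"
)

func main() {
	a := auth.NewBasicAuthenticator("example.com", auth.HtpasswdFileProvider("test.htpasswd"))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ctx := a.NewContext(r.Context(), r)
		info := auth.FromContext(ctx)
		info.UpdateHeaders(w.Header()) // adds WWW-Authenticate when not authenticated
		if info == nil || !info.Authenticated {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		fmt.Fprintln(w, "hello,", info.Username)
	})
	http.ListenAndServe(":8080", nil)
}
```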

163
vendor/github.com/abbot/go-http-auth/basic.go generated vendored Normal file

@@ -0,0 +1,163 @@
package auth
import (
"bytes"
"crypto/sha1"
"crypto/subtle"
"encoding/base64"
"errors"
"net/http"
"strings"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
)
type compareFunc func(hashedPassword, password []byte) error
var (
errMismatchedHashAndPassword = errors.New("mismatched hash and password")
compareFuncs = []struct {
prefix string
compare compareFunc
}{
{"", compareMD5HashAndPassword}, // default compareFunc
{"{SHA}", compareShaHashAndPassword},
// Bcrypt is complicated. According to crypt(3) from
// crypt_blowfish version 1.3 (fetched from
// http://www.openwall.com/crypt/crypt_blowfish-1.3.tar.gz), there
// are three different hash prefixes: "$2a$", used by versions up
// to 1.0.4, and "$2x$" and "$2y$", used in all later
// versions. "$2a$" has a known bug, "$2x$" was added as a
// migration path for systems with "$2a$" prefix and still has a
// bug, and only "$2y$" should be used by modern systems. The bug
// has something to do with handling of 8-bit characters. Since
// both "$2a$" and "$2x$" are deprecated, we are handling them the
// same way as "$2y$", which will yield correct results for 7-bit
// character passwords, but is wrong for 8-bit character
// passwords. You have to upgrade to "$2y$" if you want sane 8-bit
// character password support with bcrypt. To add to the mess,
// OpenBSD 5.5 introduced "$2b$" prefix, which behaves exactly
// like "$2y$" according to the same source.
{"$2a$", bcrypt.CompareHashAndPassword},
{"$2b$", bcrypt.CompareHashAndPassword},
{"$2x$", bcrypt.CompareHashAndPassword},
{"$2y$", bcrypt.CompareHashAndPassword},
}
)
type BasicAuth struct {
Realm string
Secrets SecretProvider
// Headers used by authenticator. Set to ProxyHeaders to use with
// proxy server. When nil, NormalHeaders are used.
Headers *Headers
}
// check that BasicAuth implements AuthenticatorInterface
var _ = (AuthenticatorInterface)((*BasicAuth)(nil))
/*
Checks the username/password combination from the request. Returns
either an empty string (authentication failed) or the name of the
authenticated user.
Supports MD5, SHA1 and bcrypt password entries.
*/
func (a *BasicAuth) CheckAuth(r *http.Request) string {
s := strings.SplitN(r.Header.Get(a.Headers.V().Authorization), " ", 2)
if len(s) != 2 || s[0] != "Basic" {
return ""
}
b, err := base64.StdEncoding.DecodeString(s[1])
if err != nil {
return ""
}
pair := strings.SplitN(string(b), ":", 2)
if len(pair) != 2 {
return ""
}
user, password := pair[0], pair[1]
secret := a.Secrets(user, a.Realm)
if secret == "" {
return ""
}
compare := compareFuncs[0].compare
for _, cmp := range compareFuncs[1:] {
if strings.HasPrefix(secret, cmp.prefix) {
compare = cmp.compare
break
}
}
if compare([]byte(secret), []byte(password)) != nil {
return ""
}
return pair[0]
}
func compareShaHashAndPassword(hashedPassword, password []byte) error {
d := sha1.New()
d.Write(password)
if subtle.ConstantTimeCompare(hashedPassword[5:], []byte(base64.StdEncoding.EncodeToString(d.Sum(nil)))) != 1 {
return errMismatchedHashAndPassword
}
return nil
}
func compareMD5HashAndPassword(hashedPassword, password []byte) error {
parts := bytes.SplitN(hashedPassword, []byte("$"), 4)
if len(parts) != 4 {
return errMismatchedHashAndPassword
}
magic := []byte("$" + string(parts[1]) + "$")
salt := parts[2]
if subtle.ConstantTimeCompare(hashedPassword, MD5Crypt(password, salt, magic)) != 1 {
return errMismatchedHashAndPassword
}
return nil
}
/*
http.Handler for BasicAuth which initiates the authentication process
(or requires reauthentication).
*/
func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
w.Header().Set(contentType, a.Headers.V().UnauthContentType)
w.Header().Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`)
w.WriteHeader(a.Headers.V().UnauthCode)
w.Write([]byte(a.Headers.V().UnauthResponse))
}
/*
Wrap returns a function which wraps an
AuthenticatedHandlerFunc, converting it to an http.HandlerFunc. This
wrapper function checks the authentication and either sends back
required authentication headers, or calls the wrapped function with
authenticated username in the AuthenticatedRequest.
*/
func (a *BasicAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if username := a.CheckAuth(r); username == "" {
a.RequireAuth(w, r)
} else {
ar := &AuthenticatedRequest{Request: *r, Username: username}
wrapped(w, ar)
}
}
}
// NewContext returns a context carrying authentication information for the request.
func (a *BasicAuth) NewContext(ctx context.Context, r *http.Request) context.Context {
info := &Info{Username: a.CheckAuth(r), ResponseHeaders: make(http.Header)}
info.Authenticated = (info.Username != "")
if !info.Authenticated {
info.ResponseHeaders.Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`)
}
return context.WithValue(ctx, infoKey, info)
}
func NewBasicAuthenticator(realm string, secrets SecretProvider) *BasicAuth {
return &BasicAuth{Realm: realm, Secrets: secrets}
}
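The bcrypt prefixes accepted above pair naturally with the vendored golang.org/x/crypto/bcrypt package. A minimal sketch (not part of this commit) of generating an htpasswd entry that CheckAuth will accept; the user name is illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// GenerateFromPassword emits a "$2a$"-prefixed hash, which the
	// compareFuncs table above routes to bcrypt.CompareHashAndPassword.
	hash, err := bcrypt.GenerateFromPassword([]byte("hello"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// One htpasswd line is "user:hash".
	fmt.Printf("someuser:%s\n", hash)
}
```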

274
vendor/github.com/abbot/go-http-auth/digest.go generated vendored Normal file

@@ -0,0 +1,274 @@
package auth
import (
"crypto/subtle"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
type digest_client struct {
nc uint64
last_seen int64
}
type DigestAuth struct {
Realm string
Opaque string
Secrets SecretProvider
PlainTextSecrets bool
IgnoreNonceCount bool
// Headers used by authenticator. Set to ProxyHeaders to use with
// proxy server. When nil, NormalHeaders are used.
Headers *Headers
/*
Approximate size of Client's Cache. When actual number of
tracked client nonces exceeds
ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2
older entries are purged.
*/
ClientCacheSize int
ClientCacheTolerance int
clients map[string]*digest_client
mutex sync.Mutex
}
// check that DigestAuth implements AuthenticatorInterface
var _ = (AuthenticatorInterface)((*DigestAuth)(nil))
type digest_cache_entry struct {
nonce string
last_seen int64
}
type digest_cache []digest_cache_entry
func (c digest_cache) Less(i, j int) bool {
return c[i].last_seen < c[j].last_seen
}
func (c digest_cache) Len() int {
return len(c)
}
func (c digest_cache) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}
/*
Remove count oldest entries from DigestAuth.clients
*/
func (a *DigestAuth) Purge(count int) {
entries := make([]digest_cache_entry, 0, len(a.clients))
for nonce, client := range a.clients {
entries = append(entries, digest_cache_entry{nonce, client.last_seen})
}
cache := digest_cache(entries)
sort.Sort(cache)
for _, client := range cache[:count] {
delete(a.clients, client.nonce)
}
}
/*
http.Handler for DigestAuth which initiates the authentication process
(or requires reauthentication).
*/
func (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
a.Purge(a.ClientCacheTolerance * 2)
}
nonce := RandomKey()
a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
w.Header().Set(contentType, a.Headers.V().UnauthContentType)
w.Header().Set(a.Headers.V().Authenticate,
fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
a.Realm, nonce, a.Opaque))
w.WriteHeader(a.Headers.V().UnauthCode)
w.Write([]byte(a.Headers.V().UnauthResponse))
}
/*
Parse Authorization header from the http.Request. Returns a map of
auth parameters or nil if the header is not a valid parsable Digest
auth header.
*/
func DigestAuthParams(authorization string) map[string]string {
s := strings.SplitN(authorization, " ", 2)
if len(s) != 2 || s[0] != "Digest" {
return nil
}
return ParsePairs(s[1])
}
/*
Check if request contains valid authentication data. Returns a pair
of username, authinfo where username is the name of the authenticated
user or an empty string and authinfo is the contents for the optional
Authentication-Info response header.
*/
func (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {
da.mutex.Lock()
defer da.mutex.Unlock()
username = ""
authinfo = nil
auth := DigestAuthParams(r.Header.Get(da.Headers.V().Authorization))
if auth == nil {
return "", nil
}
// RFC2617 Section 3.2.1 specifies that unset value of algorithm in
// WWW-Authenticate Response header should be treated as
// "MD5". According to section 3.2.2 the "algorithm" value in
// subsequent Request Authorization header must be set to whatever
// was supplied in the WWW-Authenticate Response header. This
// implementation always returns an algorithm in WWW-Authenticate
// header, however there seems to be broken clients in the wild
// which do not set the algorithm. Assume the unset algorithm in
// Authorization header to be equal to MD5.
if _, ok := auth["algorithm"]; !ok {
auth["algorithm"] = "MD5"
}
if da.Opaque != auth["opaque"] || auth["algorithm"] != "MD5" || auth["qop"] != "auth" {
return "", nil
}
// Check if the requested URI matches auth header
if r.RequestURI != auth["uri"] {
// We allow auth["uri"] to be a full path prefix of request-uri
// for some reason lost in history, which is probably wrong, but
// used to be like that for quite some time
// (https://tools.ietf.org/html/rfc2617#section-3.2.2 explicitly
// says that auth["uri"] is the request-uri).
//
// TODO: make an option to allow only strict checking.
switch u, err := url.Parse(auth["uri"]); {
case err != nil:
return "", nil
case r.URL == nil:
return "", nil
case len(u.Path) > len(r.URL.Path):
return "", nil
case !strings.HasPrefix(r.URL.Path, u.Path):
return "", nil
}
}
HA1 := da.Secrets(auth["username"], da.Realm)
if da.PlainTextSecrets {
HA1 = H(auth["username"] + ":" + da.Realm + ":" + HA1)
}
HA2 := H(r.Method + ":" + auth["uri"])
KD := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], HA2}, ":"))
if subtle.ConstantTimeCompare([]byte(KD), []byte(auth["response"])) != 1 {
return "", nil
}
// At this point crypto checks are completed and validated.
// Now check if the session is valid.
nc, err := strconv.ParseUint(auth["nc"], 16, 64)
if err != nil {
return "", nil
}
if client, ok := da.clients[auth["nonce"]]; !ok {
return "", nil
} else {
if client.nc != 0 && client.nc >= nc && !da.IgnoreNonceCount {
return "", nil
}
client.nc = nc
client.last_seen = time.Now().UnixNano()
}
resp_HA2 := H(":" + auth["uri"])
rspauth := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], resp_HA2}, ":"))
info := fmt.Sprintf(`qop="auth", rspauth="%s", cnonce="%s", nc="%s"`, rspauth, auth["cnonce"], auth["nc"])
return auth["username"], &info
}
/*
Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth
*/
const DefaultClientCacheSize = 1000
const DefaultClientCacheTolerance = 100
/*
Wrap returns an Authenticator which uses HTTP Digest
authentication. Arguments:
realm: The authentication realm.
secrets: SecretProvider which must return HA1 digests for the same
realm as above.
*/
func (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if username, authinfo := a.CheckAuth(r); username == "" {
a.RequireAuth(w, r)
} else {
ar := &AuthenticatedRequest{Request: *r, Username: username}
if authinfo != nil {
w.Header().Set(a.Headers.V().AuthInfo, *authinfo)
}
wrapped(w, ar)
}
}
}
/*
JustCheck returns a function which converts an http.HandlerFunc into an
http.HandlerFunc that requires authentication. The username is passed as
an extra X-Authenticated-Username header.
*/
func (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {
return a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
ar.Header.Set(AuthUsernameHeader, ar.Username)
wrapped(w, &ar.Request)
})
}
// NewContext returns a context carrying authentication information for the request.
func (a *DigestAuth) NewContext(ctx context.Context, r *http.Request) context.Context {
username, authinfo := a.CheckAuth(r)
info := &Info{Username: username, ResponseHeaders: make(http.Header)}
if username != "" {
info.Authenticated = true
info.ResponseHeaders.Set(a.Headers.V().AuthInfo, *authinfo)
} else {
// return back digest WWW-Authenticate header
if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
a.Purge(a.ClientCacheTolerance * 2)
}
nonce := RandomKey()
a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
info.ResponseHeaders.Set(a.Headers.V().Authenticate,
fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
a.Realm, nonce, a.Opaque))
}
return context.WithValue(ctx, infoKey, info)
}
func NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {
da := &DigestAuth{
Opaque: RandomKey(),
Realm: realm,
Secrets: secrets,
PlainTextSecrets: false,
ClientCacheSize: DefaultClientCacheSize,
ClientCacheTolerance: DefaultClientCacheTolerance,
clients: map[string]*digest_client{}}
return da
}
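For reference, the HA1 value a DigestAuth SecretProvider is expected to return is the MD5 of "user:realm:password", the same form the PlainTextSecrets branch of CheckAuth computes. A minimal sketch (not part of the vendored file) using the exported H helper from misc.go below; the password value is illustrative:

```go
package main

import (
	"fmt"

	auth "github.com/abbot/go-http-auth"
)

// ha1 computes the secret stored in an htdigest file: MD5("user:realm:password").
func ha1(user, realm, password string) string {
	return auth.H(user + ":" + realm + ":" + password)
}

func main() {
	// The user and realm match the test.htdigest fixture; the password is made up.
	fmt.Println(ha1("test", "example.com", "password"))
}
```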

92
vendor/github.com/abbot/go-http-auth/md5crypt.go generated vendored Normal file

@@ -0,0 +1,92 @@
package auth
import "crypto/md5"
import "strings"
const itoa64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var md5_crypt_swaps = [16]int{12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11}
type MD5Entry struct {
Magic, Salt, Hash []byte
}
func NewMD5Entry(e string) *MD5Entry {
parts := strings.SplitN(e, "$", 4)
if len(parts) != 4 {
return nil
}
return &MD5Entry{
Magic: []byte("$" + parts[1] + "$"),
Salt: []byte(parts[2]),
Hash: []byte(parts[3]),
}
}
/*
MD5 password crypt implementation
*/
func MD5Crypt(password, salt, magic []byte) []byte {
d := md5.New()
d.Write(password)
d.Write(magic)
d.Write(salt)
d2 := md5.New()
d2.Write(password)
d2.Write(salt)
d2.Write(password)
for i, mixin := 0, d2.Sum(nil); i < len(password); i++ {
d.Write([]byte{mixin[i%16]})
}
for i := len(password); i != 0; i >>= 1 {
if i&1 == 0 {
d.Write([]byte{password[0]})
} else {
d.Write([]byte{0})
}
}
final := d.Sum(nil)
for i := 0; i < 1000; i++ {
d2 := md5.New()
if i&1 == 0 {
d2.Write(final)
} else {
d2.Write(password)
}
if i%3 != 0 {
d2.Write(salt)
}
if i%7 != 0 {
d2.Write(password)
}
if i&1 == 0 {
d2.Write(password)
} else {
d2.Write(final)
}
final = d2.Sum(nil)
}
result := make([]byte, 0, 22)
v := uint(0)
bits := uint(0)
for _, i := range md5_crypt_swaps {
v |= (uint(final[i]) << bits)
for bits = bits + 8; bits > 6; bits -= 6 {
result = append(result, itoa64[v&0x3f])
v >>= 6
}
}
result = append(result, itoa64[v&0x3f])
return append(append(append(magic, salt...), '$'), result...)
}
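A quick verification sketch (not part of the vendored file) showing how MD5Crypt pairs with NewMD5Entry; the entry and its password come from the README example earlier in this commit:

```go
package main

import (
	"crypto/subtle"
	"fmt"

	auth "github.com/abbot/go-http-auth"
)

func main() {
	// The README above states the password for this $1$ entry is "hello".
	entry := "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
	e := auth.NewMD5Entry(entry)
	computed := auth.MD5Crypt([]byte("hello"), e.Salt, e.Magic)
	fmt.Println(subtle.ConstantTimeCompare([]byte(entry), computed) == 1) // expected: true
}
```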

141
vendor/github.com/abbot/go-http-auth/misc.go generated vendored Normal file

@@ -0,0 +1,141 @@
package auth
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/base64"
"fmt"
"net/http"
"strings"
)
// RandomKey returns a random 16-byte base64 alphabet string
func RandomKey() string {
k := make([]byte, 12)
for bytes := 0; bytes < len(k); {
n, err := rand.Read(k[bytes:])
if err != nil {
panic("rand.Read() failed")
}
bytes += n
}
return base64.StdEncoding.EncodeToString(k)
}
// H function for MD5 algorithm (returns a lower-case hex MD5 digest)
func H(data string) string {
digest := md5.New()
digest.Write([]byte(data))
return fmt.Sprintf("%x", digest.Sum(nil))
}
// ParseList parses a comma-separated list of values as described by
// RFC 2068 and returns list elements.
//
// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
// which was ported from urllib2.parse_http_list, from the Python
// standard library.
func ParseList(value string) []string {
var list []string
var escape, quote bool
b := new(bytes.Buffer)
for _, r := range value {
switch {
case escape:
b.WriteRune(r)
escape = false
case quote:
if r == '\\' {
escape = true
} else {
if r == '"' {
quote = false
}
b.WriteRune(r)
}
case r == ',':
list = append(list, strings.TrimSpace(b.String()))
b.Reset()
case r == '"':
quote = true
b.WriteRune(r)
default:
b.WriteRune(r)
}
}
// Append last part.
if s := b.String(); s != "" {
list = append(list, strings.TrimSpace(s))
}
return list
}
// ParsePairs extracts key/value pairs from a comma-separated list of
// values as described by RFC 2068 and returns a map[key]value. The
// resulting values are unquoted. If a list element doesn't contain a
// "=", the key is the element itself and the value is an empty
// string.
//
// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
func ParsePairs(value string) map[string]string {
m := make(map[string]string)
for _, pair := range ParseList(strings.TrimSpace(value)) {
if i := strings.Index(pair, "="); i < 0 {
m[pair] = ""
} else {
v := pair[i+1:]
if v[0] == '"' && v[len(v)-1] == '"' {
// Unquote it.
v = v[1 : len(v)-1]
}
m[pair[:i]] = v
}
}
return m
}
// Headers contains header and error codes used by authenticator.
type Headers struct {
Authenticate string // WWW-Authenticate
Authorization string // Authorization
AuthInfo string // Authentication-Info
UnauthCode int // 401
UnauthContentType string // text/plain
UnauthResponse string // Unauthorized.
}
// V returns NormalHeaders when h is nil, or h otherwise. This allows
// uninitialized *Headers values to be used in structs.
func (h *Headers) V() *Headers {
if h == nil {
return NormalHeaders
}
return h
}
var (
// NormalHeaders are the regular Headers used by an HTTP Server for
// request authentication.
NormalHeaders = &Headers{
Authenticate: "WWW-Authenticate",
Authorization: "Authorization",
AuthInfo: "Authentication-Info",
UnauthCode: http.StatusUnauthorized,
UnauthContentType: "text/plain",
UnauthResponse: fmt.Sprintf("%d %s\n", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)),
}
// ProxyHeaders are Headers used by an HTTP Proxy server for proxy
// access authentication.
ProxyHeaders = &Headers{
Authenticate: "Proxy-Authenticate",
Authorization: "Proxy-Authorization",
AuthInfo: "Proxy-Authentication-Info",
UnauthCode: http.StatusProxyAuthRequired,
UnauthContentType: "text/plain",
UnauthResponse: fmt.Sprintf("%d %s\n", http.StatusProxyAuthRequired, http.StatusText(http.StatusProxyAuthRequired)),
}
)
const contentType = "Content-Type"
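A usage sketch (not part of the vendored file) of ParsePairs on a Digest-style header value, illustrating the unquoting behavior described above; the header contents are made up:

```go
package main

import (
	"fmt"

	auth "github.com/abbot/go-http-auth"
)

func main() {
	pairs := auth.ParsePairs(`username="test", realm="example.com", nonce="abc123", uri="/", qop=auth`)
	// Quoted values come back unquoted; an element without "=" would map to "".
	fmt.Println(pairs["username"], pairs["realm"], pairs["qop"]) // test example.com auth
}
```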

1
vendor/github.com/abbot/go-http-auth/test.htdigest generated vendored Normal file

@@ -0,0 +1 @@
test:example.com:aa78524fceb0e50fd8ca96dd818b8cf9

4
vendor/github.com/abbot/go-http-auth/test.htpasswd generated vendored Normal file

@@ -0,0 +1,4 @@
test:{SHA}qvTGHdzF6KLavt4PO0gs2a6pQ00=
test2:$apr1$a0j62R97$mYqFkloXH0/UOaUnAiV2b0
test16:$apr1$JI4wh3am$AmhephVqLTUyAVpFQeHZC0
test3:$2y$05$ih3C91zUBSTFcAh2mQnZYuob0UOZVEf16wl/ukgjDhjvj.xgM1WwS

154
vendor/github.com/abbot/go-http-auth/users.go generated vendored Normal file

@@ -0,0 +1,154 @@
package auth
import (
"encoding/csv"
"os"
"sync"
)
/*
SecretProvider is used by authenticators. Takes user name and realm
as an argument, returns secret required for authentication (HA1 for
digest authentication, properly encrypted password for basic).
Returning an empty string means failing the authentication.
*/
type SecretProvider func(user, realm string) string
/*
Common functions for file auto-reloading
*/
type File struct {
Path string
Info os.FileInfo
/* must be set in inherited types during initialization */
Reload func()
mu sync.Mutex
}
func (f *File) ReloadIfNeeded() {
info, err := os.Stat(f.Path)
if err != nil {
panic(err)
}
f.mu.Lock()
defer f.mu.Unlock()
if f.Info == nil || f.Info.ModTime() != info.ModTime() {
f.Info = info
f.Reload()
}
}
/*
Structure used for htdigest file authentication. Users map realms to
maps of users to their HA1 digests.
*/
type HtdigestFile struct {
File
Users map[string]map[string]string
mu sync.RWMutex
}
func reload_htdigest(hf *HtdigestFile) {
r, err := os.Open(hf.Path)
if err != nil {
panic(err)
}
csv_reader := csv.NewReader(r)
csv_reader.Comma = ':'
csv_reader.Comment = '#'
csv_reader.TrimLeadingSpace = true
records, err := csv_reader.ReadAll()
if err != nil {
panic(err)
}
hf.mu.Lock()
defer hf.mu.Unlock()
hf.Users = make(map[string]map[string]string)
for _, record := range records {
_, exists := hf.Users[record[1]]
if !exists {
hf.Users[record[1]] = make(map[string]string)
}
hf.Users[record[1]][record[0]] = record[2]
}
}
/*
SecretProvider implementation based on htdigest-formatted files. Will
reload htdigest file on changes. Will panic on syntax errors in
htdigest files.
*/
func HtdigestFileProvider(filename string) SecretProvider {
hf := &HtdigestFile{File: File{Path: filename}}
hf.Reload = func() { reload_htdigest(hf) }
return func(user, realm string) string {
hf.ReloadIfNeeded()
hf.mu.RLock()
defer hf.mu.RUnlock()
_, exists := hf.Users[realm]
if !exists {
return ""
}
digest, exists := hf.Users[realm][user]
if !exists {
return ""
}
return digest
}
}
/*
Structure used for htpasswd file authentication. Users map users to
their salted encrypted password
*/
type HtpasswdFile struct {
File
Users map[string]string
mu sync.RWMutex
}
func reload_htpasswd(h *HtpasswdFile) {
r, err := os.Open(h.Path)
if err != nil {
panic(err)
}
csv_reader := csv.NewReader(r)
csv_reader.Comma = ':'
csv_reader.Comment = '#'
csv_reader.TrimLeadingSpace = true
records, err := csv_reader.ReadAll()
if err != nil {
panic(err)
}
h.mu.Lock()
defer h.mu.Unlock()
h.Users = make(map[string]string)
for _, record := range records {
h.Users[record[0]] = record[1]
}
}
/*
SecretProvider implementation based on htpasswd-formatted files. Will
reload htpasswd file on changes. Will panic on syntax errors in
htpasswd files. Realm argument of the SecretProvider is ignored.
*/
func HtpasswdFileProvider(filename string) SecretProvider {
h := &HtpasswdFile{File: File{Path: filename}}
h.Reload = func() { reload_htpasswd(h) }
return func(user, realm string) string {
h.ReloadIfNeeded()
h.mu.RLock()
password, exists := h.Users[user]
h.mu.RUnlock()
if !exists {
return ""
}
return password
}
}
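Since SecretProvider is just a function type, the file-backed providers above can be swapped for any other storage. A minimal in-memory sketch (not part of the vendored file); the map contents reuse the MD5-crypt hash from the README example, and the user name is illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	auth "github.com/abbot/go-http-auth"
)

// mapSecretProvider returns the stored secret for a user, or "" to fail
// authentication. The realm argument is ignored, as it is for the
// htpasswd-backed provider above.
func mapSecretProvider(users map[string]string) auth.SecretProvider {
	return func(user, realm string) string {
		return users[user]
	}
}

func main() {
	secrets := mapSecretProvider(map[string]string{
		// The hash is the README's MD5-crypt example; its password is "hello".
		"john": "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1",
	})
	a := auth.NewBasicAuthenticator("example.com", secrets)
	http.HandleFunc("/", a.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
		fmt.Fprintln(w, "hello,", r.Username)
	}))
	http.ListenAndServe(":8080", nil)
}
```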

16
vendor/github.com/beevik/etree/.travis.yml generated vendored Normal file

@@ -0,0 +1,16 @@
language: go
sudo: false
go:
- 1.4.2
- 1.5.1
- 1.6
- tip
matrix:
allow_failures:
- go: tip
script:
- go vet ./...
- go test -v ./...

8
vendor/github.com/beevik/etree/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,8 @@
Brett Vickers (beevik)
Felix Geisendörfer (felixge)
Kamil Kisiel (kisielk)
Graham King (grahamking)
Matt Smith (ma314smith)
Michal Jemala (michaljemala)
Nicolas Piganeau (npiganeau)
Chris Brown (ccbrown)

24
vendor/github.com/beevik/etree/LICENSE generated vendored Normal file

@@ -0,0 +1,24 @@
Copyright 2015 Brett Vickers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

203
vendor/github.com/beevik/etree/README.md generated vendored Normal file

@@ -0,0 +1,203 @@
[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree)
[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree)
etree
=====
The etree package is a lightweight, pure go package that expresses XML in
the form of an element tree. Its design was inspired by the Python
[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
module. Some of the package's features include:
* Represents XML documents as trees of elements for easy traversal.
* Imports, serializes, modifies or creates XML documents from scratch.
* Writes and reads XML to/from files, byte slices, strings and io interfaces.
* Performs simple or complex searches with lightweight XPath-like query APIs.
* Auto-indents XML using spaces or tabs for better readability.
* Implemented in pure go; depends only on standard go libraries.
* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
package.
### Creating an XML document
The following example creates an XML document from scratch using the etree
package and outputs its indented contents to stdout.
```go
doc := etree.NewDocument()
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
people := doc.CreateElement("People")
people.CreateComment("These are all known people")
jon := people.CreateElement("Person")
jon.CreateAttr("name", "Jon")
sally := people.CreateElement("Person")
sally.CreateAttr("name", "Sally")
doc.Indent(2)
doc.WriteTo(os.Stdout)
```
Output:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="style.xsl"?>
<People>
<!--These are all known people-->
<Person name="Jon"/>
<Person name="Sally"/>
</People>
```
### Reading an XML file
Suppose you have a file on disk called `bookstore.xml` containing the
following data:
```xml
<bookstore xmlns:p="urn:schemas-books-com:prices">
<book category="COOKING">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<p:price>30.00</p:price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<p:price>29.99</p:price>
</book>
<book category="WEB">
<title lang="en">XQuery Kick Start</title>
<author>James McGovern</author>
<author>Per Bothner</author>
<author>Kurt Cagle</author>
<author>James Linn</author>
<author>Vaidyanathan Nagarajan</author>
<year>2003</year>
<p:price>49.99</p:price>
</book>
<book category="WEB">
<title lang="en">Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<p:price>39.95</p:price>
</book>
</bookstore>
```
This code reads the file's contents into an etree document.
```go
doc := etree.NewDocument()
if err := doc.ReadFromFile("bookstore.xml"); err != nil {
panic(err)
}
```
You can also read XML from a string, a byte slice, or an `io.Reader`.
### Processing elements and attributes
This example illustrates several ways to access elements and attributes using
etree selection queries.
```go
root := doc.SelectElement("bookstore")
fmt.Println("ROOT element:", root.Tag)
for _, book := range root.SelectElements("book") {
fmt.Println("CHILD element:", book.Tag)
if title := book.SelectElement("title"); title != nil {
lang := title.SelectAttrValue("lang", "unknown")
fmt.Printf(" TITLE: %s (%s)\n", title.Text(), lang)
}
for _, attr := range book.Attr {
fmt.Printf(" ATTR: %s=%s\n", attr.Key, attr.Value)
}
}
```
Output:
```
ROOT element: bookstore
CHILD element: book
TITLE: Everyday Italian (en)
ATTR: category=COOKING
CHILD element: book
TITLE: Harry Potter (en)
ATTR: category=CHILDREN
CHILD element: book
TITLE: XQuery Kick Start (en)
ATTR: category=WEB
CHILD element: book
TITLE: Learning XML (en)
ATTR: category=WEB
```
### Path queries
This example uses etree's path functions to select all book titles that fall
into the category of 'WEB'. The double-slash prefix in the path causes the
search for book elements to occur recursively; book elements may appear at any
level of the XML hierarchy.
```go
for _, t := range doc.FindElements("//book[@category='WEB']/title") {
fmt.Println("Title:", t.Text())
}
```
Output:
```
Title: XQuery Kick Start
Title: Learning XML
```
This example finds the first book element under the root bookstore element and
outputs the tag and text of each of its child elements.
```go
for _, e := range doc.FindElements("./bookstore/book[1]/*") {
fmt.Printf("%s: %s\n", e.Tag, e.Text())
}
```
Output:
```
title: Everyday Italian
author: Giada De Laurentiis
year: 2005
price: 30.00
```
This example finds all books with a price of 49.99 and outputs their titles.
```go
path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
for _, e := range doc.FindElementsPath(path) {
fmt.Println(e.Text())
}
```
Output:
```
XQuery Kick Start
```
Note that this example uses the FindElementsPath function, which takes as an
argument a pre-compiled path object. Use precompiled paths when you plan to
search with the same path more than once.
### Other features
These are just a few examples of the things the etree package can do. See the
[documentation](http://godoc.org/github.com/beevik/etree) for a complete
description of its capabilities.
### Contributing
This project accepts contributions. Just fork the repo and submit a pull
request!

943
vendor/github.com/beevik/etree/etree.go generated vendored Normal file

@@ -0,0 +1,943 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package etree provides XML services through an Element Tree
// abstraction.
package etree
import (
"bufio"
"bytes"
"encoding/xml"
"errors"
"io"
"os"
"strings"
)
const (
// NoIndent is used with Indent to disable all indenting.
NoIndent = -1
)
// ErrXML is returned when XML parsing fails due to incorrect formatting.
var ErrXML = errors.New("etree: invalid XML format")
// ReadSettings allow for changing the default behavior of the ReadFrom*
// methods.
type ReadSettings struct {
// CharsetReader to be passed to standard xml.Decoder. Default: nil.
CharsetReader func(charset string, input io.Reader) (io.Reader, error)
// Permissive allows input containing common mistakes such as missing tags
// or attribute values. Default: false.
Permissive bool
}
// newReadSettings creates a default ReadSettings record.
func newReadSettings() ReadSettings {
return ReadSettings{}
}
// WriteSettings allow for changing the serialization behavior of the WriteTo*
// methods.
type WriteSettings struct {
// CanonicalEndTags forces the production of XML end tags, even for
// elements that have no child elements. Default: false.
CanonicalEndTags bool
// CanonicalText forces the production of XML character references for
// text data characters &, <, and >. If false, XML character references
// are also produced for " and '. Default: false.
CanonicalText bool
// CanonicalAttrVal forces the production of XML character references for
// attribute value characters &, < and ". If false, XML character
// references are also produced for > and '. Default: false.
CanonicalAttrVal bool
}
// newWriteSettings creates a default WriteSettings record.
func newWriteSettings() WriteSettings {
return WriteSettings{
CanonicalEndTags: false,
CanonicalText: false,
CanonicalAttrVal: false,
}
}
// A Token is an empty interface that represents an Element, CharData,
// Comment, Directive, or ProcInst.
type Token interface {
Parent() *Element
dup(parent *Element) Token
setParent(parent *Element)
writeTo(w *bufio.Writer, s *WriteSettings)
}
// A Document is a container holding a complete XML hierarchy. Its embedded
// element contains zero or more children, one of which is usually the root
// element. The embedded element may include other children such as
// processing instructions or BOM CharData tokens.
type Document struct {
Element
ReadSettings ReadSettings
WriteSettings WriteSettings
}
// An Element represents an XML element, its attributes, and its child tokens.
type Element struct {
Space, Tag string // namespace and tag
Attr []Attr // key-value attribute pairs
Child []Token // child tokens (elements, comments, etc.)
parent *Element // parent element
}
// An Attr represents a key-value attribute of an XML element.
type Attr struct {
Space, Key string // The attribute's namespace and key
Value string // The attribute value string
}
// CharData represents character data within XML.
type CharData struct {
Data string
parent *Element
whitespace bool
}
// A Comment represents an XML comment.
type Comment struct {
Data string
parent *Element
}
// A Directive represents an XML directive.
type Directive struct {
Data string
parent *Element
}
// A ProcInst represents an XML processing instruction.
type ProcInst struct {
Target string
Inst string
parent *Element
}
// NewDocument creates an XML document without a root element.
func NewDocument() *Document {
return &Document{
Element{Child: make([]Token, 0)},
newReadSettings(),
newWriteSettings(),
}
}
// Copy returns a recursive, deep copy of the document.
func (d *Document) Copy() *Document {
return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings}
}
// Root returns the root element of the document, or nil if there is no root
// element.
func (d *Document) Root() *Element {
for _, t := range d.Child {
if c, ok := t.(*Element); ok {
return c
}
}
return nil
}
// SetRoot replaces the document's root element with e. If the document
// already has a root when this function is called, then the document's
// original root is unbound first. If the element e is bound to another
// document (or to another element within a document), then it is unbound
// first.
func (d *Document) SetRoot(e *Element) {
if e.parent != nil {
e.parent.RemoveChild(e)
}
e.setParent(&d.Element)
for i, t := range d.Child {
if _, ok := t.(*Element); ok {
t.setParent(nil)
d.Child[i] = e
return
}
}
d.Child = append(d.Child, e)
}
// ReadFrom reads XML from the reader r into the document d. It returns the
// number of bytes read and any error encountered.
func (d *Document) ReadFrom(r io.Reader) (n int64, err error) {
return d.Element.readFrom(r, d.ReadSettings)
}
// ReadFromFile reads XML from the file named filename into the document d.
func (d *Document) ReadFromFile(filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
_, err = d.ReadFrom(f)
return err
}
// ReadFromBytes reads XML from the byte slice b into the document d.
func (d *Document) ReadFromBytes(b []byte) error {
_, err := d.ReadFrom(bytes.NewReader(b))
return err
}
// ReadFromString reads XML from the string s into the document d.
func (d *Document) ReadFromString(s string) error {
_, err := d.ReadFrom(strings.NewReader(s))
return err
}
// WriteTo serializes an XML document into the writer w. It
// returns the number of bytes written and any error encountered.
func (d *Document) WriteTo(w io.Writer) (n int64, err error) {
cw := newCountWriter(w)
b := bufio.NewWriter(cw)
for _, c := range d.Child {
c.writeTo(b, &d.WriteSettings)
}
err, n = b.Flush(), cw.bytes
return
}
// WriteToFile serializes an XML document into the file named
// filename.
func (d *Document) WriteToFile(filename string) error {
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
_, err = d.WriteTo(f)
return err
}
// WriteToBytes serializes the XML document into a slice of
// bytes.
func (d *Document) WriteToBytes() (b []byte, err error) {
var buf bytes.Buffer
if _, err = d.WriteTo(&buf); err != nil {
return
}
return buf.Bytes(), nil
}
// WriteToString serializes the XML document into a string.
func (d *Document) WriteToString() (s string, err error) {
var b []byte
if b, err = d.WriteToBytes(); err != nil {
return
}
return string(b), nil
}
type indentFunc func(depth int) string
// Indent modifies the document's element tree by inserting CharData entities
// containing carriage returns and indentation. The amount of indentation per
// depth level is given as spaces. Pass etree.NoIndent for spaces if you want
// no indentation at all.
func (d *Document) Indent(spaces int) {
var indent indentFunc
switch {
case spaces < 0:
indent = func(depth int) string { return "" }
default:
indent = func(depth int) string { return crIndent(depth*spaces, crsp) }
}
d.Element.indent(0, indent)
}
// IndentTabs modifies the document's element tree by inserting CharData
// entities containing carriage returns and tabs for indentation. One tab is
// used per indentation level.
func (d *Document) IndentTabs() {
indent := func(depth int) string { return crIndent(depth, crtab) }
d.Element.indent(0, indent)
}
// NewElement creates an unparented element with the specified tag. The tag
// may be prefixed by a namespace and a colon.
func NewElement(tag string) *Element {
space, stag := spaceDecompose(tag)
return newElement(space, stag, nil)
}
// newElement is a helper function that creates an element and binds it to
// a parent element if possible.
func newElement(space, tag string, parent *Element) *Element {
e := &Element{
Space: space,
Tag: tag,
Attr: make([]Attr, 0),
Child: make([]Token, 0),
parent: parent,
}
if parent != nil {
parent.addChild(e)
}
return e
}
// Copy creates a recursive, deep copy of the element and all its attributes
// and children. The returned element has no parent but can be parented to
// another element using AddChild, or to a document using SetRoot.
func (e *Element) Copy() *Element {
var parent *Element
return e.dup(parent).(*Element)
}
// Text returns the characters immediately following the element's
// opening tag.
func (e *Element) Text() string {
if len(e.Child) == 0 {
return ""
}
if cd, ok := e.Child[0].(*CharData); ok {
return cd.Data
}
return ""
}
// SetText replaces an element's subsidiary CharData text with a new string.
func (e *Element) SetText(text string) {
if len(e.Child) > 0 {
if cd, ok := e.Child[0].(*CharData); ok {
cd.Data = text
return
}
}
cd := newCharData(text, false, e)
copy(e.Child[1:], e.Child[0:])
e.Child[0] = cd
}
// CreateElement creates an element with the specified tag and adds it as the
// last child element of the element e. The tag may be prefixed by a namespace
// and a colon.
func (e *Element) CreateElement(tag string) *Element {
space, stag := spaceDecompose(tag)
return newElement(space, stag, e)
}
// AddChild adds the token t as the last child of element e. If token t was
// already the child of another element, it is first removed from its current
// parent element.
func (e *Element) AddChild(t Token) {
if t.Parent() != nil {
t.Parent().RemoveChild(t)
}
t.setParent(e)
e.addChild(t)
}
// InsertChild inserts the token t before e's existing child token ex. If ex
// is nil (or if ex is not a child of e), then t is added to the end of e's
// child token list. If token t was already the child of another element, it
// is first removed from its current parent element.
func (e *Element) InsertChild(ex Token, t Token) {
if t.Parent() != nil {
t.Parent().RemoveChild(t)
}
t.setParent(e)
for i, c := range e.Child {
if c == ex {
e.Child = append(e.Child, nil)
copy(e.Child[i+1:], e.Child[i:])
e.Child[i] = t
return
}
}
e.addChild(t)
}
// RemoveChild attempts to remove the token t from element e's list of
// children. If the token t is a child of e, then it is returned. Otherwise,
// nil is returned.
func (e *Element) RemoveChild(t Token) Token {
for i, c := range e.Child {
if c == t {
e.Child = append(e.Child[:i], e.Child[i+1:]...)
c.setParent(nil)
return t
}
}
return nil
}
// readFrom reads XML from the reader ri, parsing tokens and adding them as
// children of element e. It returns the number of bytes read and any error
// encountered.
func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) {
r := newCountReader(ri)
dec := xml.NewDecoder(r)
dec.CharsetReader = settings.CharsetReader
dec.Strict = !settings.Permissive
var stack stack
stack.push(e)
for {
t, err := dec.RawToken()
switch {
case err == io.EOF:
return r.bytes, nil
case err != nil:
return r.bytes, err
case stack.empty():
return r.bytes, ErrXML
}
top := stack.peek().(*Element)
switch t := t.(type) {
case xml.StartElement:
e := newElement(t.Name.Space, t.Name.Local, top)
for _, a := range t.Attr {
e.createAttr(a.Name.Space, a.Name.Local, a.Value)
}
stack.push(e)
case xml.EndElement:
stack.pop()
case xml.CharData:
data := string(t)
newCharData(data, isWhitespace(data), top)
case xml.Comment:
newComment(string(t), top)
case xml.Directive:
newDirective(string(t), top)
case xml.ProcInst:
newProcInst(t.Target, string(t.Inst), top)
}
}
}
// SelectAttr finds an element attribute matching the requested key and
// returns it if found. The key may be prefixed by a namespace and a colon.
func (e *Element) SelectAttr(key string) *Attr {
space, skey := spaceDecompose(key)
for i, a := range e.Attr {
if spaceMatch(space, a.Space) && skey == a.Key {
return &e.Attr[i]
}
}
return nil
}
// SelectAttrValue finds an element attribute matching the requested key and
// returns its value if found. The key may be prefixed by a namespace and a
// colon. If the key is not found, the dflt value is returned instead.
func (e *Element) SelectAttrValue(key, dflt string) string {
space, skey := spaceDecompose(key)
for _, a := range e.Attr {
if spaceMatch(space, a.Space) && skey == a.Key {
return a.Value
}
}
return dflt
}
// ChildElements returns all elements that are children of element e.
func (e *Element) ChildElements() []*Element {
var elements []*Element
for _, t := range e.Child {
if c, ok := t.(*Element); ok {
elements = append(elements, c)
}
}
return elements
}
// SelectElement returns the first child element with the given tag. The tag
// may be prefixed by a namespace and a colon.
func (e *Element) SelectElement(tag string) *Element {
space, stag := spaceDecompose(tag)
for _, t := range e.Child {
if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
return c
}
}
return nil
}
// SelectElements returns a slice of all child elements with the given tag.
// The tag may be prefixed by a namespace and a colon.
func (e *Element) SelectElements(tag string) []*Element {
space, stag := spaceDecompose(tag)
var elements []*Element
for _, t := range e.Child {
if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
elements = append(elements, c)
}
}
return elements
}
// FindElement returns the first element matched by the XPath-like path
// string. Panics if an invalid path string is supplied.
func (e *Element) FindElement(path string) *Element {
return e.FindElementPath(MustCompilePath(path))
}
// FindElementPath returns the first element matched by the Path object.
func (e *Element) FindElementPath(path Path) *Element {
p := newPather()
elements := p.traverse(e, path)
switch {
case len(elements) > 0:
return elements[0]
default:
return nil
}
}
// FindElements returns a slice of elements matched by the XPath-like path
// string. Panics if an invalid path string is supplied.
func (e *Element) FindElements(path string) []*Element {
return e.FindElementsPath(MustCompilePath(path))
}
// FindElementsPath returns a slice of elements matched by the Path object.
func (e *Element) FindElementsPath(path Path) []*Element {
p := newPather()
return p.traverse(e, path)
}
// indent recursively inserts proper indentation between an
// XML element's child tokens.
func (e *Element) indent(depth int, indent indentFunc) {
e.stripIndent()
n := len(e.Child)
if n == 0 {
return
}
oldChild := e.Child
e.Child = make([]Token, 0, n*2+1)
isCharData, firstNonCharData := false, true
for _, c := range oldChild {
// Insert CR+indent before child if it's not character data.
// Exceptions: when it's the first non-character-data child, or when
// the child is at root depth.
_, isCharData = c.(*CharData)
if !isCharData {
if !firstNonCharData || depth > 0 {
newCharData(indent(depth), true, e)
}
firstNonCharData = false
}
e.addChild(c)
// Recursively process child elements.
if ce, ok := c.(*Element); ok {
ce.indent(depth+1, indent)
}
}
// Insert CR+indent before the element's close tag.
if !isCharData {
if !firstNonCharData || depth > 0 {
newCharData(indent(depth-1), true, e)
}
}
}
// stripIndent removes any previously inserted indentation.
func (e *Element) stripIndent() {
// Count the number of non-indent child tokens
n := len(e.Child)
for _, c := range e.Child {
if cd, ok := c.(*CharData); ok && cd.whitespace {
n--
}
}
if n == len(e.Child) {
return
}
// Strip out indent CharData
newChild := make([]Token, n)
j := 0
for _, c := range e.Child {
if cd, ok := c.(*CharData); ok && cd.whitespace {
continue
}
newChild[j] = c
j++
}
e.Child = newChild
}
// dup duplicates the element.
func (e *Element) dup(parent *Element) Token {
ne := &Element{
Space: e.Space,
Tag: e.Tag,
Attr: make([]Attr, len(e.Attr)),
Child: make([]Token, len(e.Child)),
parent: parent,
}
for i, t := range e.Child {
ne.Child[i] = t.dup(ne)
}
for i, a := range e.Attr {
ne.Attr[i] = a
}
return ne
}
// Parent returns the element token's parent element, or nil if it has no
// parent.
func (e *Element) Parent() *Element {
return e.parent
}
// setParent replaces the element token's parent.
func (e *Element) setParent(parent *Element) {
e.parent = parent
}
// writeTo serializes the element to the writer w.
func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) {
w.WriteByte('<')
if e.Space != "" {
w.WriteString(e.Space)
w.WriteByte(':')
}
w.WriteString(e.Tag)
for _, a := range e.Attr {
w.WriteByte(' ')
a.writeTo(w, s)
}
if len(e.Child) > 0 {
w.WriteString(">")
for _, c := range e.Child {
c.writeTo(w, s)
}
w.Write([]byte{'<', '/'})
if e.Space != "" {
w.WriteString(e.Space)
w.WriteByte(':')
}
w.WriteString(e.Tag)
w.WriteByte('>')
} else {
if s.CanonicalEndTags {
w.Write([]byte{'>', '<', '/'})
if e.Space != "" {
w.WriteString(e.Space)
w.WriteByte(':')
}
w.WriteString(e.Tag)
w.WriteByte('>')
} else {
w.Write([]byte{'/', '>'})
}
}
}
// addChild adds a child token to the element e.
func (e *Element) addChild(t Token) {
e.Child = append(e.Child, t)
}
// CreateAttr creates an attribute and adds it to element e. The key may be
// prefixed by a namespace and a colon. If an attribute with the key already
// exists, its value is replaced.
func (e *Element) CreateAttr(key, value string) *Attr {
space, skey := spaceDecompose(key)
return e.createAttr(space, skey, value)
}
// createAttr is a helper function that creates attributes.
func (e *Element) createAttr(space, key, value string) *Attr {
for i, a := range e.Attr {
if space == a.Space && key == a.Key {
e.Attr[i].Value = value
return &e.Attr[i]
}
}
a := Attr{space, key, value}
e.Attr = append(e.Attr, a)
return &e.Attr[len(e.Attr)-1]
}
// RemoveAttr removes and returns the first attribute of the element whose key
// matches the given key. The key may be prefixed by a namespace and a colon.
// If an equal attribute does not exist, nil is returned.
func (e *Element) RemoveAttr(key string) *Attr {
space, skey := spaceDecompose(key)
for i, a := range e.Attr {
if space == a.Space && skey == a.Key {
e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
return &a
}
}
return nil
}
var xmlReplacerNormal = strings.NewReplacer(
"&", "&amp;",
"<", "&lt;",
">", "&gt;",
"'", "&apos;",
`"`, "&quot;",
)
var xmlReplacerCanonicalText = strings.NewReplacer(
"&", "&amp;",
"<", "&lt;",
">", "&gt;",
"\r", "&#xD;",
)
var xmlReplacerCanonicalAttrVal = strings.NewReplacer(
"&", "&amp;",
"<", "&lt;",
`"`, "&quot;",
"\t", "&#x9;",
"\n", "&#xA;",
"\r", "&#xD;",
)
// writeTo serializes the attribute to the writer.
func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) {
if a.Space != "" {
w.WriteString(a.Space)
w.WriteByte(':')
}
w.WriteString(a.Key)
w.WriteString(`="`)
var r *strings.Replacer
if s.CanonicalAttrVal {
r = xmlReplacerCanonicalAttrVal
} else {
r = xmlReplacerNormal
}
w.WriteString(r.Replace(a.Value))
w.WriteByte('"')
}
// NewCharData creates a parentless XML character data entity.
func NewCharData(data string) *CharData {
return newCharData(data, false, nil)
}
// newCharData creates an XML character data entity and binds it to a parent
// element. If parent is nil, the CharData token remains unbound.
func newCharData(data string, whitespace bool, parent *Element) *CharData {
c := &CharData{
Data: data,
whitespace: whitespace,
parent: parent,
}
if parent != nil {
parent.addChild(c)
}
return c
}
// CreateCharData creates an XML character data entity and adds it as a child
// of element e.
func (e *Element) CreateCharData(data string) *CharData {
return newCharData(data, false, e)
}
// dup duplicates the character data.
func (c *CharData) dup(parent *Element) Token {
return &CharData{
Data: c.Data,
whitespace: c.whitespace,
parent: parent,
}
}
// Parent returns the character data token's parent element, or nil if it has
// no parent.
func (c *CharData) Parent() *Element {
return c.parent
}
// setParent replaces the character data token's parent.
func (c *CharData) setParent(parent *Element) {
c.parent = parent
}
// writeTo serializes the character data entity to the writer.
func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) {
var r *strings.Replacer
if s.CanonicalText {
r = xmlReplacerCanonicalText
} else {
r = xmlReplacerNormal
}
w.WriteString(r.Replace(c.Data))
}
// NewComment creates a parentless XML comment.
func NewComment(comment string) *Comment {
return newComment(comment, nil)
}
// newComment creates an XML comment and binds it to a parent element. If
// parent is nil, the Comment remains unbound.
func newComment(comment string, parent *Element) *Comment {
c := &Comment{
Data: comment,
parent: parent,
}
if parent != nil {
parent.addChild(c)
}
return c
}
// CreateComment creates an XML comment and adds it as a child of element e.
func (e *Element) CreateComment(comment string) *Comment {
return newComment(comment, e)
}
// dup duplicates the comment.
func (c *Comment) dup(parent *Element) Token {
return &Comment{
Data: c.Data,
parent: parent,
}
}
// Parent returns comment token's parent element, or nil if it has no parent.
func (c *Comment) Parent() *Element {
return c.parent
}
// setParent replaces the comment token's parent.
func (c *Comment) setParent(parent *Element) {
c.parent = parent
}
// writeTo serializes the comment to the writer.
func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
w.WriteString("<!--")
w.WriteString(c.Data)
w.WriteString("-->")
}
// NewDirective creates a parentless XML directive.
func NewDirective(data string) *Directive {
return newDirective(data, nil)
}
// newDirective creates an XML directive and binds it to a parent element. If
// parent is nil, the Directive remains unbound.
func newDirective(data string, parent *Element) *Directive {
d := &Directive{
Data: data,
parent: parent,
}
if parent != nil {
parent.addChild(d)
}
return d
}
// CreateDirective creates an XML directive and adds it as the last child of
// element e.
func (e *Element) CreateDirective(data string) *Directive {
return newDirective(data, e)
}
// dup duplicates the directive.
func (d *Directive) dup(parent *Element) Token {
return &Directive{
Data: d.Data,
parent: parent,
}
}
// Parent returns directive token's parent element, or nil if it has no
// parent.
func (d *Directive) Parent() *Element {
return d.parent
}
// setParent replaces the directive token's parent.
func (d *Directive) setParent(parent *Element) {
d.parent = parent
}
// writeTo serializes the XML directive to the writer.
func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
w.WriteString("<!")
w.WriteString(d.Data)
w.WriteString(">")
}
// NewProcInst creates a parentless XML processing instruction.
func NewProcInst(target, inst string) *ProcInst {
return newProcInst(target, inst, nil)
}
// newProcInst creates an XML processing instruction and binds it to a parent
// element. If parent is nil, the ProcInst remains unbound.
func newProcInst(target, inst string, parent *Element) *ProcInst {
p := &ProcInst{
Target: target,
Inst: inst,
parent: parent,
}
if parent != nil {
parent.addChild(p)
}
return p
}
// CreateProcInst creates a processing instruction and adds it as a child of
// element e.
func (e *Element) CreateProcInst(target, inst string) *ProcInst {
return newProcInst(target, inst, e)
}
// dup duplicates the procinst.
func (p *ProcInst) dup(parent *Element) Token {
return &ProcInst{
Target: p.Target,
Inst: p.Inst,
parent: parent,
}
}
// Parent returns processing instruction token's parent element, or nil if it
// has no parent.
func (p *ProcInst) Parent() *Element {
return p.parent
}
// setParent replaces the processing instruction token's parent.
func (p *ProcInst) setParent(parent *Element) {
p.parent = parent
}
// writeTo serializes the processing instruction to the writer.
func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) {
w.WriteString("<?")
w.WriteString(p.Target)
if p.Inst != "" {
w.WriteByte(' ')
w.WriteString(p.Inst)
}
w.WriteString("?>")
}

188
vendor/github.com/beevik/etree/helpers.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"io"
"strings"
)
// A simple stack
type stack struct {
data []interface{}
}
func (s *stack) empty() bool {
return len(s.data) == 0
}
func (s *stack) push(value interface{}) {
s.data = append(s.data, value)
}
func (s *stack) pop() interface{} {
value := s.data[len(s.data)-1]
s.data[len(s.data)-1] = nil
s.data = s.data[:len(s.data)-1]
return value
}
func (s *stack) peek() interface{} {
return s.data[len(s.data)-1]
}
// A fifo is a simple first-in-first-out queue.
type fifo struct {
data []interface{}
head, tail int
}
func (f *fifo) add(value interface{}) {
if f.len()+1 >= len(f.data) {
f.grow()
}
f.data[f.tail] = value
if f.tail++; f.tail == len(f.data) {
f.tail = 0
}
}
func (f *fifo) remove() interface{} {
value := f.data[f.head]
f.data[f.head] = nil
if f.head++; f.head == len(f.data) {
f.head = 0
}
return value
}
func (f *fifo) len() int {
if f.tail >= f.head {
return f.tail - f.head
}
return len(f.data) - f.head + f.tail
}
func (f *fifo) grow() {
c := len(f.data) * 2
if c == 0 {
c = 4
}
buf, count := make([]interface{}, c), f.len()
if f.tail >= f.head {
copy(buf[0:count], f.data[f.head:f.tail])
} else {
hindex := len(f.data) - f.head
copy(buf[0:hindex], f.data[f.head:])
copy(buf[hindex:count], f.data[:f.tail])
}
f.data, f.head, f.tail = buf, 0, count
}
// countReader implements a proxy reader that counts the number of
// bytes read from its encapsulated reader.
type countReader struct {
r io.Reader
bytes int64
}
func newCountReader(r io.Reader) *countReader {
return &countReader{r: r}
}
func (cr *countReader) Read(p []byte) (n int, err error) {
b, err := cr.r.Read(p)
cr.bytes += int64(b)
return b, err
}
// countWriter implements a proxy writer that counts the number of
// bytes written by its encapsulated writer.
type countWriter struct {
w io.Writer
bytes int64
}
func newCountWriter(w io.Writer) *countWriter {
return &countWriter{w: w}
}
func (cw *countWriter) Write(p []byte) (n int, err error) {
b, err := cw.w.Write(p)
cw.bytes += int64(b)
return b, err
}
// isWhitespace returns true if the byte slice contains only
// whitespace characters.
func isWhitespace(s string) bool {
for i := 0; i < len(s); i++ {
if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
return false
}
}
return true
}
// spaceMatch returns true if namespace a is the empty string
// or if namespace a equals namespace b.
func spaceMatch(a, b string) bool {
switch {
case a == "":
return true
default:
return a == b
}
}
// spaceDecompose breaks a namespace:tag identifier at the ':'
// and returns the two parts.
func spaceDecompose(str string) (space, key string) {
colon := strings.IndexByte(str, ':')
if colon == -1 {
return "", str
}
return str[:colon], str[colon+1:]
}
// Strings used by crIndent
const (
crsp = "\n "
crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
)
// crIndent returns a carriage return followed by n copies of the
// first non-CR character in the source string.
func crIndent(n int, source string) string {
switch {
case n < 0:
return source[:1]
case n < len(source):
return source[:n+1]
default:
return source + strings.Repeat(source[1:2], n-len(source)+1)
}
}
// nextIndex returns the index of the next occurrence of sep in s,
// starting from offset. It returns -1 if the sep string is not found.
func nextIndex(s, sep string, offset int) int {
switch i := strings.Index(s[offset:], sep); i {
case -1:
return -1
default:
return offset + i
}
}
// isInteger returns true if the string s contains an integer.
func isInteger(s string) bool {
for i := 0; i < len(s); i++ {
if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') {
return false
}
}
return true
}

516
vendor/github.com/beevik/etree/path.go generated vendored Normal file
View File

@ -0,0 +1,516 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"strconv"
"strings"
)
/*
A Path is an object that represents an optimized version of an
XPath-like search string. Although path strings are XPath-like,
only the following limited syntax is supported:
. Selects the current element
.. Selects the parent of the current element
* Selects all child elements
// Selects all descendants of the current element
tag Selects all child elements with the given tag
[#] Selects the element of the given index (1-based,
negative starts from the end)
[@attrib] Selects all elements with the given attribute
[@attrib='val'] Selects all elements with the given attribute set to val
[tag] Selects all elements with a child element named tag
[tag='val'] Selects all elements with a child element named tag
and text matching val
[text()] Selects all elements with non-empty text
[text()='val'] Selects all elements whose text matches val
Examples:
Select the title elements of all descendant book elements having a
'category' attribute of 'WEB':
//book[@category='WEB']/title
Select the first book element with a title child containing the text
'Great Expectations':
.//book[title='Great Expectations'][1]
Starting from the current element, select all children of book elements
with an attribute 'language' set to 'english':
./book/*[@language='english']
Starting from the current element, select all children of book elements
containing the text 'special':
./book/*[text()='special']
Select all descendant book elements whose title element has an attribute
'language' set to 'french':
//book/title[@language='french']/..
*/
type Path struct {
segments []segment
}
// ErrPath is returned by path functions when an invalid etree path is provided.
type ErrPath string
// Error returns the string describing a path error.
func (err ErrPath) Error() string {
return "etree: " + string(err)
}
// CompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree.
func CompilePath(path string) (Path, error) {
var comp compiler
segments := comp.parsePath(path)
if comp.err != ErrPath("") {
return Path{nil}, comp.err
}
return Path{segments}, nil
}
// MustCompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree. Panics if an error
// occurs. Use this function to create Paths when you know the path is
// valid (i.e., if it's hard-coded).
func MustCompilePath(path string) Path {
p, err := CompilePath(path)
if err != nil {
panic(err)
}
return p
}
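// Illustrative sketch (example path and data only) of compiling a path once
// and reusing it with the FindElementsPath helper defined on Element:
//
//	path := MustCompilePath("//book[@category='WEB']/title")
//	doc := NewDocument()
//	if err := doc.ReadFromString(xmlData); err != nil { // xmlData: any XML string
//		panic(err)
//	}
//	for _, title := range doc.FindElementsPath(path) {
//		fmt.Println(title.Text())
//	}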
// A segment is a portion of a path between "/" characters.
// It contains one selector and zero or more [filters].
type segment struct {
sel selector
filters []filter
}
func (seg *segment) apply(e *Element, p *pather) {
seg.sel.apply(e, p)
for _, f := range seg.filters {
f.apply(p)
}
}
// A selector selects XML elements for consideration by the
// path traversal.
type selector interface {
apply(e *Element, p *pather)
}
// A filter pares down a list of candidate XML elements based
// on a path filter in [brackets].
type filter interface {
apply(p *pather)
}
// A pather is a helper object that traverses an element tree using
// a Path object. It collects and deduplicates all elements matching
// the path query.
type pather struct {
queue fifo
results []*Element
inResults map[*Element]bool
candidates []*Element
scratch []*Element // used by filters
}
// A node represents an element and the remaining path segments that
// should be applied against it by the pather.
type node struct {
e *Element
segments []segment
}
func newPather() *pather {
return &pather{
results: make([]*Element, 0),
inResults: make(map[*Element]bool),
candidates: make([]*Element, 0),
scratch: make([]*Element, 0),
}
}
// traverse follows the path from the element e, collecting
// and then returning all elements that match the path's selectors
// and filters.
func (p *pather) traverse(e *Element, path Path) []*Element {
for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
p.eval(p.queue.remove().(node))
}
return p.results
}
// eval evaluates the current path node by applying the remaining
// path's selector rules against the node's element.
func (p *pather) eval(n node) {
p.candidates = p.candidates[0:0]
seg, remain := n.segments[0], n.segments[1:]
seg.apply(n.e, p)
if len(remain) == 0 {
for _, c := range p.candidates {
if in := p.inResults[c]; !in {
p.inResults[c] = true
p.results = append(p.results, c)
}
}
} else {
for _, c := range p.candidates {
p.queue.add(node{c, remain})
}
}
}
// A compiler generates a compiled path from a path string.
type compiler struct {
err ErrPath
}
// parsePath parses an XPath-like string describing a path
// through an element tree and returns a slice of segment
// descriptors.
func (c *compiler) parsePath(path string) []segment {
// If path starts or ends with //, fix it
if strings.HasPrefix(path, "//") {
path = "." + path
}
if strings.HasSuffix(path, "//") {
path = path + "*"
}
// Paths cannot be absolute
if strings.HasPrefix(path, "/") {
c.err = ErrPath("paths cannot be absolute.")
return nil
}
// Split path into segment objects
var segments []segment
for _, s := range splitPath(path) {
segments = append(segments, c.parseSegment(s))
if c.err != ErrPath("") {
break
}
}
return segments
}
func splitPath(path string) []string {
pieces := make([]string, 0)
start := 0
inquote := false
for i := 0; i+1 <= len(path); i++ {
if path[i] == '\'' {
inquote = !inquote
} else if path[i] == '/' && !inquote {
pieces = append(pieces, path[start:i])
start = i + 1
}
}
return append(pieces, path[start:])
}
// parseSegment parses a path segment between / characters.
func (c *compiler) parseSegment(path string) segment {
pieces := strings.Split(path, "[")
seg := segment{
sel: c.parseSelector(pieces[0]),
filters: make([]filter, 0),
}
for i := 1; i < len(pieces); i++ {
fpath := pieces[i]
if fpath[len(fpath)-1] != ']' {
c.err = ErrPath("path has invalid filter [brackets].")
break
}
seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1]))
}
return seg
}
// parseSelector parses a selector at the start of a path segment.
func (c *compiler) parseSelector(path string) selector {
switch path {
case ".":
return new(selectSelf)
case "..":
return new(selectParent)
case "*":
return new(selectChildren)
case "":
return new(selectDescendants)
default:
return newSelectChildrenByTag(path)
}
}
// parseFilter parses a path filter contained within [brackets].
func (c *compiler) parseFilter(path string) filter {
if len(path) == 0 {
c.err = ErrPath("path contains an empty filter expression.")
return nil
}
// Filter contains [@attr='val'], [text()='val'], or [tag='val']?
eqindex := strings.Index(path, "='")
if eqindex >= 0 {
rindex := nextIndex(path, "'", eqindex+2)
if rindex != len(path)-1 {
c.err = ErrPath("path has mismatched filter quotes.")
return nil
}
switch {
case path[0] == '@':
return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex])
case strings.HasPrefix(path, "text()"):
return newFilterTextVal(path[eqindex+2 : rindex])
default:
return newFilterChildText(path[:eqindex], path[eqindex+2:rindex])
}
}
// Filter contains [@attr], [N], [tag] or [text()]
switch {
case path[0] == '@':
return newFilterAttr(path[1:])
case path == "text()":
return newFilterText()
case isInteger(path):
pos, _ := strconv.Atoi(path)
switch {
case pos > 0:
return newFilterPos(pos - 1)
default:
return newFilterPos(pos)
}
default:
return newFilterChild(path)
}
}
// selectSelf selects the current element into the candidate list.
type selectSelf struct{}
func (s *selectSelf) apply(e *Element, p *pather) {
p.candidates = append(p.candidates, e)
}
// selectParent selects the element's parent into the candidate list.
type selectParent struct{}
func (s *selectParent) apply(e *Element, p *pather) {
if e.parent != nil {
p.candidates = append(p.candidates, e.parent)
}
}
// selectChildren selects the element's child elements into the
// candidate list.
type selectChildren struct{}
func (s *selectChildren) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
p.candidates = append(p.candidates, c)
}
}
}
// selectDescendants selects all descendant child elements
// of the element into the candidate list.
type selectDescendants struct{}
func (s *selectDescendants) apply(e *Element, p *pather) {
var queue fifo
for queue.add(e); queue.len() > 0; {
e := queue.remove().(*Element)
p.candidates = append(p.candidates, e)
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
queue.add(c)
}
}
}
}
// selectChildrenByTag selects into the candidate list all child
// elements of the element having the specified tag.
type selectChildrenByTag struct {
space, tag string
}
func newSelectChildrenByTag(path string) *selectChildrenByTag {
s, l := spaceDecompose(path)
return &selectChildrenByTag{s, l}
}
func (s *selectChildrenByTag) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
p.candidates = append(p.candidates, c)
}
}
}
// filterPos filters the candidate list, keeping only the
// candidate at the specified index.
type filterPos struct {
index int
}
func newFilterPos(pos int) *filterPos {
return &filterPos{pos}
}
func (f *filterPos) apply(p *pather) {
if f.index >= 0 {
if f.index < len(p.candidates) {
p.scratch = append(p.scratch, p.candidates[f.index])
}
} else {
if -f.index <= len(p.candidates) {
p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttr filters the candidate list for elements having
// the specified attribute.
type filterAttr struct {
space, key string
}
func newFilterAttr(str string) *filterAttr {
s, l := spaceDecompose(str)
return &filterAttr{s, l}
}
func (f *filterAttr) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttrVal filters the candidate list for elements having
// the specified attribute with the specified value.
type filterAttrVal struct {
space, key, val string
}
func newFilterAttrVal(str, value string) *filterAttrVal {
s, l := spaceDecompose(str)
return &filterAttrVal{s, l, value}
}
func (f *filterAttrVal) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterText filters the candidate list for elements having text.
type filterText struct{}
func newFilterText() *filterText {
return &filterText{}
}
func (f *filterText) apply(p *pather) {
for _, c := range p.candidates {
if c.Text() != "" {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterTextVal filters the candidate list for elements having
// text equal to the specified value.
type filterTextVal struct {
val string
}
func newFilterTextVal(value string) *filterTextVal {
return &filterTextVal{value}
}
func (f *filterTextVal) apply(p *pather) {
for _, c := range p.candidates {
if c.Text() == f.val {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChild filters the candidate list for elements having
// a child element with the specified tag.
type filterChild struct {
space, tag string
}
func newFilterChild(str string) *filterChild {
s, l := spaceDecompose(str)
return &filterChild{s, l}
}
func (f *filterChild) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChildText filters the candidate list for elements having
// a child element with the specified tag and text.
type filterChildText struct {
space, tag, text string
}
func newFilterChildText(str, text string) *filterChildText {
s, l := spaceDecompose(str)
return &filterChildText{s, l, text}
}
func (f *filterChildText) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag &&
f.text == cc.Text() {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}

17
vendor/github.com/laurent22/ical-go/ical/calendar.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package ical
type Calendar struct {
Items []CalendarEvent
}
func (this *Calendar) Serialize() string {
serializer := calSerializer{
calendar: this,
buffer: new(strBuffer),
}
return serializer.serialize()
}
func (this *Calendar) ToICS() string {
return this.Serialize()
}

View File

@ -0,0 +1,50 @@
package ical
import (
"time"
)
type CalendarEvent struct {
Id string
Summary string
Description string
Location string
CreatedAtUTC *time.Time
ModifiedAtUTC *time.Time
StartAt *time.Time
EndAt *time.Time
}
func (this *CalendarEvent) StartAtUTC() *time.Time {
return inUTC(this.StartAt)
}
func (this *CalendarEvent) EndAtUTC() *time.Time {
return inUTC(this.EndAt)
}
func (this *CalendarEvent) Serialize() string {
buffer := new(strBuffer)
return this.serializeWithBuffer(buffer)
}
func (this *CalendarEvent) ToICS() string {
return this.Serialize()
}
func (this *CalendarEvent) serializeWithBuffer(buffer *strBuffer) string {
serializer := calEventSerializer{
event: this,
buffer: buffer,
}
return serializer.serialize()
}
func inUTC(t *time.Time) *time.Time {
if t == nil {
return nil
}
tUTC := t.UTC()
return &tUTC
}
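// Rough usage sketch (the times and IDs below are made up for illustration):
//
//	start := time.Date(2018, 4, 1, 10, 0, 0, 0, time.UTC)
//	end := start.Add(time.Hour)
//	event := CalendarEvent{Id: "event-1", Summary: "Team sync", StartAt: &start, EndAt: &end}
//	cal := Calendar{Items: []CalendarEvent{event}}
//	ics := cal.ToICS() // "BEGIN:VCALENDAR\nVERSION:2.0\nBEGIN:VEVENT\n..."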

18
vendor/github.com/laurent22/ical-go/ical/lib.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package ical
import (
"fmt"
"bytes"
)
type strBuffer struct {
buffer bytes.Buffer
}
func (b *strBuffer) Write(format string, elem ...interface{}) {
b.buffer.WriteString(fmt.Sprintf(format, elem...))
}
func (b *strBuffer) String() string {
return b.buffer.String()
}

168
vendor/github.com/laurent22/ical-go/ical/node.go generated vendored Normal file
View File

@ -0,0 +1,168 @@
package ical
import (
"time"
"regexp"
"strconv"
)
type Node struct {
Name string
Value string
Type int // 1 = Object, 0 = Name/Value
Parameters map[string]string
Children []*Node
}
func (this *Node) ChildrenByName(name string) []*Node {
var output []*Node
for _, child := range this.Children {
if child.Name == name {
output = append(output, child)
}
}
return output
}
func (this *Node) ChildByName(name string) *Node {
for _, child := range this.Children {
if child.Name == name {
return child
}
}
return nil
}
func (this *Node) PropString(name string, defaultValue string) string {
for _, child := range this.Children {
if child.Name == name {
return child.Value
}
}
return defaultValue
}
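// PropDate returns the value of the named property parsed as a timestamp. A
// TZID parameter, when present, selects the location used to parse the local
// "20060102T150405" form; otherwise the value is expected in the UTC
// "20060102T150405Z" form. It returns defaultValue when the property is
// missing and panics when the value cannot be parsed.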
func (this *Node) PropDate(name string, defaultValue time.Time) time.Time {
node := this.ChildByName(name)
if node == nil { return defaultValue }
tzid := node.Parameter("TZID", "")
var output time.Time
var err error
if tzid != "" {
loc, locErr := time.LoadLocation(tzid)
if locErr != nil { panic(locErr) }
output, err = time.ParseInLocation("20060102T150405", node.Value, loc)
} else {
output, err = time.Parse("20060102T150405Z", node.Value)
}
if err != nil { panic(err) }
return output
}
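// PropDuration returns the value of the named property parsed as a duration
// of the form PT#H#M#S (e.g. "PT1H30M" yields 1h30m). Missing or
// unrecognized values yield a zero duration.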
func (this *Node) PropDuration(name string) time.Duration {
durStr := this.PropString(name, "")
if durStr == "" {
return time.Duration(0)
}
durRgx := regexp.MustCompile("PT(?:([0-9]+)H)?(?:([0-9]+)M)?(?:([0-9]+)S)?")
matches := durRgx.FindStringSubmatch(durStr)
if len(matches) != 4 {
return time.Duration(0)
}
strToDuration := func(value string) time.Duration {
d := 0
if value != "" {
d, _ = strconv.Atoi(value)
}
return time.Duration(d)
}
hours := strToDuration(matches[1])
min := strToDuration(matches[2])
sec := strToDuration(matches[3])
return hours * time.Hour + min * time.Minute + sec * time.Second
}
func (this *Node) PropInt(name string, defaultValue int) int {
n := this.PropString(name, "")
if n == "" { return defaultValue }
output, err := strconv.Atoi(n)
if err != nil { panic(err) }
return output
}
func (this *Node) DigProperty(propPath... string) (string, bool) {
return this.dig("prop", propPath...)
}
func (this *Node) Parameter(name string, defaultValue string) string {
if len(this.Parameters) <= 0 { return defaultValue }
v, ok := this.Parameters[name]
if !ok { return defaultValue }
return v
}
func (this *Node) DigParameter(paramPath... string) (string, bool) {
return this.dig("param", paramPath...)
}
// Digs a value based on a given value path.
// valueType: can be "param" or "prop".
// valuePath: the path to access the value.
// Returns ("", false) when not found or (value, true) when found.
//
// Example:
// dig("param", "VCALENDAR", "VEVENT", "DTEND", "TYPE") -> It will search for "VCALENDAR" node,
// then a "VEVENT" node, then a "DTEND" node, then finally the "TYPE" param.
func (this *Node) dig(valueType string, valuePath... string) (string, bool) {
current := this
lastIndex := len(valuePath) - 1
for _, v := range valuePath[:lastIndex] {
current = current.ChildByName(v)
if current == nil {
return "", false
}
}
target := valuePath[lastIndex]
value := ""
if valueType == "param" {
value = current.Parameter(target, "")
} else if valueType == "prop" {
value = current.PropString(target, "")
}
if value == "" {
return "", false
}
return value, true
}
func (this *Node) String() string {
s := ""
if this.Type == 1 {
s += "===== " + this.Name
s += "\n"
} else {
s += this.Name
s += ":" + this.Value
s += "\n"
}
for _, child := range this.Children {
s += child.String()
}
if this.Type == 1 {
s += "===== /" + this.Name
s += "\n"
}
return s
}

106
vendor/github.com/laurent22/ical-go/ical/parsers.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package ical
import (
"log"
"errors"
"regexp"
"strings"
)
func ParseCalendar(data string) (*Node, error) {
r := regexp.MustCompile("([\r|\t| ]*\n[\r|\t| ]*)+")
lines := r.Split(strings.TrimSpace(data), -1)
node, _, err, _ := parseCalendarNode(lines, 0)
return node, err
}
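// Illustrative sketch (the ICS literal is a hand-written minimal example):
//
//	ics := "BEGIN:VCALENDAR\nBEGIN:VEVENT\nSUMMARY:Standup\nEND:VEVENT\nEND:VCALENDAR"
//	root, err := ParseCalendar(ics)
//	if err != nil {
//		panic(err)
//	}
//	event := root.ChildByName("VEVENT")
//	summary := event.PropString("SUMMARY", "") // "Standup"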
func parseCalendarNode(lines []string, lineIndex int) (*Node, bool, error, int) {
line := strings.TrimSpace(lines[lineIndex])
_ = log.Println
colonIndex := strings.Index(line, ":")
if colonIndex <= 0 {
return nil, false, errors.New("Invalid value/pair: " + line), lineIndex + 1
}
name := line[0:colonIndex]
splitted := strings.Split(name, ";")
var parameters map[string]string
if len(splitted) >= 2 {
name = splitted[0]
parameters = make(map[string]string)
for i := 1; i < len(splitted); i++ {
p := strings.Split(splitted[i], "=")
if len(p) != 2 { panic("Invalid parameter format: " + name) }
parameters[p[0]] = p[1]
}
}
value := line[colonIndex+1:len(line)]
if name == "BEGIN" {
node := new(Node)
node.Name = value
node.Type = 1
lineIndex = lineIndex + 1
for {
child, finished, _, newLineIndex := parseCalendarNode(lines, lineIndex)
if finished {
return node, false, nil, newLineIndex
} else {
if child != nil {
node.Children = append(node.Children, child)
}
lineIndex = newLineIndex
}
}
} else if name == "END" {
return nil, true, nil, lineIndex + 1
} else {
node := new(Node)
node.Name = name
if name == "DESCRIPTION" || name == "SUMMARY" {
text, newLineIndex := parseTextType(lines, lineIndex)
node.Value = text
node.Parameters = parameters
return node, false, nil, newLineIndex
} else {
node.Value = value
node.Parameters = parameters
return node, false, nil, lineIndex + 1
}
}
panic("Unreachable")
return nil, false, nil, lineIndex + 1
}
func parseTextType(lines []string, lineIndex int) (string, int) {
line := lines[lineIndex]
colonIndex := strings.Index(line, ":")
output := strings.TrimSpace(line[colonIndex+1:len(line)])
lineIndex++
for {
line := lines[lineIndex]
if line == "" || line[0] != ' ' {
return unescapeTextType(output), lineIndex
}
output += line[1:len(line)]
lineIndex++
}
return unescapeTextType(output), lineIndex
}
func escapeTextType(input string) string {
output := strings.Replace(input, "\\", "\\\\", -1)
output = strings.Replace(output, ";", "\\;", -1)
output = strings.Replace(output, ",", "\\,", -1)
output = strings.Replace(output, "\n", "\\n", -1)
return output
}
func unescapeTextType(s string) string {
s = strings.Replace(s, "\\;", ";", -1)
s = strings.Replace(s, "\\,", ",", -1)
s = strings.Replace(s, "\\n", "\n", -1)
s = strings.Replace(s, "\\\\", "\\", -1)
return s
}

View File

@ -0,0 +1,9 @@
package ical
const (
VCALENDAR = "VCALENDAR"
VEVENT = "VEVENT"
DTSTART = "DTSTART"
DTEND = "DTEND"
DURATION = "DURATION"
)

121
vendor/github.com/laurent22/ical-go/ical/serializers.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
package ical
import (
"time"
"strings"
)
type calSerializer struct {
calendar *Calendar
buffer *strBuffer
}
func (this *calSerializer) serialize() string {
this.serializeCalendar()
return strings.TrimSpace(this.buffer.String())
}
func (this *calSerializer) serializeCalendar() {
this.begin()
this.version()
this.items()
this.end()
}
func (this *calSerializer) begin() {
this.buffer.Write("BEGIN:VCALENDAR\n")
}
func (this *calSerializer) end() {
this.buffer.Write("END:VCALENDAR\n")
}
func (this *calSerializer) version() {
this.buffer.Write("VERSION:2.0\n")
}
func (this *calSerializer) items() {
for _, item := range this.calendar.Items {
item.serializeWithBuffer(this.buffer)
}
}
type calEventSerializer struct {
event *CalendarEvent
buffer *strBuffer
}
const (
eventSerializerTimeFormat = "20060102T150405Z"
)
func (this *calEventSerializer) serialize() string {
this.serializeEvent()
return strings.TrimSpace(this.buffer.String())
}
func (this *calEventSerializer) serializeEvent() {
this.begin()
this.uid()
this.created()
this.lastModified()
this.dtstart()
this.dtend()
this.summary()
this.description()
this.location()
this.end()
}
func (this *calEventSerializer) begin() {
this.buffer.Write("BEGIN:VEVENT\n")
}
func (this *calEventSerializer) end() {
this.buffer.Write("END:VEVENT\n")
}
func (this *calEventSerializer) uid() {
this.serializeStringProp("UID", this.event.Id)
}
func (this *calEventSerializer) summary() {
this.serializeStringProp("SUMMARY", this.event.Summary)
}
func (this *calEventSerializer) description() {
this.serializeStringProp("DESCRIPTION", this.event.Description)
}
func (this *calEventSerializer) location() {
this.serializeStringProp("LOCATION", this.event.Location)
}
func (this *calEventSerializer) dtstart() {
this.serializeTimeProp("DTSTART", this.event.StartAtUTC())
}
func (this *calEventSerializer) dtend() {
this.serializeTimeProp("DTEND", this.event.EndAtUTC())
}
func (this *calEventSerializer) created() {
this.serializeTimeProp("CREATED", this.event.CreatedAtUTC)
}
func (this *calEventSerializer) lastModified() {
this.serializeTimeProp("LAST-MODIFIED", this.event.ModifiedAtUTC)
}
func (this *calEventSerializer) serializeStringProp(name, value string) {
if value != "" {
escapedValue := escapeTextType(value)
this.buffer.Write("%s:%s\n", name, escapedValue)
}
}
func (this *calEventSerializer) serializeTimeProp(name string, value *time.Time) {
if value != nil {
this.buffer.Write("%s:%s\n", name, value.Format(eventSerializerTimeFormat))
}
}

94
vendor/github.com/laurent22/ical-go/ical/todo.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
package ical
// import (
// "time"
// "strconv"
// "strings"
// )
//
// func TodoFromNode(node *Node) Todo {
// if node.Name != "VTODO" { panic("Node is not a VTODO") }
//
// var todo Todo
// todo.SetId(node.PropString("UID", ""))
// todo.SetSummary(node.PropString("SUMMARY", ""))
// todo.SetDescription(node.PropString("DESCRIPTION", ""))
// todo.SetDueDate(node.PropDate("DUE", time.Time{}))
// //todo.SetAlarmDate(this.TimestampBytesToTime(reminderDate))
// todo.SetCreatedDate(node.PropDate("CREATED", time.Time{}))
// todo.SetModifiedDate(node.PropDate("DTSTAMP", time.Time{}))
// todo.SetPriority(node.PropInt("PRIORITY", 0))
// todo.SetPercentComplete(node.PropInt("PERCENT-COMPLETE", 0))
// return todo
// }
//
// type Todo struct {
// CalendarItem
// dueDate time.Time
// }
//
// func (this *Todo) SetDueDate(v time.Time) { this.dueDate = v }
// func (this *Todo) DueDate() time.Time { return this.dueDate }
//
// func (this *Todo) ICalString(target string) string {
// s := "BEGIN:VTODO\n"
//
// if target == "macTodo" {
// status := "NEEDS-ACTION"
// if this.PercentComplete() == 100 {
// status = "COMPLETED"
// }
// s += "STATUS:" + status + "\n"
// }
//
// s += encodeDateProperty("CREATED", this.CreatedDate()) + "\n"
// s += "UID:" + this.Id() + "\n"
// s += "SUMMARY:" + escapeTextType(this.Summary()) + "\n"
// if this.PercentComplete() == 100 && !this.CompletedDate().IsZero() {
// s += encodeDateProperty("COMPLETED", this.CompletedDate()) + "\n"
// }
// s += encodeDateProperty("DTSTAMP", this.ModifiedDate()) + "\n"
// if this.Priority() != 0 {
// s += "PRIORITY:" + strconv.Itoa(this.Priority()) + "\n"
// }
// if this.PercentComplete() != 0 {
// s += "PERCENT-COMPLETE:" + strconv.Itoa(this.PercentComplete()) + "\n"
// }
// if target == "macTodo" {
// s += "SEQUENCE:" + strconv.Itoa(this.Sequence()) + "\n"
// }
// if this.Description() != "" {
// s += "DESCRIPTION:" + encodeTextType(this.Description()) + "\n"
// }
//
// s += "END:VTODO\n"
//
// return s
// }
//
// func encodeDateProperty(name string, t time.Time) string {
// var output string
// zone, _ := t.Zone()
// if zone != "UTC" && zone != "" {
// output = ";TZID=" + zone + ":" + t.Format("20060102T150405")
// } else {
// output = ":" + t.Format("20060102T150405") + "Z"
// }
// return name + output
// }
//
//
// func encodeTextType(s string) string {
// output := ""
// s = escapeTextType(s)
// lineLength := 0
// for _, c := range s {
// if lineLength + len(string(c)) > 75 {
// output += "\n "
// lineLength = 1
// }
// output += string(c)
// lineLength += len(string(c))
// }
// return output
// }

2
vendor/github.com/samedi/caldav-go/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
test-data/
vendor

69
vendor/github.com/samedi/caldav-go/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,69 @@
# CHANGELOG
v3.0.0
-----------
2017-08-01 Daniel Ferraz <d.ferrazm@gmail.com>
Main change:
Add two ways to get resource from the storage: shallow or not.
`data.GetShallowResource`: means that, if it's a collection resource, it will not include its child VEVENTs in the ICS data.
This is used throughout the places where the children don't matter.
`data.GetResource`: means that the child VEVENTs will be included in the returned ICS content data for collection resources.
This is used when sending a GET request to fetch a specific resource and expecting its full ICS data in response.
Other changes:
* Removed the need to pass the useless `writer http.ResponseWriter` parameter when calling the `caldav.HandleRequest` function.
* Added a `caldav.HandleRequestWithStorage` function that makes it easy to pass a custom storage to be used and handle the request with a single function call.
v2.0.0
-----------
2017-05-10 Daniel Ferraz <d.ferrazm@gmail.com>
All commits squashed and LICENSE updated to release as OSS in github.
Feature-wise it remains the same.
v1.0.1
-----------
2017-01-25 Daniel Ferraz <d.ferrazm@gmail.com>
Escape the contents in `<calendar-data>` and `<displayname>` in the `multistatus` XML responses. This fixes possible bugs
related to special characters (e.g. &) in the XML multistatus responses that would possibly break the encoding.
v1.0.0
-----------
2017-01-18 Daniel Ferraz <d.ferrazm@gmail.com>
Main feature:
* Handles the `Prefer` header on PROPFIND and REPORT requests (defined in this [draft/proposal](https://tools.ietf.org/html/draft-murchison-webdav-prefer-05)). Useful to shrink down possible big and verbose responses when the client demands. Ex: current iOS calendar client uses this feature on its PROPFIND requests.
Other changes:
* Added the `handlers.Response` to allow clients of the lib to interact with the generated response before being written/sent back to the client.
* Added `GetResourcesByFilters` to the storage interface to allow filtering of resources at the storage level. Useful to provide an already filtered and smaller resource collection to the REPORT handler when dealing with a filtered REPORT request.
* Added `GetResourcesByList` to the storage interface to fetch a set of resources based on a set of paths. Useful to provide, in one call, the correct resource collection to the REPORT handler when dealing with a REPORT request for specific `hrefs`.
* Removed the useless `IsResourcePresent` function from the storage interface.
v0.1.0
-----------
2016-09-23 Daniel Ferraz <d.ferrazm@gmail.com>
This version implements:
* Allow: "GET, HEAD, PUT, DELETE, OPTIONS, PROPFIND, REPORT"
* DAV: "1, 3, calendar-access"
* Also only handles the following components: `VCALENDAR`, `VEVENT`
Currently unsupported:
* Components `VTODO`, `VJOURNAL`, `VFREEBUSY`
* `VEVENT` recurrences
* Resource locking
* User authentication

20
vendor/github.com/samedi/caldav-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
Copyright 2017 samedi GmbH
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NON INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

140
vendor/github.com/samedi/caldav-go/README.md generated vendored Normal file
View File

@ -0,0 +1,140 @@
# go CalDAV
This is a Go lib that aims to implement the CalDAV specification ([RFC4791]). It allows the quick implementation of a CalDAV server in Go. Basically, it provides the request handlers that will handle the several CalDAV HTTP requests, fetch the appropriate resources, build and return the responses.
### How to install
```
go get github.com/samedi/caldav-go
```
### Dependencies
For dependency management, `glide` is used.
```bash
# install glide (once!)
curl https://glide.sh/get | sh
# install dependencies
glide install
```
### How to use it
The easiest way to quickly implement a CalDAV server is by just using the lib's request handler. Example:
```go
package mycaldav
import (
"net/http"
"github.com/samedi/caldav-go"
)
func runServer() {
http.HandleFunc(PATH, caldav.RequestHandler)
http.ListenAndServe(PORT, nil)
}
```
With that, all the HTTP requests (GET, PUT, REPORT, PROPFIND, etc.) will be handled and responded to by the `caldav` handler. For any HTTP method not supported by the lib, a `501 Not Implemented` response will be returned.
In case you want more flexibility to handle the requests, e.g., if you want to access the generated response before it is sent back to the caller, you can do something like:
```go
package mycaldav
import (
"net/http"
"github.com/samedi/caldav-go"
)
func runServer() {
http.HandleFunc(PATH, myHandler)
http.ListenAndServe(PORT, nil)
}
func myHandler(writer http.ResponseWriter, request *http.Request) {
response := caldav.HandleRequest(request)
// ... do something with the `response` ...
// the response is written with the current `http.ResponseWriter` and ready to be sent back
response.Write(writer)
}
```
### Storage & Resources
The storage is where the caldav resources are stored. To interact with it, the caldav lib only needs a type that conforms to the `data.Storage` interface. Basically, this interface defines all the CRUD functions to operate on the resources. With that, resources can be stored anywhere: in the filesystem, in the cloud, a database, etc. As long as the storage in use implements all the required interface functions, the caldav lib will work fine.
For example, we could use the following dummy storage implementation:
```go
type DummyStorage struct{
}
func (d *DummyStorage) GetResources(rpath string, withChildren bool) ([]Resource, error) {
return []Resource{}, nil
}
func (d *DummyStorage) GetResourcesByFilters(rpath string, filters *ResourceFilter) ([]Resource, error) {
return []Resource{}, nil
}
func (d *DummyStorage) GetResourcesByList(rpaths []string) ([]Resource, error) {
return []Resource{}, nil
}
func (d *DummyStorage) GetResource(rpath string) (*Resource, bool, error) {
return nil, false, nil
}
func (d *DummyStorage) GetShallowResource(rpath string) (*Resource, bool, error) {
return nil, false, nil
}
func (d *DummyStorage) CreateResource(rpath, content string) (*Resource, error) {
return nil, nil
}
func (d *DummyStorage) UpdateResource(rpath, content string) (*Resource, error) {
return nil, nil
}
func (d *DummyStorage) DeleteResource(rpath string) error {
return nil
}
```
Then we just need to tell the caldav lib to use our dummy storage:
```go
dummyStg := new(DummyStorage)
caldav.SetupStorage(dummyStg)
```
All the CRUD operations on resources will then be forwarded to our dummy storage.
The default storage used (if none is explicitly set) is the `data.FileStorage` which deals with resources as files in the File System.
The resources can be of two types: collection and non-collection. A collection resource is basically a resource that has children resources, but does not have any data content. A non-collection resource is a resource that does not have children, but has data. In the case of a file storage, collections correspond to directories and non-collection to plain files. The data of a caldav resource is all the info that shows up in the calendar client, in the [iCalendar](https://en.wikipedia.org/wiki/ICalendar) format.
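As a rough, untested sketch, the pieces above can be wired together like this (`PATH`, `PORT` and `DummyStorage` are the placeholders from the earlier snippets):
```go
package mycaldav

import (
	"net/http"

	"github.com/samedi/caldav-go"
)

func runServer() {
	// Register the custom storage; if this call is skipped, the default
	// data.FileStorage is used instead.
	caldav.SetupStorage(new(DummyStorage))

	http.HandleFunc(PATH, caldav.RequestHandler)
	http.ListenAndServe(PORT, nil)
}
```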
### Features
Please check the **CHANGELOG** to see specific features that are currently implemented.
### Contributing and testing
Everyone is welcome to contribute. Please raise an issue or pull request accordingly.
To run the tests:
```
./test.sh
```
### License
MIT License.
[RFC4791]: https://tools.ietf.org/html/rfc4791

14
vendor/github.com/samedi/caldav-go/config.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
package caldav
import (
"github.com/samedi/caldav-go/data"
"github.com/samedi/caldav-go/global"
)
func SetupStorage(stg data.Storage) {
global.Storage = stg
}
func SetupUser(username string) {
global.User = &data.CalUser{username}
}

362
vendor/github.com/samedi/caldav-go/data/filters.go generated vendored Normal file
View File

@ -0,0 +1,362 @@
package data
import (
"log"
"time"
"strings"
"errors"
"github.com/beevik/etree"
"github.com/samedi/caldav-go/lib"
)
// ================ FILTERS ==================
// Filters are a set of rules used to retrieve a range of resources. They are used primarily
// on REPORT requests and are described in detail in RFC4791#7.8.
const (
TAG_FILTER = "filter"
TAG_COMP_FILTER = "comp-filter"
TAG_PROP_FILTER = "prop-filter"
TAG_PARAM_FILTER = "param-filter"
TAG_TIME_RANGE = "time-range"
TAG_TEXT_MATCH = "text-match"
TAG_IS_NOT_DEFINED = "is-not-defined"
// from the RFC, the time range `start` and `end` attributes MUST be in UTC and in this specific format
FILTER_TIME_FORMAT = "20060102T150405Z"
)
type ResourceFilter struct {
name string
text string
attrs map[string]string
children []ResourceFilter // collection of child filters.
etreeElem *etree.Element // holds the parsed XML node/tag as an `etree` element.
}
// ParseResourceFilters creates a new filter object from an XML string.
func ParseResourceFilters(xml string) (*ResourceFilter, error) {
doc := etree.NewDocument()
if err := doc.ReadFromString(xml); err != nil {
log.Printf("ERROR: Could not parse filter from XML string. XML:\n%s", xml)
return new(ResourceFilter), err
}
// Right now we're searching for a <filter> tag to initialize the filter struct from it.
// It SHOULD be a valid XML CALDAV:filter tag (RFC4791#9.7). We're not checking namespaces yet.
// TODO: check for XML namespaces and restrict it to accept only CALDAV:filter tag.
elem := doc.FindElement("//" + TAG_FILTER)
if elem == nil {
log.Printf("WARNING: The filter XML should contain a <%s> element. XML:\n%s", TAG_FILTER, xml)
return new(ResourceFilter), errors.New("invalid XML filter")
}
filter := newFilterFromEtreeElem(elem)
return &filter, nil
}
func newFilterFromEtreeElem(elem *etree.Element) ResourceFilter {
// init filter from etree element
filter := ResourceFilter{
name: elem.Tag,
text: strings.TrimSpace(elem.Text()),
etreeElem: elem,
attrs: make(map[string]string),
}
// set attributes
for _, attr := range elem.Attr {
filter.attrs[attr.Key] = attr.Value
}
return filter
}
func (f *ResourceFilter) Attr(attrName string) string {
return f.attrs[attrName]
}
func (f *ResourceFilter) TimeAttr(attrName string) *time.Time {
t, err := time.Parse(FILTER_TIME_FORMAT, f.attrs[attrName])
if err != nil {
return nil
}
return &t
}
// GetTimeRangeFilter checks if the current filter has a child "time-range" filter and
// returns it (wrapped in a `ResourceFilter` type). It returns nil if the current filter does
// not contain any "time-range" filter.
func (f *ResourceFilter) GetTimeRangeFilter() *ResourceFilter {
return f.findChild(TAG_TIME_RANGE, true)
}
func (f *ResourceFilter) Match(target ResourceInterface) bool {
if f.name == TAG_FILTER {
return f.rootFilterMatch(target)
}
return false
}
func (f *ResourceFilter) rootFilterMatch(target ResourceInterface) bool {
if f.isEmpty() {
return false
}
return f.rootChildrenMatch(target)
}
// checks if all the root's child filters match the target resource
func (f *ResourceFilter) rootChildrenMatch(target ResourceInterface) bool {
scope := []string{}
for _, child := range f.getChildren() {
// root filters only accept comp filters as children
if child.name != TAG_COMP_FILTER || !child.compMatch(target, scope) {
return false
}
}
return true
}
// See RFC4791-9.7.1.
func (f *ResourceFilter) compMatch(target ResourceInterface, scope []string) bool {
targetComp := target.ComponentName()
compName := f.attrs["name"]
if f.isEmpty() {
// Point #1 of RFC4791#9.7.1
return compName == targetComp
} else if f.contains(TAG_IS_NOT_DEFINED) {
// Point #2 of RFC4791#9.7.1
return compName != targetComp
} else {
// check each child of the current filter if they all match.
childrenScope := append(scope, compName)
return f.compChildrenMatch(target, childrenScope)
}
}
// checks if all the comp's child filters match the target resource
func (f *ResourceFilter) compChildrenMatch(target ResourceInterface, scope []string) bool {
for _, child := range f.getChildren() {
var match bool
switch child.name {
case TAG_TIME_RANGE:
// Point #3 of RFC4791#9.7.1
match = child.timeRangeMatch(target)
case TAG_PROP_FILTER:
// Point #4 of RFC4791#9.7.1
match = child.propMatch(target, scope)
case TAG_COMP_FILTER:
// Point #4 of RFC4791#9.7.1
match = child.compMatch(target, scope)
}
if !match {
return false
}
}
return true
}
// See RFC4791-9.9
func (f *ResourceFilter) timeRangeMatch(target ResourceInterface) bool {
startAttr := f.attrs["start"]
endAttr := f.attrs["end"]
// at least one of the two MUST be present
if startAttr == "" && endAttr == "" {
// if both of them are missing, return false
return false
} else if startAttr == "" {
// if missing only the `start`, set it open ended to the left
startAttr = "00010101T000000Z"
} else if endAttr == "" {
// if missing only the `end`, set it open ended to the right
endAttr = "99991231T235959Z"
}
// The logic below is only applicable for VEVENT components. So
// we return false if the resource is not a VEVENT component.
if target.ComponentName() != lib.VEVENT {
return false
}
rangeStart, err := time.Parse(FILTER_TIME_FORMAT, startAttr)
if err != nil {
log.Printf("ERROR: Could not parse start time in time-range filter.\nError: %s.\nStart attr: %s", err, startAttr)
return false
}
rangeEnd, err := time.Parse(FILTER_TIME_FORMAT, endAttr)
if err != nil {
log.Printf("ERROR: Could not parse end time in time-range filter.\nError: %s.\nEnd attr: %s", err, endAttr)
return false
}
// the following logic is inferred from the rules table for VEVENT components,
// described in RFC4791-9.9.
overlapRange := func(dtStart, dtEnd, rangeStart, rangeEnd time.Time) bool {
if dtStart.Equal(dtEnd) {
// Lines 3 and 4 of the table deal with the case where the DTSTART and DTEND dates are equal.
// In this case we use the rule: (start <= DTSTART && end > DTSTART)
return (rangeStart.Before(dtStart) || rangeStart.Equal(dtStart)) && rangeEnd.After(dtStart)
} else {
// Lines 1, 2 and 6 of the table deal with the case where the DTSTART and DTEND dates are different.
// In this case we use the rule: (start < DTEND && end > DTSTART)
return rangeStart.Before(dtEnd) && rangeEnd.After(dtStart)
}
}
// first we check each of the target recurrences (if any).
for _, recurrence := range target.Recurrences() {
// if any of them overlap the filter range, we return true right away
if overlapRange(recurrence.StartTime, recurrence.EndTime, rangeStart, rangeEnd) {
return true
}
}
// if none of the recurrences match, we just return if the actual
// resource's `start` and `end` times match the filter range
return overlapRange(target.StartTimeUTC(), target.EndTimeUTC(), rangeStart, rangeEnd)
}
// See RFC4791-9.7.2.
func (f *ResourceFilter) propMatch(target ResourceInterface, scope []string) bool {
propName := f.attrs["name"]
propPath := append(scope, propName)
if f.isEmpty() {
// Point #1 of RFC4791#9.7.2
return target.HasProperty(propPath...)
} else if f.contains(TAG_IS_NOT_DEFINED) {
// Point #2 of RFC4791#9.7.2
return !target.HasProperty(propPath...)
} else {
// check each child of the current filter if they all match.
return f.propChildrenMatch(target, propPath)
}
}
// checks if all the prop's child filters match the target resource
func (f *ResourceFilter) propChildrenMatch(target ResourceInterface, propPath []string) bool {
for _, child := range f.getChildren() {
var match bool
switch child.name {
case TAG_TIME_RANGE:
// Point #3 of RFC4791#9.7.2
// TODO: this point is not very clear on how to match time range against properties.
// So we're returning `false` in the meantime.
match = false
case TAG_TEXT_MATCH:
// Point #4 of RFC4791#9.7.2
propText := target.GetPropertyValue(propPath...)
match = child.textMatch(propText)
case TAG_PARAM_FILTER:
// Point #4 of RFC4791#9.7.2
match = child.paramMatch(target, propPath)
}
if !match {
return false
}
}
return true
}
// See RFC4791-9.7.3
func (f *ResourceFilter) paramMatch(target ResourceInterface, parentPropPath []string) bool {
paramName := f.attrs["name"]
paramPath := append(parentPropPath, paramName)
if f.isEmpty() {
// Point #1 of RFC4791#9.7.3
return target.HasPropertyParam(paramPath...)
} else if f.contains(TAG_IS_NOT_DEFINED) {
// Point #2 of RFC4791#9.7.3
return !target.HasPropertyParam(paramPath...)
} else {
child := f.getChildren()[0]
// param filters can also have (only-one) nested text-match filter
if child.name == TAG_TEXT_MATCH {
paramValue := target.GetPropertyParamValue(paramPath...)
return child.textMatch(paramValue)
}
}
return false
}
// See RFC4791-9.7.5
func (f *ResourceFilter) textMatch(targetText string) bool {
// TODO: collations are not being considered/supported yet.
// Texts are lowercased to make the match case-insensitive, roughly following the "i;ascii-casemap" collation.
targetText = strings.ToLower(targetText)
expectedSubstr := strings.ToLower(f.text)
match := strings.Contains(targetText, expectedSubstr)
if f.attrs["negate-condition"] == "yes" {
return !match
}
return match
}
func (f *ResourceFilter) isEmpty() bool {
return len(f.getChildren()) == 0 && f.text == ""
}
func (f *ResourceFilter) contains(filterName string) bool {
if f.findChild(filterName, false) != nil {
return true
}
return false
}
func (f *ResourceFilter) findChild(filterName string, dig bool) *ResourceFilter {
for _, child := range f.getChildren() {
if child.name == filterName {
return &child
}
if !dig {
continue
}
dugChild := child.findChild(filterName, true)
if dugChild != nil {
return dugChild
}
}
return nil
}
// lazy evaluation of the child filters
func (f *ResourceFilter) getChildren() []ResourceFilter {
if f.children == nil {
f.children = []ResourceFilter{}
for _, childElem := range f.etreeElem.ChildElements() {
childFilter := newFilterFromEtreeElem(childElem)
f.children = append(f.children, childFilter)
}
}
return f.children
}
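To illustrate the API above, here is a hedged sketch that parses a calendar-query style CALDAV:filter body and digs out its time-range; the XML and the time values are invented for the example:
```go
package main

import (
	"fmt"

	"github.com/samedi/caldav-go/data"
)

func main() {
	// VEVENTs inside a VCALENDAR that overlap the given UTC range.
	xmlFilter := `
	<C:filter xmlns:C="urn:ietf:params:xml:ns:caldav">
	  <C:comp-filter name="VCALENDAR">
	    <C:comp-filter name="VEVENT">
	      <C:time-range start="20180401T000000Z" end="20180402T000000Z"/>
	    </C:comp-filter>
	  </C:comp-filter>
	</C:filter>`

	filter, err := data.ParseResourceFilters(xmlFilter)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	// GetTimeRangeFilter digs through the children and returns the nested
	// time-range filter, or nil when none is present.
	if tr := filter.GetTimeRangeFilter(); tr != nil {
		fmt.Println("time-range start:", tr.TimeAttr("start").UTC())
	}
}
```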

277
vendor/github.com/samedi/caldav-go/data/resource.go generated vendored Normal file

@ -0,0 +1,277 @@
package data
import (
"os"
"fmt"
"log"
"time"
"strings"
"strconv"
"io/ioutil"
"github.com/laurent22/ical-go/ical"
"github.com/samedi/caldav-go/lib"
"github.com/samedi/caldav-go/files"
)
type ResourceInterface interface {
ComponentName() string
StartTimeUTC() time.Time
EndTimeUTC() time.Time
Recurrences() []ResourceRecurrence
HasProperty(propPath... string) bool
GetPropertyValue(propPath... string) string
HasPropertyParam(paramName... string) bool
GetPropertyParamValue(paramName... string) string
}
type ResourceAdapter interface {
IsCollection() bool
CalculateEtag() string
GetContent() string
GetContentSize() int64
GetModTime() time.Time
}
type ResourceRecurrence struct {
StartTime time.Time
EndTime time.Time
}
type Resource struct {
Name string
Path string
pathSplit []string
adapter ResourceAdapter
emptyTime time.Time
}
func NewResource(resPath string, adp ResourceAdapter) Resource {
pClean := lib.ToSlashPath(resPath)
pSplit := strings.Split(strings.Trim(pClean, "/"), "/")
return Resource {
Name: pSplit[len(pSplit) - 1],
Path: pClean,
pathSplit: pSplit,
adapter: adp,
}
}
func (r *Resource) IsCollection() bool {
return r.adapter.IsCollection()
}
func (r *Resource) IsPrincipal() bool {
return len(r.pathSplit) <= 1
}
func (r *Resource) ComponentName() string {
if r.IsCollection() {
return lib.VCALENDAR
} else {
return lib.VEVENT
}
}
func (r *Resource) StartTimeUTC() time.Time {
vevent := r.icalVEVENT()
dtstart := vevent.PropDate(ical.DTSTART, r.emptyTime)
if dtstart == r.emptyTime {
log.Printf("WARNING: The property DTSTART was not found in the resource's ical data.\nResource path: %s", r.Path)
return r.emptyTime
}
return dtstart.UTC()
}
func (r *Resource) EndTimeUTC() time.Time {
vevent := r.icalVEVENT()
dtend := vevent.PropDate(ical.DTEND, r.emptyTime)
// when the DTEND property is not present, we just add the DURATION (if any) to the DTSTART
if dtend == r.emptyTime {
duration := vevent.PropDuration(ical.DURATION)
dtend = r.StartTimeUTC().Add(duration)
}
return dtend.UTC()
}
func (r *Resource) Recurrences() []ResourceRecurrence {
// TODO: Implement. This server does not support ical recurrences yet. We just return an empty array.
return []ResourceRecurrence{}
}
func (r *Resource) HasProperty(propPath... string) bool {
return r.GetPropertyValue(propPath...) != ""
}
func (r *Resource) GetPropertyValue(propPath... string) string {
if propPath[0] == ical.VCALENDAR {
propPath = propPath[1:]
}
prop, _ := r.icalendar().DigProperty(propPath...)
return prop
}
func (r *Resource) HasPropertyParam(paramPath... string) bool {
return r.GetPropertyParamValue(paramPath...) != ""
}
func (r *Resource) GetPropertyParamValue(paramPath... string) string {
if paramPath[0] == ical.VCALENDAR {
paramPath = paramPath[1:]
}
param, _ := r.icalendar().DigParameter(paramPath...)
return param
}
func (r *Resource) GetEtag() (string, bool) {
if r.IsCollection() {
return "", false
}
return r.adapter.CalculateEtag(), true
}
func (r *Resource) GetContentType() (string, bool) {
if r.IsCollection() {
return "text/calendar", true
} else {
return "text/calendar; component=vcalendar", true
}
}
func (r *Resource) GetDisplayName() (string, bool) {
return r.Name, true
}
func (r *Resource) GetContentData() (string, bool) {
data := r.adapter.GetContent()
found := data != ""
return data, found
}
func (r *Resource) GetContentLength() (string, bool) {
// If it's a collection, it does not have any content, so mark it as not found
if r.IsCollection() {
return "", false
}
contentSize := r.adapter.GetContentSize()
return strconv.FormatInt(contentSize, 10), true
}
func (r *Resource) GetLastModified(format string) (string, bool) {
return r.adapter.GetModTime().Format(format), true
}
func (r *Resource) GetOwner() (string, bool) {
var owner string
if len(r.pathSplit) > 1 {
owner = r.pathSplit[0]
} else {
owner = ""
}
return owner, true
}
func (r *Resource) GetOwnerPath() (string, bool) {
owner, _ := r.GetOwner()
if owner != "" {
return fmt.Sprintf("/%s/", owner), true
} else {
return "", false
}
}
// TODO: memoize
func (r *Resource) icalVEVENT() *ical.Node {
vevent := r.icalendar().ChildByName(ical.VEVENT)
// if nil, log it and return an empty vevent
if vevent == nil {
log.Printf("WARNING: The resource's ical data is missing the VEVENT property.\nResource path: %s", r.Path)
return &ical.Node{
Name: ical.VEVENT,
}
}
return vevent
}
// TODO: memoize
func (r *Resource) icalendar() *ical.Node {
data, found := r.GetContentData()
if !found {
log.Printf("WARNING: The resource's ical data does not have any data.\nResource path: %s", r.Path)
return &ical.Node{
Name: ical.VCALENDAR,
}
}
icalNode, err := ical.ParseCalendar(data)
if err != nil {
log.Printf("ERROR: Could not parse the resource's ical data.\nError: %s.\nResource path: %s", err, r.Path)
return &ical.Node{
Name: ical.VCALENDAR,
}
}
return icalNode
}
type FileResourceAdapter struct {
finfo os.FileInfo
resourcePath string
}
func (adp *FileResourceAdapter) IsCollection() bool {
return adp.finfo.IsDir()
}
func (adp *FileResourceAdapter) GetContent() string {
if adp.IsCollection() {
return ""
}
data, err := ioutil.ReadFile(files.AbsPath(adp.resourcePath))
if err != nil {
log.Printf("ERROR: Could not read file content for the resource.\nError: %s.\nResource path: %s.", err, adp.resourcePath)
return ""
}
return string(data)
}
func (adp *FileResourceAdapter) GetContentSize() int64 {
return adp.finfo.Size()
}
func (adp *FileResourceAdapter) CalculateEtag() string {
// returns ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so for collections we return empty.
if adp.IsCollection() {
return ""
}
fi := adp.finfo
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size())
}
func (adp *FileResourceAdapter) GetModTime() time.Time {
return adp.finfo.ModTime()
}
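To show how the adapter abstraction fits together, here is a speculative in-memory `ResourceAdapter` (the type and values below are invented for the example) plugged into `data.NewResource`:
```go
package main

import (
	"fmt"
	"time"

	"github.com/samedi/caldav-go/data"
)

// memAdapter is a hypothetical adapter that keeps the iCalendar content in
// memory instead of reading it from disk.
type memAdapter struct {
	content string
	modTime time.Time
}

func (a *memAdapter) IsCollection() bool    { return false }
func (a *memAdapter) CalculateEtag() string { return fmt.Sprintf(`"%x"`, a.modTime.UnixNano()) }
func (a *memAdapter) GetContent() string    { return a.content }
func (a *memAdapter) GetContentSize() int64 { return int64(len(a.content)) }
func (a *memAdapter) GetModTime() time.Time { return a.modTime }

func main() {
	ics := "BEGIN:VCALENDAR\r\nBEGIN:VEVENT\r\nUID:ev1\r\nEND:VEVENT\r\nEND:VCALENDAR\r\n"
	res := data.NewResource("/user/calendar/event1.ics", &memAdapter{content: ics, modTime: time.Now()})

	etag, _ := res.GetEtag()
	size, _ := res.GetContentLength()
	fmt.Println(res.ComponentName(), size, etag) // VEVENT, content length, adapter's etag
}
```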

217
vendor/github.com/samedi/caldav-go/data/storage.go generated vendored Normal file

@ -0,0 +1,217 @@
package data
import (
"os"
"log"
"io/ioutil"
"github.com/samedi/caldav-go/errs"
"github.com/samedi/caldav-go/files"
)
// The Storage is the responsible for the CRUD operations on the caldav resources.
type Storage interface {
// GetResources gets a list of resources based on a given `rpath`. The
// `rpath` is the path to the original resource being requested. The resulting list
// will/must contain that original resource, plus any additional resources. It also receives a
// `withChildren` flag saying whether the result must also include all of the original resource's
// children (if the original is a collection resource). If `true`, the result will have the requested resource + children.
// If `false`, it will have only the requested original resource (from the `rpath` path).
// It returns an error if anything went wrong or if no resource could be found on the `rpath` path.
GetResources(rpath string, withChildren bool) ([]Resource, error)
// GetResourcesByList fetches a list of resources by path from the storage.
// This method fetches all the `rpaths` and returns an array of the resources found.
// No "not found" error is returned if one of the resources cannot be found.
// Errors are returned only when something other than "not found" happens.
GetResourcesByList(rpaths []string) ([]Resource, error)
// GetResourcesByFilters returns the filtered children of a target collection resource.
// The target collection resource is the one pointed by the `rpath` parameter. All of its children
// will be checked against a set of `filters` and the matching ones are returned. The results
// contains only the filtered children and does NOT include the target resource. If the target resource
// is not a collection, an empty array is returned as the result.
GetResourcesByFilters(rpath string, filters *ResourceFilter) ([]Resource, error)
// GetResource gets the requested resource based on a given `rpath` path. It returns the resource (if found) or
// nil (if not found). Also returns a flag specifying if the resource was found or not.
GetResource(rpath string) (*Resource, bool, error)
// GetShallowResource has the same behaviour as `storage.GetResource`. The only difference is that, for collection resources,
// it does not return the children in the collection's `storage.Resource` struct (hence the name shallow). This is done
// for optimization reasons, as this function is used in places where the collection's children are not important.
GetShallowResource(rpath string) (*Resource, bool, error)
// CreateResource creates a new resource on the `rpath` path with a given `content`.
CreateResource(rpath, content string) (*Resource, error)
// UpdateResource updates a resource on the `rpath` path with a given `content`.
UpdateResource(rpath, content string) (*Resource, error)
// DeleteResource deletes a resource on the `rpath` path.
DeleteResource(rpath string) error
}
// FileStorage is the storage that deals with resources as files in the file system. So, a collection resource
// is treated as a folder/directory and its children resources are the files it contains. On the other hand, non-collection
// resources are just plain files.
type FileStorage struct {
}
func (fs *FileStorage) GetResources(rpath string, withChildren bool) ([]Resource, error) {
result := []Resource{}
// tries to open the file by the given path
f, e := fs.openResourceFile(rpath, os.O_RDONLY)
if e != nil {
return nil, e
}
// add it as a resource to the result list
finfo, _ := f.Stat()
resource := NewResource(rpath, &FileResourceAdapter{finfo, rpath})
result = append(result, resource)
// if the file is a dir, add its children to the result list
if withChildren && finfo.IsDir() {
dirFiles, _ := f.Readdir(0)
for _, finfo := range dirFiles {
childPath := files.JoinPaths(rpath, finfo.Name())
resource = NewResource(childPath, &FileResourceAdapter{finfo, childPath})
result = append(result, resource)
}
}
return result, nil
}
func (fs *FileStorage) GetResourcesByFilters(rpath string, filters *ResourceFilter) ([]Resource, error) {
result := []Resource{}
childPaths := fs.getDirectoryChildPaths(rpath)
for _, path := range childPaths {
resource, _, err := fs.GetShallowResource(path)
if err != nil {
// if we can't find this resource, something weird went wrong, but not that serious, so we log it and continue
log.Printf("WARNING: returned error when trying to get resource with path %s from collection with path %s. Error: %s", path, rpath, err)
continue
}
// only add it if the resource matches the filters
if filters == nil || filters.Match(resource) {
result = append(result, *resource)
}
}
return result, nil
}
func (fs *FileStorage) GetResourcesByList(rpaths []string) ([]Resource, error) {
results := []Resource{}
for _, rpath := range rpaths {
resource, found, err := fs.GetShallowResource(rpath)
if err != nil && err != errs.ResourceNotFoundError {
return nil, err
}
if found {
results = append(results, *resource)
}
}
return results, nil
}
func (fs *FileStorage) GetResource(rpath string) (*Resource, bool, error) {
// For simplicity we just return the shallow resource.
return fs.GetShallowResource(rpath)
}
func (fs *FileStorage) GetShallowResource(rpath string) (*Resource, bool, error) {
resources, err := fs.GetResources(rpath, false)
if err != nil {
return nil, false, err
}
if resources == nil || len(resources) == 0 {
return nil, false, errs.ResourceNotFoundError
}
res := resources[0]
return &res, true, nil
}
func (fs *FileStorage) CreateResource(rpath, content string) (*Resource, error) {
rAbsPath := files.AbsPath(rpath)
if fs.isResourcePresent(rAbsPath) {
return nil, errs.ResourceAlreadyExistsError
}
// create parent directories (if needed)
if err := os.MkdirAll(files.DirPath(rAbsPath), os.ModePerm); err != nil {
return nil, err
}
// create file/resource and write content
f, err := os.Create(rAbsPath)
if err != nil {
return nil, err
}
f.WriteString(content)
finfo, _ := f.Stat()
res := NewResource(rpath, &FileResourceAdapter{finfo, rpath})
return &res, nil
}
func (fs *FileStorage) UpdateResource(rpath, content string) (*Resource, error) {
f, e := fs.openResourceFile(rpath, os.O_RDWR)
if e != nil {
return nil, e
}
// update content
f.Truncate(0)
f.WriteString(content)
finfo, _ := f.Stat()
res := NewResource(rpath, &FileResourceAdapter{finfo, rpath})
return &res, nil
}
func (fs *FileStorage) DeleteResource(rpath string) error {
err := os.Remove(files.AbsPath(rpath))
return err
}
func (fs *FileStorage) isResourcePresent(rpath string) bool {
_, found, _ := fs.GetShallowResource(rpath)
return found
}
func (fs *FileStorage) openResourceFile(filepath string, mode int) (*os.File, error) {
f, e := os.OpenFile(files.AbsPath(filepath), mode, 0666)
if e != nil {
if os.IsNotExist(e) {
return nil, errs.ResourceNotFoundError
}
return nil, e
}
return f, nil
}
func (fs *FileStorage) getDirectoryChildPaths(dirpath string) []string {
content, err := ioutil.ReadDir(files.AbsPath(dirpath))
if err != nil {
log.Printf("ERROR: Could not read resource as file directory.\nError: %s.\nResource path: %s.", err, dirpath)
return nil
}
result := []string{}
for _, file := range content {
fpath := files.JoinPaths(dirpath, file.Name())
result = append(result, fpath)
}
return result
}
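A short usage sketch for the file storage (assuming resources were previously created under a `/user/calendar/` directory relative to the working directory):
```go
package main

import (
	"fmt"

	"github.com/samedi/caldav-go/data"
)

func main() {
	stg := new(data.FileStorage)

	// Shallow fetch: only the collection resource itself, without children.
	col, found, err := stg.GetShallowResource("/user/calendar/")
	if err == nil && found {
		fmt.Println("collection:", col.Path)
	}

	// Deep fetch: the collection plus each of its child resources.
	all, err := stg.GetResources("/user/calendar/", true)
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	for _, r := range all {
		fmt.Println(r.Path, "collection?", r.IsCollection())
	}
}
```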

5
vendor/github.com/samedi/caldav-go/data/user.go generated vendored Normal file

@ -0,0 +1,5 @@
package data
type CalUser struct {
Name string
}

12
vendor/github.com/samedi/caldav-go/errs/errors.go generated vendored Normal file

@ -0,0 +1,12 @@
package errs
import (
"errors"
)
var (
ResourceNotFoundError = errors.New("caldav: resource not found")
ResourceAlreadyExistsError = errors.New("caldav: resource already exists")
UnauthorizedError = errors.New("caldav: unauthorized. credentials needed.")
ForbiddenError = errors.New("caldav: forbidden operation.")
)

30
vendor/github.com/samedi/caldav-go/files/paths.go generated vendored Normal file

@ -0,0 +1,30 @@
package files
import (
"strings"
"path/filepath"
"github.com/samedi/caldav-go/lib"
)
const (
Separator = string(filepath.Separator)
)
func AbsPath(path string) string {
path = strings.Trim(path, "/")
absPath, _ := filepath.Abs(path)
return absPath
}
func DirPath(path string) string {
return filepath.Dir(path)
}
func JoinPaths(paths ...string) string {
return filepath.Join(paths...)
}
func ToSlashPath(path string) string {
return lib.ToSlashPath(path)
}

10
vendor/github.com/samedi/caldav-go/glide.lock generated vendored Normal file

@ -0,0 +1,10 @@
hash: 2796726e69757f4af1a13f6ebd056ebc626d712051aa213875bb03f5bdc1ebfd
updated: 2017-01-18T11:43:23.127761353+01:00
imports:
- name: github.com/beevik/etree
version: 4cd0dd976db869f817248477718071a28e978df0
- name: github.com/laurent22/ical-go
version: 4811ac5553eae5fed7cd5d7a9024727f1311b2a2
subpackages:
- ical
testImports: []

7
vendor/github.com/samedi/caldav-go/glide.yaml generated vendored Normal file

@ -0,0 +1,7 @@
package: github.com/samedi/caldav-go
import:
- package: github.com/beevik/etree
- package: github.com/laurent22/ical-go
version: ~0.1.0
subpackages:
- ical

12
vendor/github.com/samedi/caldav-go/global/global.go generated vendored Normal file

@ -0,0 +1,12 @@
package global
// This file defines accessible variables used to set up the caldav server.
import (
"github.com/samedi/caldav-go/data"
)
// The global storage used in the CRUD operations of resources. Default storage is the `FileStorage`.
var Storage data.Storage = new(data.FileStorage)
// Current caldav user. It is used to keep the info of the current user that is interacting with the calendar.
var User *data.CalUser

28
vendor/github.com/samedi/caldav-go/handler.go generated vendored Normal file

@ -0,0 +1,28 @@
package caldav
import (
"net/http"
"github.com/samedi/caldav-go/data"
"github.com/samedi/caldav-go/handlers"
)
// RequestHandler handles the given CALDAV request and writes the response right away. This function is meant to be
// used by passing it directly as the handler func to the `http` lib. Example: http.HandleFunc("/", caldav.RequestHandler).
func RequestHandler(writer http.ResponseWriter, request *http.Request) {
response := HandleRequest(request)
response.Write(writer)
}
// HandleRequest handles the given CALDAV request and returns the response. Useful when the caller
// wants to do something else with the response before writing it to the response stream.
func HandleRequest(request *http.Request) *handlers.Response {
handler := handlers.NewHandler(request)
return handler.Handle()
}
// HandleRequestWithStorage handles the request the same way as `HandleRequest` does, but before,
// it sets the given storage that will be used throughout the request handling flow.
func HandleRequestWithStorage(request *http.Request, stg data.Storage) *handlers.Response {
SetupStorage(stg)
return HandleRequest(request)
}
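A minimal wiring sketch based on the example in the comment above (the port is arbitrary):
```go
package main

import (
	"log"
	"net/http"

	caldav "github.com/samedi/caldav-go"
)

func main() {
	// Every CalDAV method (PROPFIND, REPORT, PUT, DELETE, ...) is dispatched
	// internally by the library's handler builder.
	http.HandleFunc("/", caldav.RequestHandler)
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```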

24
vendor/github.com/samedi/caldav-go/handlers/builder.go generated vendored Normal file

@ -0,0 +1,24 @@
package handlers
import (
"net/http"
)
type handlerInterface interface {
Handle() *Response
}
func NewHandler(request *http.Request) handlerInterface {
response := NewResponse()
switch request.Method {
case "GET": return getHandler{request, response, false}
case "HEAD": return getHandler{request, response, true}
case "PUT": return putHandler{request, response}
case "DELETE": return deleteHandler{request, response}
case "PROPFIND": return propfindHandler{request, response}
case "OPTIONS": return optionsHandler{response}
case "REPORT": return reportHandler{request, response}
default: return notImplementedHandler{response}
}
}

40
vendor/github.com/samedi/caldav-go/handlers/delete.go generated vendored Normal file

@ -0,0 +1,40 @@
package handlers
import (
"net/http"
"github.com/samedi/caldav-go/global"
)
type deleteHandler struct {
request *http.Request
response *Response
}
func (dh deleteHandler) Handle() *Response {
precond := requestPreconditions{dh.request}
// get the event from the storage
resource, _, err := global.Storage.GetShallowResource(dh.request.URL.Path)
if err != nil {
return dh.response.SetError(err)
}
// TODO: Handle delete on collections
if resource.IsCollection() {
return dh.response.Set(http.StatusMethodNotAllowed, "")
}
// check ETag pre-condition
resourceEtag, _ := resource.GetEtag()
if !precond.IfMatch(resourceEtag) {
return dh.response.Set(http.StatusPreconditionFailed, "")
}
// delete event after pre-condition passed
err = global.Storage.DeleteResource(resource.Path)
if err != nil {
return dh.response.SetError(err)
}
return dh.response.Set(http.StatusNoContent, "")
}

37
vendor/github.com/samedi/caldav-go/handlers/get.go generated vendored Normal file

@ -0,0 +1,37 @@
package handlers
import (
"net/http"
"github.com/samedi/caldav-go/global"
)
type getHandler struct {
request *http.Request
response *Response
onlyHeaders bool
}
func (gh getHandler) Handle() *Response {
resource, _, err := global.Storage.GetResource(gh.request.URL.Path)
if err != nil {
return gh.response.SetError(err)
}
var response string
if gh.onlyHeaders {
response = ""
} else {
response, _ = resource.GetContentData()
}
etag, _ := resource.GetEtag()
lastm, _ := resource.GetLastModified(http.TimeFormat)
ctype, _ := resource.GetContentType()
gh.response.SetHeader("ETag", etag).
SetHeader("Last-Modified", lastm).
SetHeader("Content-Type", ctype).
Set(http.StatusOK, response)
return gh.response
}

27
vendor/github.com/samedi/caldav-go/handlers/headers.go generated vendored Normal file

@ -0,0 +1,27 @@
package handlers
import (
"net/http"
)
const (
HD_DEPTH = "Depth"
HD_DEPTH_DEEP = "1"
HD_PREFER = "Prefer"
HD_PREFER_MINIMAL = "return=minimal"
HD_PREFERENCE_APPLIED = "Preference-Applied"
)
type headers struct {
http.Header
}
func (this headers) IsDeep() bool {
depth := this.Get(HD_DEPTH)
return (depth == HD_DEPTH_DEEP)
}
func (this headers) IsMinimal() bool {
prefer := this.Get(HD_PREFER)
return (prefer == HD_PREFER_MINIMAL)
}


@ -0,0 +1,207 @@
package handlers
import (
"fmt"
"net/http"
"encoding/xml"
"github.com/samedi/caldav-go/lib"
"github.com/samedi/caldav-go/global"
"github.com/samedi/caldav-go/data"
"github.com/samedi/caldav-go/ixml"
)
// Wraps a multistatus response. It contains the set of `Responses`
// that will serve to build the final XML. Multistatus responses are
// used by the REPORT and PROPFIND methods.
type multistatusResp struct {
// The set of multistatus responses used to build each of the <DAV:response> nodes.
Responses []msResponse
// Flag indicating whether the XML should be minimal or not
// [defined in the draft https://tools.ietf.org/html/draft-murchison-webdav-prefer-05]
Minimal bool
}
type msResponse struct {
Href string
Found bool
Propstats msPropstats
}
type msPropstats map[int]msProps
// Adds an msProp to the map, keyed by the prop's status.
func (stats msPropstats) Add(prop msProp) {
stats[prop.Status] = append(stats[prop.Status], prop)
}
func (stats msPropstats) Clone() msPropstats {
clone := make(msPropstats)
for k, v := range stats {
clone[k] = v
}
return clone
}
type msProps []msProp
type msProp struct {
Tag xml.Name
Content string
Contents []string
Status int
}
// Function that processes all the required props for a given resource.
// ## Params
// resource: the target calendar resource.
// reqprops: set of required props that must be processed for the resource.
// ## Returns
// The set of props (msProp) processed. Each prop is mapped to a HTTP status code.
// So if a prop is found and processed ok, it'll be mapped to 200. If it's not found,
// it'll be mapped to 404, and so on.
func (ms *multistatusResp) Propstats(resource *data.Resource, reqprops []xml.Name) msPropstats {
if resource == nil {
return nil
}
result := make(msPropstats)
for _, ptag := range reqprops {
pvalue := msProp{
Tag: ptag,
Status: http.StatusOK,
}
pfound := false
switch ptag {
case ixml.CALENDAR_DATA_TG:
pvalue.Content, pfound = resource.GetContentData()
if pfound {
pvalue.Content = ixml.EscapeText(pvalue.Content)
}
case ixml.GET_ETAG_TG:
pvalue.Content, pfound = resource.GetEtag()
case ixml.GET_CONTENT_TYPE_TG:
pvalue.Content, pfound = resource.GetContentType()
case ixml.GET_CONTENT_LENGTH_TG:
pvalue.Content, pfound = resource.GetContentLength()
case ixml.DISPLAY_NAME_TG:
pvalue.Content, pfound = resource.GetDisplayName()
if pfound {
pvalue.Content = ixml.EscapeText(pvalue.Content)
}
case ixml.GET_LAST_MODIFIED_TG:
pvalue.Content, pfound = resource.GetLastModified(http.TimeFormat)
case ixml.OWNER_TG:
pvalue.Content, pfound = resource.GetOwnerPath()
case ixml.GET_CTAG_TG:
pvalue.Content, pfound = resource.GetEtag()
case ixml.PRINCIPAL_URL_TG,
ixml.PRINCIPAL_COLLECTION_SET_TG,
ixml.CALENDAR_USER_ADDRESS_SET_TG,
ixml.CALENDAR_HOME_SET_TG:
pvalue.Content, pfound = ixml.HrefTag(resource.Path), true
case ixml.RESOURCE_TYPE_TG:
if resource.IsCollection() {
pvalue.Content, pfound = ixml.Tag(ixml.COLLECTION_TG, "") + ixml.Tag(ixml.CALENDAR_TG, ""), true
if resource.IsPrincipal() {
pvalue.Content += ixml.Tag(ixml.PRINCIPAL_TG, "")
}
} else {
// resourcetype must be returned empty for non-collection elements
pvalue.Content, pfound = "", true
}
case ixml.CURRENT_USER_PRINCIPAL_TG:
if global.User != nil {
path := fmt.Sprintf("/%s/", global.User.Name)
pvalue.Content, pfound = ixml.HrefTag(path), true
}
case ixml.SUPPORTED_CALENDAR_COMPONENT_SET_TG:
if resource.IsCollection() {
for _, component := range supportedComponents {
// TODO: use ixml somehow to build the below tag
compTag := fmt.Sprintf(`<C:comp name="%s"/>`, component)
pvalue.Contents = append(pvalue.Contents, compTag)
}
pfound = true
}
}
if !pfound {
pvalue.Status = http.StatusNotFound
}
result.Add(pvalue)
}
return result
}
// Adds a new `msResponse` to the `Responses` array.
func (ms *multistatusResp) AddResponse(href string, found bool, propstats msPropstats) {
ms.Responses = append(ms.Responses, msResponse{
Href: href,
Found: found,
Propstats: propstats,
})
}
func (ms *multistatusResp) ToXML() string {
// init multistatus
var bf lib.StringBuffer
bf.Write(`<?xml version="1.0" encoding="UTF-8"?>`)
bf.Write(`<D:multistatus %s>`, ixml.Namespaces())
// iterate over event hrefs and build multistatus XML on the fly
for _, response := range ms.Responses {
bf.Write("<D:response>")
bf.Write(ixml.HrefTag(response.Href))
if response.Found {
propstats := response.Propstats.Clone()
if ms.Minimal {
delete(propstats, http.StatusNotFound)
if len(propstats) == 0 {
bf.Write("<D:propstat>")
bf.Write("<D:prop/>")
bf.Write(ixml.StatusTag(http.StatusOK))
bf.Write("</D:propstat>")
bf.Write("</D:response>")
continue
}
}
for status, props := range propstats {
bf.Write("<D:propstat>")
bf.Write("<D:prop>")
for _, prop := range props {
bf.Write(ms.propToXML(prop))
}
bf.Write("</D:prop>")
bf.Write(ixml.StatusTag(status))
bf.Write("</D:propstat>")
}
} else {
// if the resource was not found, set 404
bf.Write(ixml.StatusTag(http.StatusNotFound))
}
bf.Write("</D:response>")
}
bf.Write("</D:multistatus>")
return bf.String()
}
func (ms *multistatusResp) propToXML(prop msProp) string {
for _, content := range prop.Contents {
prop.Content += content
}
xmlString := ixml.Tag(prop.Tag, prop.Content)
return xmlString
}


@ -0,0 +1,13 @@
package handlers
import (
"net/http"
)
type notImplementedHandler struct {
response *Response
}
func (h notImplementedHandler) Handle() *Response {
return h.response.Set(http.StatusNotImplemented, "")
}

23
vendor/github.com/samedi/caldav-go/handlers/options.go generated vendored Normal file

@ -0,0 +1,23 @@
package handlers
import (
"net/http"
)
type optionsHandler struct {
response *Response
}
// Returns the allowed methods and the DAV features implemented by the current server.
// For more information about the values and format read RFC4918 Sections 10.1 and 18.
func (oh optionsHandler) Handle() *Response {
// Set the DAV compliance header:
// 1: Server supports all the requirements specified in RFC2518
// 3: Server supports all the revisions specified in RFC4918
// calendar-access: Server supports all the extensions specified in RFC4791
oh.response.SetHeader("DAV", "1, 3, calendar-access").
SetHeader("Allow", "GET, HEAD, PUT, DELETE, OPTIONS, PROPFIND, REPORT").
Set(http.StatusOK, "")
return oh.response
}


@ -0,0 +1,23 @@
package handlers
import (
"net/http"
)
type requestPreconditions struct {
request *http.Request
}
func (p *requestPreconditions) IfMatch(etag string) bool {
etagMatch := p.request.Header["If-Match"]
return len(etagMatch) == 0 || etagMatch[0] == "*" || etagMatch[0] == etag
}
func (p *requestPreconditions) IfMatchPresent() bool {
return len(p.request.Header["If-Match"]) != 0
}
func (p *requestPreconditions) IfNoneMatch(value string) bool {
valueMatch := p.request.Header["If-None-Match"]
return len(valueMatch) == 1 && valueMatch[0] == value
}


@ -0,0 +1,49 @@
package handlers
import (
"net/http"
"encoding/xml"
"github.com/samedi/caldav-go/global"
)
type propfindHandler struct {
request *http.Request
response *Response
}
func (ph propfindHandler) Handle() *Response {
requestBody := readRequestBody(ph.request)
header := headers{ph.request.Header}
// get the target resources based on the request URL
resources, err := global.Storage.GetResources(ph.request.URL.Path, header.IsDeep())
if err != nil {
return ph.response.SetError(err)
}
// read body string to xml struct
type XMLProp2 struct {
Tags []xml.Name `xml:",any"`
}
type XMLRoot2 struct {
XMLName xml.Name
Prop XMLProp2 `xml:"DAV: prop"`
}
var requestXML XMLRoot2
xml.Unmarshal([]byte(requestBody), &requestXML)
multistatus := &multistatusResp{
Minimal: header.IsMinimal(),
}
// for each href, build the multistatus responses
for _, resource := range resources {
propstats := multistatus.Propstats(&resource, requestXML.Prop.Tags)
multistatus.AddResponse(resource.Path, true, propstats)
}
if multistatus.Minimal {
ph.response.SetHeader(HD_PREFERENCE_APPLIED, HD_PREFER_MINIMAL)
}
return ph.response.Set(207, multistatus.ToXML())
}

65
vendor/github.com/samedi/caldav-go/handlers/put.go generated vendored Normal file

@ -0,0 +1,65 @@
package handlers
import (
"net/http"
"github.com/samedi/caldav-go/errs"
"github.com/samedi/caldav-go/global"
)
type putHandler struct {
request *http.Request
response *Response
}
func (ph putHandler) Handle() *Response {
requestBody := readRequestBody(ph.request)
precond := requestPreconditions{ph.request}
success := false
// check if resource exists
resourcePath := ph.request.URL.Path
resource, found, err := global.Storage.GetShallowResource(resourcePath)
if err != nil && err != errs.ResourceNotFoundError {
return ph.response.SetError(err)
}
// PUT is allowed in 2 cases:
//
// 1. Item NOT FOUND and there is NO ETAG match header: CREATE a new item
if !found && !precond.IfMatchPresent() {
// create new event resource
resource, err = global.Storage.CreateResource(resourcePath, requestBody)
if err != nil {
return ph.response.SetError(err)
}
success = true
}
if found {
// TODO: Handle PUT on collections
if resource.IsCollection() {
return ph.response.Set(http.StatusPreconditionFailed, "")
}
// 2. Item exists, the resource etag is verified and there's no IF-NONE-MATCH=* header: UPDATE the item
resourceEtag, _ := resource.GetEtag()
if found && precond.IfMatch(resourceEtag) && !precond.IfNoneMatch("*") {
// update resource
resource, err = global.Storage.UpdateResource(resourcePath, requestBody)
if err != nil {
return ph.response.SetError(err)
}
success = true
}
}
if !success {
return ph.response.Set(http.StatusPreconditionFailed, "")
}
resourceEtag, _ := resource.GetEtag()
return ph.response.SetHeader("ETag", resourceEtag).
Set(http.StatusCreated, "")
}
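As a rough illustration of the create path (case 1 above), the handler can be exercised with a synthetic request; note that the default `FileStorage` will write a real file under the working directory, and the iCalendar body is just an example:
```go
package main

import (
	"fmt"
	"net/http/httptest"
	"strings"

	caldav "github.com/samedi/caldav-go"
)

func main() {
	ics := "BEGIN:VCALENDAR\r\nBEGIN:VEVENT\r\nUID:ev1\r\nEND:VEVENT\r\nEND:VCALENDAR\r\n"
	// A PUT to a path that does not exist yet, with no If-Match header,
	// creates the resource and answers 201 with the new ETag header set.
	req := httptest.NewRequest("PUT", "/user/calendar/ev1.ics", strings.NewReader(ics))
	resp := caldav.HandleRequest(req)
	fmt.Println(resp.Status, resp.Header.Get("ETag"))
}
```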

168
vendor/github.com/samedi/caldav-go/handlers/report.go generated vendored Normal file

@ -0,0 +1,168 @@
package handlers
import (
"fmt"
"strings"
"net/http"
"encoding/xml"
"github.com/samedi/caldav-go/data"
"github.com/samedi/caldav-go/ixml"
"github.com/samedi/caldav-go/global"
)
type reportHandler struct{
request *http.Request
response *Response
}
// See more at RFC4791#section-7.1
func (rh reportHandler) Handle() *Response {
requestBody := readRequestBody(rh.request)
header := headers{rh.request.Header}
urlResource, found, err := global.Storage.GetShallowResource(rh.request.URL.Path)
if !found {
return rh.response.Set(http.StatusNotFound, "")
} else if err != nil {
return rh.response.SetError(err)
}
// read body string to xml struct
var requestXML reportRootXML
xml.Unmarshal([]byte(requestBody), &requestXML)
// The resources to be reported are fetched according to the type of the request. If it is
// a `calendar-multiget`, the resources are fetched based on a set of `hrefs` in the request body.
// If it is a `calendar-query`, the resources are calculated based on a set of filters in the request.
var resourcesToReport []reportRes
switch requestXML.XMLName {
case ixml.CALENDAR_MULTIGET_TG:
resourcesToReport, err = rh.fetchResourcesByList(urlResource, requestXML.Hrefs)
case ixml.CALENDAR_QUERY_TG:
resourcesToReport, err = rh.fetchResourcesByFilters(urlResource, requestXML.Filters)
default:
return rh.response.Set(http.StatusPreconditionFailed, "")
}
if err != nil {
return rh.response.SetError(err)
}
multistatus := &multistatusResp{
Minimal: header.IsMinimal(),
}
// for each href, build the multistatus responses
for _, r := range resourcesToReport {
propstats := multistatus.Propstats(r.resource, requestXML.Prop.Tags)
multistatus.AddResponse(r.href, r.found, propstats)
}
if multistatus.Minimal {
rh.response.SetHeader(HD_PREFERENCE_APPLIED, HD_PREFER_MINIMAL)
}
return rh.response.Set(207, multistatus.ToXML())
}
type reportPropXML struct {
Tags []xml.Name `xml:",any"`
}
type reportRootXML struct {
XMLName xml.Name
Prop reportPropXML `xml:"DAV: prop"`
Hrefs []string `xml:"DAV: href"`
Filters reportFilterXML `xml:"urn:ietf:params:xml:ns:caldav filter"`
}
type reportFilterXML struct {
XMLName xml.Name
InnerContent string `xml:",innerxml"`
}
func (this reportFilterXML) toString() string {
return fmt.Sprintf("<%s>%s</%s>", this.XMLName.Local, this.InnerContent, this.XMLName.Local)
}
// Wraps a resource that has to be reported, either fetched by filters or by a list.
// It contains the originally requested `href`, the actual `resource` (which can be nil)
// and whether the `resource` was `found` or not.
type reportRes struct {
href string
resource *data.Resource
found bool
}
// The resources are fetched based on the origin resource and a set of filters.
// If the origin resource is a collection, the filters are checked against each of the collection's resources
// to see if they match. Only the collection's resources that match the filters are returned; the ones that do not
// match will not appear in the response result.
// If the origin resource is not a collection, the function just returns it and ignores any filter processing.
// [See RFC4791#section-7.8]
func (rh reportHandler) fetchResourcesByFilters(origin *data.Resource, filtersXML reportFilterXML) ([]reportRes, error) {
// The list of resources that has to be reported back in the response.
reps := []reportRes{}
if origin.IsCollection() {
filters, _ := data.ParseResourceFilters(filtersXML.toString())
resources, err := global.Storage.GetResourcesByFilters(origin.Path, filters)
if err != nil {
return reps, err
}
for _, resource := range resources {
reps = append(reps, reportRes{resource.Path, &resource, true})
}
} else {
// the origin resource is not a collection, so returns just that as the result
reps = append(reps, reportRes{origin.Path, origin, true})
}
return reps, nil
}
// The hrefs can come from (1) the request URL or (2) from the request body itself.
// If the origin resource from the URL points to a collection (2), we will check the request body
// to get the requested `hrefs` (resource paths). Each requested href has to be related to the collection.
// The ones that are not related to it are simply ignored.
// If the resource from the URL is NOT a collection (1), we process the report only for this resource
// and ignore any other requested hrefs that might be present in the request body.
// [See RFC4791#section-7.9]
func (rh reportHandler) fetchResourcesByList(origin *data.Resource, requestedPaths []string) ([]reportRes, error) {
reps := []reportRes{}
if origin.IsCollection() {
resources, err := global.Storage.GetResourcesByList(requestedPaths)
if err != nil {
return reps, err
}
// we put all the resources found into a map of path -> resource.
// this will be used later to check which requested resources were found
// or not, and to build the response
resourcesMap := make(map[string]*data.Resource)
for _, resource := range resources {
r := resource
resourcesMap[resource.Path] = &r
}
for _, requestedPath := range requestedPaths {
// if the requested path does not belong to the origin collection, skip
// ('belonging' means that the path's prefix is the same as the collection path)
if !strings.HasPrefix(requestedPath, origin.Path) {
continue
}
resource, found := resourcesMap[requestedPath]
reps = append(reps, reportRes{requestedPath, resource, found})
}
} else {
reps = append(reps, reportRes{origin.Path, origin, true})
}
return reps, nil
}


@ -0,0 +1,65 @@
package handlers
import (
"io"
"net/http"
"github.com/samedi/caldav-go/errs"
)
type Response struct {
Status int
Header http.Header
Body string
Error error
}
func NewResponse() *Response {
return &Response{
Header: make(http.Header),
}
}
func (this *Response) Set(status int, body string) *Response {
this.Status = status
this.Body = body
return this
}
func (this *Response) SetHeader(key, value string) *Response {
this.Header.Set(key, value)
return this
}
func (this *Response) SetError(err error) *Response {
this.Error = err
switch err {
case errs.ResourceNotFoundError:
this.Status = http.StatusNotFound
case errs.UnauthorizedError:
this.Status = http.StatusUnauthorized
case errs.ForbiddenError:
this.Status = http.StatusForbidden
default:
this.Status = http.StatusInternalServerError
}
return this
}
func (this *Response) Write(writer http.ResponseWriter) {
if this.Error == errs.UnauthorizedError {
this.SetHeader("WWW-Authenticate", `Basic realm="Restricted"`)
}
for key, values := range this.Header {
for _, value := range values {
writer.Header().Set(key, value)
}
}
writer.WriteHeader(this.Status)
io.WriteString(writer, this.Body)
}

22
vendor/github.com/samedi/caldav-go/handlers/shared.go generated vendored Normal file

@ -0,0 +1,22 @@
package handlers
import (
"net/http"
"io/ioutil"
"bytes"
"github.com/samedi/caldav-go/lib"
)
// Supported ICal components on this server.
var supportedComponents = []string{lib.VCALENDAR, lib.VEVENT}
// This function reads the request body and restores its content, so that
// the request body can be read a second time.
func readRequestBody(request *http.Request) string {
// Read the content
body, _ := ioutil.ReadAll(request.Body)
// Restore the io.ReadCloser to its original state
request.Body = ioutil.NopCloser(bytes.NewBuffer(body))
// Use the content
return string(body)
}

93
vendor/github.com/samedi/caldav-go/ixml/ixml.go generated vendored Normal file

@ -0,0 +1,93 @@
package ixml
import (
"fmt"
"bytes"
"net/http"
"encoding/xml"
"github.com/samedi/caldav-go/lib"
)
const (
DAV_NS = "DAV:"
CALDAV_NS = "urn:ietf:params:xml:ns:caldav"
CALSERV_NS = "http://calendarserver.org/ns/"
)
var NS_PREFIXES = map[string]string{
DAV_NS: "D",
CALDAV_NS: "C",
CALSERV_NS: "CS",
}
var (
CALENDAR_TG = xml.Name{CALDAV_NS, "calendar"}
CALENDAR_DATA_TG = xml.Name{CALDAV_NS, "calendar-data"}
CALENDAR_HOME_SET_TG = xml.Name{CALDAV_NS, "calendar-home-set"}
CALENDAR_QUERY_TG = xml.Name{CALDAV_NS, "calendar-query"}
CALENDAR_MULTIGET_TG = xml.Name{CALDAV_NS, "calendar-multiget"}
CALENDAR_USER_ADDRESS_SET_TG = xml.Name{CALDAV_NS, "calendar-user-address-set"}
COLLECTION_TG = xml.Name{DAV_NS, "collection"}
CURRENT_USER_PRINCIPAL_TG = xml.Name{DAV_NS, "current-user-principal"}
DISPLAY_NAME_TG = xml.Name{DAV_NS, "displayname"}
GET_CONTENT_LENGTH_TG = xml.Name{DAV_NS, "getcontentlength"}
GET_CONTENT_TYPE_TG = xml.Name{DAV_NS, "getcontenttype"}
GET_CTAG_TG = xml.Name{CALSERV_NS, "getctag"}
GET_ETAG_TG = xml.Name{DAV_NS, "getetag"}
GET_LAST_MODIFIED_TG = xml.Name{DAV_NS, "getlastmodified"}
HREF_TG = xml.Name{DAV_NS, "href"}
OWNER_TG = xml.Name{DAV_NS, "owner"}
PRINCIPAL_TG = xml.Name{DAV_NS, "principal"}
PRINCIPAL_COLLECTION_SET_TG = xml.Name{DAV_NS, "principal-collection-set"}
PRINCIPAL_URL_TG = xml.Name{DAV_NS, "principal-URL"}
RESOURCE_TYPE_TG = xml.Name{DAV_NS, "resourcetype"}
STATUS_TG = xml.Name{DAV_NS, "status"}
SUPPORTED_CALENDAR_COMPONENT_SET_TG = xml.Name{CALDAV_NS, "supported-calendar-component-set"}
)
func Namespaces() string {
bf := new(lib.StringBuffer)
bf.Write(`xmlns:%s="%s" `, NS_PREFIXES[DAV_NS], DAV_NS)
bf.Write(`xmlns:%s="%s" `, NS_PREFIXES[CALDAV_NS], CALDAV_NS)
bf.Write(`xmlns:%s="%s"`, NS_PREFIXES[CALSERV_NS], CALSERV_NS)
return bf.String()
}
// Tag returns an XML tag as a string based on the given tag name and content. It
// takes the namespace into consideration and also whether the content is empty or not.
func Tag(xmlName xml.Name, content string) string {
name := xmlName.Local
ns := NS_PREFIXES[xmlName.Space]
if ns != "" {
ns = ns + ":"
}
if content != "" {
return fmt.Sprintf("<%s%s>%s</%s%s>", ns, name, content, ns, name)
} else {
return fmt.Sprintf("<%s%s/>", ns, name)
}
}
// HrefTag returns a DAV <D:href> tag with the given href path.
func HrefTag(href string) (tag string) {
return Tag(HREF_TG, href)
}
// StatusTag returns a DAV <D:status> tag with the given HTTP status. The
// status is translated into a label, e.g.: HTTP/1.1 404 Not Found.
func StatusTag(status int) string {
statusText := fmt.Sprintf("HTTP/1.1 %d %s", status, http.StatusText(status))
return Tag(STATUS_TG, statusText)
}
// EscapeText escapes any special character in the given text and returns the result.
func EscapeText(text string) string {
buffer := bytes.NewBufferString("")
xml.EscapeText(buffer, []byte(text))
return buffer.String()
}
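For reference, a few calls and the strings they produce (based on the functions above):
```go
package main

import (
	"fmt"

	"github.com/samedi/caldav-go/ixml"
)

func main() {
	// <D:href>/user/calendar/</D:href>
	fmt.Println(ixml.HrefTag("/user/calendar/"))
	// <D:status>HTTP/1.1 404 Not Found</D:status>
	fmt.Println(ixml.StatusTag(404))
	// Empty content collapses into a self-closing tag: <D:collection/>
	fmt.Println(ixml.Tag(ixml.COLLECTION_TG, ""))
}
```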

8
vendor/github.com/samedi/caldav-go/lib/components.go generated vendored Normal file

@ -0,0 +1,8 @@
package lib
const (
VCALENDAR = "VCALENDAR"
VEVENT = "VEVENT"
VJOURNAL = "VJOURNAL"
VTODO = "VTODO"
)

10
vendor/github.com/samedi/caldav-go/lib/paths.go generated vendored Normal file

@ -0,0 +1,10 @@
package lib
import (
"path/filepath"
)
func ToSlashPath(path string) string {
cleanPath := filepath.Clean(path)
return filepath.ToSlash(cleanPath)
}

18
vendor/github.com/samedi/caldav-go/lib/strbuff.go generated vendored Normal file

@ -0,0 +1,18 @@
package lib
import (
"fmt"
"bytes"
)
type StringBuffer struct {
buffer bytes.Buffer
}
func (b *StringBuffer) Write(format string, elem ...interface{}) {
b.buffer.WriteString(fmt.Sprintf(format, elem...))
}
func (b *StringBuffer) String() string {
return b.buffer.String()
}

4
vendor/github.com/samedi/caldav-go/test.sh generated vendored Executable file

@ -0,0 +1,4 @@
#!/usr/bin/env bash
go test -race ./...
rm -rf test-data

5
vendor/github.com/samedi/caldav-go/version.go generated vendored Normal file

@ -0,0 +1,5 @@
package caldav
const (
VERSION = "3.0.0"
)

3
vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/crypto/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/crypto/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

35
vendor/golang.org/x/crypto/bcrypt/base64.go generated vendored Normal file

@ -0,0 +1,35 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bcrypt
import "encoding/base64"
const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var bcEncoding = base64.NewEncoding(alphabet)
func base64Encode(src []byte) []byte {
n := bcEncoding.EncodedLen(len(src))
dst := make([]byte, n)
bcEncoding.Encode(dst, src)
for dst[n-1] == '=' {
n--
}
return dst[:n]
}
func base64Decode(src []byte) ([]byte, error) {
numOfEquals := 4 - (len(src) % 4)
for i := 0; i < numOfEquals; i++ {
src = append(src, '=')
}
dst := make([]byte, bcEncoding.DecodedLen(len(src)))
n, err := bcEncoding.Decode(dst, src)
if err != nil {
return nil, err
}
return dst[:n], nil
}

295
vendor/golang.org/x/crypto/bcrypt/bcrypt.go generated vendored Normal file

@ -0,0 +1,295 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt // import "golang.org/x/crypto/bcrypt"
// The code is a port of Provos and Mazières's C implementation.
import (
"crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"io"
"strconv"
"golang.org/x/crypto/blowfish"
)
const (
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
)
// The error returned from CompareHashAndPassword when a password and hash do
// not match.
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
// The error returned from CompareHashAndPassword when a hash is too short to
// be a bcrypt hash.
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
// The error returned from CompareHashAndPassword when a hash was created with
// a bcrypt algorithm newer than this implementation.
type HashVersionTooNewError byte
func (hv HashVersionTooNewError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
}
// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
type InvalidHashPrefixError byte
func (ih InvalidHashPrefixError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
}
type InvalidCostError int
func (ic InvalidCostError) Error() string {
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
}
const (
majorVersion = '2'
minorVersion = 'a'
maxSaltSize = 16
maxCryptedHashSize = 23
encodedSaltSize = 22
encodedHashSize = 31
minHashSize = 59
)
// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
var magicCipherData = []byte{
0x4f, 0x72, 0x70, 0x68,
0x65, 0x61, 0x6e, 0x42,
0x65, 0x68, 0x6f, 0x6c,
0x64, 0x65, 0x72, 0x53,
0x63, 0x72, 0x79, 0x44,
0x6f, 0x75, 0x62, 0x74,
}
type hashed struct {
hash []byte
salt []byte
cost int // allowed range is MinCost to MaxCost
major byte
minor byte
}
// GenerateFromPassword returns the bcrypt hash of the password at the given
// cost. If the cost given is less than MinCost, the cost will be set to
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
// to compare the returned hashed password with its cleartext version.
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
p, err := newFromPassword(password, cost)
if err != nil {
return nil, err
}
return p.Hash(), nil
}
// CompareHashAndPassword compares a bcrypt hashed password with its possible
// plaintext equivalent. Returns nil on success, or an error on failure.
func CompareHashAndPassword(hashedPassword, password []byte) error {
p, err := newFromHash(hashedPassword)
if err != nil {
return err
}
otherHash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return err
}
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
return nil
}
return ErrMismatchedHashAndPassword
}
// Cost returns the hashing cost used to create the given hashed
// password. When, in the future, the hashing cost of a password system needs
// to be increased in order to adjust for greater computational power, this
// function allows one to establish which passwords need to be updated.
func Cost(hashedPassword []byte) (int, error) {
p, err := newFromHash(hashedPassword)
if err != nil {
return 0, err
}
return p.cost, nil
}
func newFromPassword(password []byte, cost int) (*hashed, error) {
if cost < MinCost {
cost = DefaultCost
}
p := new(hashed)
p.major = majorVersion
p.minor = minorVersion
err := checkCost(cost)
if err != nil {
return nil, err
}
p.cost = cost
unencodedSalt := make([]byte, maxSaltSize)
_, err = io.ReadFull(rand.Reader, unencodedSalt)
if err != nil {
return nil, err
}
p.salt = base64Encode(unencodedSalt)
hash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return nil, err
}
p.hash = hash
return p, err
}
func newFromHash(hashedSecret []byte) (*hashed, error) {
if len(hashedSecret) < minHashSize {
return nil, ErrHashTooShort
}
p := new(hashed)
n, err := p.decodeVersion(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
n, err = p.decodeCost(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
// The "+2" is here because we'll have to append at most 2 '=' to the salt
// when base64 decoding it in expensiveBlowfishSetup().
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
copy(p.salt, hashedSecret[:encodedSaltSize])
hashedSecret = hashedSecret[encodedSaltSize:]
p.hash = make([]byte, len(hashedSecret))
copy(p.hash, hashedSecret)
return p, nil
}
func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
cipherData := make([]byte, len(magicCipherData))
copy(cipherData, magicCipherData)
c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
if err != nil {
return nil, err
}
for i := 0; i < 24; i += 8 {
for j := 0; j < 64; j++ {
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
}
}
// Bug compatibility with C bcrypt implementations. We only encode 23 of
// the 24 bytes encrypted.
hsh := base64Encode(cipherData[:maxCryptedHashSize])
return hsh, nil
}
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
csalt, err := base64Decode(salt)
if err != nil {
return nil, err
}
// Bug compatibility with C bcrypt implementations. They use the trailing
// NULL in the key string during expansion.
// We copy the key to prevent changing the underlying array.
ckey := append(key[:len(key):len(key)], 0)
c, err := blowfish.NewSaltedCipher(ckey, csalt)
if err != nil {
return nil, err
}
var i, rounds uint64
rounds = 1 << cost
for i = 0; i < rounds; i++ {
blowfish.ExpandKey(ckey, c)
blowfish.ExpandKey(csalt, c)
}
return c, nil
}
func (p *hashed) Hash() []byte {
arr := make([]byte, 60)
arr[0] = '$'
arr[1] = p.major
n := 2
if p.minor != 0 {
arr[2] = p.minor
n = 3
}
arr[n] = '$'
n++
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
n += 2
arr[n] = '$'
n++
copy(arr[n:], p.salt)
n += encodedSaltSize
copy(arr[n:], p.hash)
n += encodedHashSize
return arr[:n]
}
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
if sbytes[0] != '$' {
return -1, InvalidHashPrefixError(sbytes[0])
}
if sbytes[1] > majorVersion {
return -1, HashVersionTooNewError(sbytes[1])
}
p.major = sbytes[1]
n := 3
if sbytes[2] != '$' {
p.minor = sbytes[2]
n++
}
return n, nil
}
// sbytes should begin where decodeVersion left off.
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
cost, err := strconv.Atoi(string(sbytes[0:2]))
if err != nil {
return -1, err
}
err = checkCost(cost)
if err != nil {
return -1, err
}
p.cost = cost
return 3, nil
}
func (p *hashed) String() string {
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
}
func checkCost(cost int) error {
if cost < MinCost || cost > MaxCost {
return InvalidCostError(cost)
}
return nil
}
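
To make the intended use of the exported API above concrete, here is a minimal usage sketch (illustrative only, not part of this commit) that hashes a password with GenerateFromPassword and then verifies it with CompareHashAndPassword; only the vendored bcrypt package and the standard library are involved:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	password := []byte("s3cret")

	// Salt and hash the password at DefaultCost (10).
	hash, err := bcrypt.GenerateFromPassword(password, bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", hash) // e.g. $2a$10$...

	// Returns nil on a match, ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
		log.Fatal(err)
	}
	fmt.Println("password verified")
}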

159
vendor/golang.org/x/crypto/blowfish/block.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}

91
vendor/golang.org/x/crypto/blowfish/cipher.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish // import "golang.org/x/crypto/blowfish"
// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
if k := len(key); k < 1 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(&result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
}
initCipher(&result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
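
A minimal sketch (illustrative only, not part of this commit) of the exported surface above: NewCipher builds a Cipher from a 1 to 56 byte key, and Encrypt/Decrypt operate on exactly one BlockSize (8-byte) block:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/blowfish"
)

func main() {
	key := []byte("example key") // 1 to 56 bytes, per NewCipher
	src := []byte("8bytes!!")    // exactly one 8-byte block
	enc := make([]byte, blowfish.BlockSize)
	dec := make([]byte, blowfish.BlockSize)

	c, err := blowfish.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	c.Encrypt(enc, src) // one call to encryptBlock
	c.Decrypt(dec, enc) // decryptBlock reverses it
	fmt.Println(bytes.Equal(src, dec)) // true
}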

199
vendor/golang.org/x/crypto/blowfish/const.go generated vendored Normal file
View File

@ -0,0 +1,199 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The startup permutation array and substitution boxes.
// They are the hexadecimal digits of PI; see:
// https://www.schneier.com/code/constants.txt.
package blowfish
var s0 = [256]uint32{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
}
var s1 = [256]uint32{
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
}
var s2 = [256]uint32{
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
}
var s3 = [256]uint32{
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
}
var p = [18]uint32{
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
}

3
vendor/golang.org/x/net/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/net/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/net/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

56
vendor/golang.org/x/net/context/context.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
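
The usage rules in the package comment above (pass a Context as the first parameter, select on Done for cancelation) look roughly like this in practice; an illustrative sketch, not part of this commit:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// doWork takes ctx as its first parameter, per the guidelines above, and
// returns early if the context is canceled before the work finishes.
func doWork(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // stand-in for real work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Background is never canceled, so doWork runs to completion here.
	fmt.Println(doWork(context.Background())) // <nil>
}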

72
vendor/golang.org/x/net/context/go17.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}
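
WithCancel above is the building block for manual cancelation; a small illustrative sketch (not part of this commit) showing Done closing and Err reporting Canceled once cancel is called:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // closes ctx.Done() and sets ctx.Err() to Canceled
	}()

	<-ctx.Done()                               // unblocks once cancel runs
	fmt.Println(ctx.Err() == context.Canceled) // true
}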

20
vendor/golang.org/x/net/context/go19.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

300
vendor/golang.org/x/net/context/pre_go17.go generated vendored Normal file
View File

@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}
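
As the d <= 0 branch of WithDeadline above arranges, a deadline that has already passed yields a context that is expired on arrival; a small sketch (illustrative, not part of this commit):

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// A deadline one second in the past: Done is already closed and Err
	// reports DeadlineExceeded before any timer is ever started.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err() == context.DeadlineExceeded) // true
}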

109
vendor/golang.org/x/net/context/pre_go19.go generated vendored Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

796
vendor/golang.org/x/net/webdav/file.go generated vendored Normal file
View File

@ -0,0 +1,796 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"encoding/xml"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
if name == "" || name[0] != '/' {
name = "/" + name
}
return path.Clean(name)
}
// A FileSystem implements access to a collection of named files. The elements
// in a file path are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
//
// Each method has the same semantics as the os package's function of the same
// name.
//
// Note that the os.Rename documentation says that "OS-specific restrictions
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type FileSystem interface {
Mkdir(ctx context.Context, name string, perm os.FileMode) error
OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
RemoveAll(ctx context.Context, name string) error
Rename(ctx context.Context, oldName, newName string) error
Stat(ctx context.Context, name string) (os.FileInfo, error)
}
// A File is returned by a FileSystem's OpenFile method and can be served by a
// Handler.
//
// A File may optionally implement the DeadPropsHolder interface, if it can
// load and save dead properties.
type File interface {
http.File
io.Writer
}
// A Dir implements FileSystem using the native file system restricted to a
// specific directory tree.
//
// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
// string value is a filename on the native file system, not a URL, so it is
// separated by filepath.Separator, which isn't necessarily '/'.
//
// An empty Dir is treated as ".".
type Dir string
func (d Dir) resolve(name string) string {
// This implementation is based on Dir.Open's code in the standard net/http package.
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return ""
}
dir := string(d)
if dir == "" {
dir = "."
}
return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
}
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
return os.Mkdir(name, perm)
}
func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
f, err := os.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
return f, nil
}
func (d Dir) RemoveAll(ctx context.Context, name string) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
if name == filepath.Clean(string(d)) {
// Prohibit removing the virtual root directory.
return os.ErrInvalid
}
return os.RemoveAll(name)
}
func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
if oldName = d.resolve(oldName); oldName == "" {
return os.ErrNotExist
}
if newName = d.resolve(newName); newName == "" {
return os.ErrNotExist
}
if root := filepath.Clean(string(d)); root == oldName || root == newName {
// Prohibit renaming from or to the virtual root directory.
return os.ErrInvalid
}
return os.Rename(oldName, newName)
}
func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
return os.Stat(name)
}
// NewMemFS returns a new in-memory FileSystem implementation.
func NewMemFS() FileSystem {
return &memFS{
root: memFSNode{
children: make(map[string]*memFSNode),
mode: 0660 | os.ModeDir,
modTime: time.Now(),
},
}
}
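// Illustrative note (not part of the upstream file): either a Dir or the
// FileSystem returned by NewMemFS can back a webdav.Handler, which is defined
// elsewhere in this package alongside NewMemLS, e.g.:
//
//	h := &webdav.Handler{
//		Prefix:     "/dav/",
//		FileSystem: webdav.NewMemFS(), // or webdav.Dir("/srv/files")
//		LockSystem: webdav.NewMemLS(),
//	}
//	http.Handle("/dav/", h)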
// A memFS implements FileSystem, storing all metadata and actual file data
// in-memory. No limits on filesystem size are enforced, so it is not
// recommended for use where the clients are untrusted.
//
// Concurrent access is permitted. The tree structure is protected by a mutex,
// and each node's contents and metadata are protected by a per-node mutex.
//
// TODO: Enforce file permissions.
type memFS struct {
mu sync.Mutex
root memFSNode
}
// TODO: clean up and rationalize the walk/find code.
// walk walks the directory tree for the fullname, calling f at each step. If f
// returns an error, the walk will be aborted and return that same error.
//
// dir is the directory at that step, frag is the name fragment, and final is
// whether it is the final step. For example, walking "/foo/bar/x" will result
// in 3 calls to f:
// - "/", "foo", false
// - "/foo/", "bar", false
// - "/foo/bar/", "x", true
// The frag argument will be empty only if dir is the root node and the walk
// ends at that root node.
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
original := fullname
fullname = slashClean(fullname)
// Strip any leading "/"s to make fullname a relative path, as the walk
// starts at fs.root.
if fullname[0] == '/' {
fullname = fullname[1:]
}
dir := &fs.root
for {
frag, remaining := fullname, ""
i := strings.IndexRune(fullname, '/')
final := i < 0
if !final {
frag, remaining = fullname[:i], fullname[i+1:]
}
if frag == "" && dir != &fs.root {
panic("webdav: empty path fragment for a clean path")
}
if err := f(dir, frag, final); err != nil {
return &os.PathError{
Op: op,
Path: original,
Err: err,
}
}
if final {
break
}
child := dir.children[frag]
if child == nil {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrNotExist,
}
}
if !child.mode.IsDir() {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrInvalid,
}
}
dir, fullname = child, remaining
}
return nil
}
// find returns the parent of the named node and the relative name fragment
// from the parent to the child. For example, if finding "/foo/bar/baz" then
// parent will be the node for "/foo/bar" and frag will be "baz".
//
// If the fullname names the root node, then parent, frag and err will be zero.
//
// find returns an error if the parent does not already exist or the parent
// isn't a directory, but it will not return an error per se if the child does
// not already exist. The error returned is either nil or an *os.PathError
// whose Op is op.
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
if !final {
return nil
}
if frag0 != "" {
parent, frag = parent0, frag0
}
return nil
})
return parent, frag, err
}
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("mkdir", name)
if err != nil {
return err
}
if dir == nil {
// We can't create the root.
return os.ErrInvalid
}
if _, ok := dir.children[frag]; ok {
return os.ErrExist
}
dir.children[frag] = &memFSNode{
children: make(map[string]*memFSNode),
mode: perm.Perm() | os.ModeDir,
modTime: time.Now(),
}
return nil
}
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("open", name)
if err != nil {
return nil, err
}
var n *memFSNode
if dir == nil {
// We're opening the root.
if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
return nil, os.ErrPermission
}
n, frag = &fs.root, "/"
} else {
n = dir.children[frag]
if flag&(os.O_SYNC|os.O_APPEND) != 0 {
// memFile doesn't support these flags yet.
return nil, os.ErrInvalid
}
if flag&os.O_CREATE != 0 {
if flag&os.O_EXCL != 0 && n != nil {
return nil, os.ErrExist
}
if n == nil {
n = &memFSNode{
mode: perm.Perm(),
}
dir.children[frag] = n
}
}
if n == nil {
return nil, os.ErrNotExist
}
if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
n.mu.Lock()
n.data = nil
n.mu.Unlock()
}
}
children := make([]os.FileInfo, 0, len(n.children))
for cName, c := range n.children {
children = append(children, c.stat(cName))
}
return &memFile{
n: n,
nameSnapshot: frag,
childrenSnapshot: children,
}, nil
}
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("remove", name)
if err != nil {
return err
}
if dir == nil {
// We can't remove the root.
return os.ErrInvalid
}
delete(dir.children, frag)
return nil
}
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
oldName = slashClean(oldName)
newName = slashClean(newName)
if oldName == newName {
return nil
}
if strings.HasPrefix(newName, oldName+"/") {
// We can't rename oldName to be a sub-directory of itself.
return os.ErrInvalid
}
oDir, oFrag, err := fs.find("rename", oldName)
if err != nil {
return err
}
if oDir == nil {
// We can't rename from the root.
return os.ErrInvalid
}
nDir, nFrag, err := fs.find("rename", newName)
if err != nil {
return err
}
if nDir == nil {
// We can't rename to the root.
return os.ErrInvalid
}
oNode, ok := oDir.children[oFrag]
if !ok {
return os.ErrNotExist
}
if oNode.children != nil {
if nNode, ok := nDir.children[nFrag]; ok {
if nNode.children == nil {
return errNotADirectory
}
if len(nNode.children) != 0 {
return errDirectoryNotEmpty
}
}
}
delete(oDir.children, oFrag)
nDir.children[nFrag] = oNode
return nil
}
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("stat", name)
if err != nil {
return nil, err
}
if dir == nil {
// We're stat'ting the root.
return fs.root.stat("/"), nil
}
if n, ok := dir.children[frag]; ok {
return n.stat(path.Base(name)), nil
}
return nil, os.ErrNotExist
}
// A memFSNode represents a single entry in the in-memory filesystem and also
// implements os.FileInfo.
type memFSNode struct {
// children is protected by memFS.mu.
children map[string]*memFSNode
mu sync.Mutex
data []byte
mode os.FileMode
modTime time.Time
deadProps map[xml.Name]Property
}
func (n *memFSNode) stat(name string) *memFileInfo {
n.mu.Lock()
defer n.mu.Unlock()
return &memFileInfo{
name: name,
size: int64(len(n.data)),
mode: n.mode,
modTime: n.modTime,
}
}
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
n.mu.Lock()
defer n.mu.Unlock()
if len(n.deadProps) == 0 {
return nil, nil
}
ret := make(map[xml.Name]Property, len(n.deadProps))
for k, v := range n.deadProps {
ret[k] = v
}
return ret, nil
}
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
n.mu.Lock()
defer n.mu.Unlock()
pstat := Propstat{Status: http.StatusOK}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
if patch.Remove {
delete(n.deadProps, p.XMLName)
continue
}
if n.deadProps == nil {
n.deadProps = map[xml.Name]Property{}
}
n.deadProps[p.XMLName] = p
}
}
return []Propstat{pstat}, nil
}
type memFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (f *memFileInfo) Name() string { return f.name }
func (f *memFileInfo) Size() int64 { return f.size }
func (f *memFileInfo) Mode() os.FileMode { return f.mode }
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
func (f *memFileInfo) Sys() interface{} { return nil }
// A memFile is a File implementation for a memFSNode. It holds a per-file
// (not per-node) read/write position, and a snapshot of the memFS' tree
// structure (a node's name and children) for that node.
type memFile struct {
n *memFSNode
nameSnapshot string
childrenSnapshot []os.FileInfo
// pos is protected by n.mu.
pos int
}
// A *memFile implements the optional DeadPropsHolder interface.
var _ DeadPropsHolder = (*memFile)(nil)
func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
func (f *memFile) Close() error {
return nil
}
func (f *memFile) Read(p []byte) (int, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos >= len(f.n.data) {
return 0, io.EOF
}
n := copy(p, f.n.data[f.pos:])
f.pos += n
return n, nil
}
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if !f.n.mode.IsDir() {
return nil, os.ErrInvalid
}
old := f.pos
if old >= len(f.childrenSnapshot) {
// The os.File Readdir docs say that at the end of a directory,
// the error is io.EOF if count > 0 and nil if count <= 0.
if count > 0 {
return nil, io.EOF
}
return nil, nil
}
if count > 0 {
f.pos += count
if f.pos > len(f.childrenSnapshot) {
f.pos = len(f.childrenSnapshot)
}
} else {
f.pos = len(f.childrenSnapshot)
old = 0
}
return f.childrenSnapshot[old:f.pos], nil
}
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
npos := f.pos
// TODO: How to handle offsets greater than the size of system int?
switch whence {
case os.SEEK_SET:
npos = int(offset)
case os.SEEK_CUR:
npos += int(offset)
case os.SEEK_END:
npos = len(f.n.data) + int(offset)
default:
npos = -1
}
if npos < 0 {
return 0, os.ErrInvalid
}
f.pos = npos
return int64(f.pos), nil
}
func (f *memFile) Stat() (os.FileInfo, error) {
return f.n.stat(f.nameSnapshot), nil
}
func (f *memFile) Write(p []byte) (int, error) {
lenp := len(p)
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos < len(f.n.data) {
n := copy(f.n.data[f.pos:], p)
f.pos += n
p = p[n:]
} else if f.pos > len(f.n.data) {
// Write permits the creation of holes, if we've seek'ed past the
// existing end of file.
if f.pos <= cap(f.n.data) {
oldLen := len(f.n.data)
f.n.data = f.n.data[:f.pos]
hole := f.n.data[oldLen:]
for i := range hole {
hole[i] = 0
}
} else {
d := make([]byte, f.pos, f.pos+len(p))
copy(d, f.n.data)
f.n.data = d
}
}
if len(p) > 0 {
// We should only get here if f.pos == len(f.n.data).
f.n.data = append(f.n.data, p...)
f.pos = len(f.n.data)
}
f.n.modTime = time.Now()
return lenp, nil
}
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if !os.IsNotExist(err) {
return http.StatusForbidden, err
}
created = true
} else if overwrite {
// Section 9.9.3 says that "If a resource exists at the destination
// and the Overwrite header is "T", then prior to performing the move,
// the server must perform a DELETE with "Depth: infinity" on the
// destination resource."
if err := fs.RemoveAll(ctx, dst); err != nil {
return http.StatusForbidden, err
}
} else {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.Rename(ctx, src, dst); err != nil {
return http.StatusForbidden, err
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
func copyProps(dst, src File) error {
d, ok := dst.(DeadPropsHolder)
if !ok {
return nil
}
s, ok := src.(DeadPropsHolder)
if !ok {
return nil
}
m, err := s.DeadProps()
if err != nil {
return err
}
props := make([]Property, 0, len(m))
for _, prop := range m {
props = append(props, prop)
}
_, err = d.Patch([]Proppatch{{Props: props}})
return err
}
// copyFiles copies files and/or directories from src to dst.
//
// See section 9.8.5 for when various HTTP status codes apply.
func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
if recursion == 1000 {
return http.StatusInternalServerError, errRecursionTooDeep
}
recursion++
// TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
// into /A/B/ could lead to infinite recursion if not handled correctly."
srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
defer srcFile.Close()
srcStat, err := srcFile.Stat()
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
srcPerm := srcStat.Mode() & os.ModePerm
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if os.IsNotExist(err) {
created = true
} else {
return http.StatusForbidden, err
}
} else {
if !overwrite {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
return http.StatusForbidden, err
}
}
if srcStat.IsDir() {
if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
return http.StatusForbidden, err
}
if depth == infiniteDepth {
children, err := srcFile.Readdir(-1)
if err != nil {
return http.StatusForbidden, err
}
for _, c := range children {
name := c.Name()
s := path.Join(src, name)
d := path.Join(dst, name)
cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
if cErr != nil {
// TODO: MultiStatus.
return cStatus, cErr
}
}
}
} else {
dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
if err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusForbidden, err
}
_, copyErr := io.Copy(dstFile, srcFile)
propsErr := copyProps(dstFile, srcFile)
closeErr := dstFile.Close()
if copyErr != nil {
return http.StatusInternalServerError, copyErr
}
if propsErr != nil {
return http.StatusInternalServerError, propsErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
// walkFS traverses filesystem fs starting at name up to depth levels.
//
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
// walkFS calls walkFn. If a visited file system node is a directory and
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
// This implementation is based on Walk's code in the standard path/filepath package.
err := walkFn(name, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() || depth == 0 {
return nil
}
if depth == 1 {
depth = 0
}
// Read directory names.
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return walkFn(name, info, err)
}
fileInfos, err := f.Readdir(0)
f.Close()
if err != nil {
return walkFn(name, info, err)
}
for _, fileInfo := range fileInfos {
filename := path.Join(name, fileInfo.Name())
fileInfo, err := fs.Stat(ctx, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
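
The memFS above is the backing store returned by webdav.NewMemFS, and moveFiles, copyFiles and walkFS are the MOVE, COPY and PROPFIND plumbing that webdav.Handler drives. A minimal sketch of serving it over HTTP (the mount prefix and port are arbitrary choices for illustration, not taken from this repository):

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/webdav"
)

func main() {
    h := &webdav.Handler{
        Prefix:     "/dav",            // arbitrary mount point
        FileSystem: webdav.NewMemFS(), // the in-memory memFS defined above
        LockSystem: webdav.NewMemLS(), // in-memory LockSystem, see lock.go below
        Logger: func(r *http.Request, err error) {
            if err != nil {
                log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
            }
        },
    }
    http.Handle("/dav/", h)
    log.Fatal(http.ListenAndServe(":8080", nil))
}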

17
vendor/golang.org/x/net/webdav/file_go1.6.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package webdav
import (
"net/http"
"golang.org/x/net/context"
)
func getContext(r *http.Request) context.Context {
return context.Background()
}

16
vendor/golang.org/x/net/webdav/file_go1.7.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package webdav
import (
"context"
"net/http"
)
func getContext(r *http.Request) context.Context {
return r.Context()
}
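
getContext is the only piece that differs between the two builds: before Go 1.7 there is no request-scoped context (the handler gets context.Background()), while on Go 1.7+ it threads r.Context() into every FileSystem call. A hedged sketch of taking advantage of that on the FileSystem side (cancelFS and the package name are invented for the example, and it assumes a Go 1.9+ toolchain where golang.org/x/net/context aliases the standard context package):

package webdavctx

import (
    "context"
    "os"

    "golang.org/x/net/webdav"
)

// cancelFS wraps another webdav.FileSystem and refuses new work once the
// per-request context obtained via getContext has been cancelled.
type cancelFS struct{ inner webdav.FileSystem }

var _ webdav.FileSystem = cancelFS{}

func (fs cancelFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
    if err := ctx.Err(); err != nil {
        return err
    }
    return fs.inner.Mkdir(ctx, name, perm)
}

func (fs cancelFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
    if err := ctx.Err(); err != nil {
        return nil, err
    }
    return fs.inner.OpenFile(ctx, name, flag, perm)
}

func (fs cancelFS) RemoveAll(ctx context.Context, name string) error {
    if err := ctx.Err(); err != nil {
        return err
    }
    return fs.inner.RemoveAll(ctx, name)
}

func (fs cancelFS) Rename(ctx context.Context, oldName, newName string) error {
    if err := ctx.Err(); err != nil {
        return err
    }
    return fs.inner.Rename(ctx, oldName, newName)
}

func (fs cancelFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
    if err := ctx.Err(); err != nil {
        return nil, err
    }
    return fs.inner.Stat(ctx, name)
}

A Handler built with FileSystem: cancelFS{inner: webdav.NewMemFS()} then returns early from new filesystem operations once the client has gone away.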

173
vendor/golang.org/x/net/webdav/if.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
import (
"strings"
)
// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
lists []ifList
}
// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
resourceTag string
conditions []Condition
}
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for an http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
s := strings.TrimSpace(httpHeader)
switch tokenType, _, _ := lex(s); tokenType {
case '(':
return parseNoTagLists(s)
case angleTokenType:
return parseTaggedLists(s)
default:
return ifHeader{}, false
}
}
func parseNoTagLists(s string) (h ifHeader, ok bool) {
for {
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
}
}
func parseTaggedLists(s string) (h ifHeader, ok bool) {
resourceTag, n := "", 0
for first := true; ; first = false {
tokenType, tokenStr, remaining := lex(s)
switch tokenType {
case angleTokenType:
if !first && n == 0 {
return ifHeader{}, false
}
resourceTag, n = tokenStr, 0
s = remaining
case '(':
n++
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
l.resourceTag = resourceTag
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
default:
return ifHeader{}, false
}
}
}
func parseList(s string) (l ifList, remaining string, ok bool) {
tokenType, _, s := lex(s)
if tokenType != '(' {
return ifList{}, "", false
}
for {
tokenType, _, remaining = lex(s)
if tokenType == ')' {
if len(l.conditions) == 0 {
return ifList{}, "", false
}
return l, remaining, true
}
c, remaining, ok := parseCondition(s)
if !ok {
return ifList{}, "", false
}
l.conditions = append(l.conditions, c)
s = remaining
}
}
func parseCondition(s string) (c Condition, remaining string, ok bool) {
tokenType, tokenStr, s := lex(s)
if tokenType == notTokenType {
c.Not = true
tokenType, tokenStr, s = lex(s)
}
switch tokenType {
case strTokenType, angleTokenType:
c.Token = tokenStr
case squareTokenType:
c.ETag = tokenStr
default:
return Condition{}, "", false
}
return c, s, true
}
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
errTokenType = rune(-1)
eofTokenType = rune(-2)
strTokenType = rune(-3)
notTokenType = rune(-4)
angleTokenType = rune(-5)
squareTokenType = rune(-6)
)
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
// The net/textproto Reader that parses the HTTP header will collapse
// Linear White Space that spans multiple "\r\n" lines to a single " ",
// so we don't need to look for '\r' or '\n'.
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
s = s[1:]
}
if len(s) == 0 {
return eofTokenType, "", ""
}
i := 0
loop:
for ; i < len(s); i++ {
switch s[i] {
case '\t', ' ', '(', ')', '<', '>', '[', ']':
break loop
}
}
if i != 0 {
tokenStr, remaining = s[:i], s[i:]
if tokenStr == "Not" {
return notTokenType, "", remaining
}
return strTokenType, tokenStr, remaining
}
j := 0
switch s[0] {
case '<':
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
case '[':
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
default:
return rune(s[0]), "", s[1:]
}
if j < 0 {
return errTokenType, "", ""
}
return tokenType, s[1:j], s[j+1:]
}
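
The two grammars accepted here are the RFC 4918 "No-tag-list" and "Tagged-list" productions. Since parseIfHeader and its result types are unexported, the sketch below is written as an illustrative test that would sit next to if.go inside package webdav; the header values are made up:

package webdav

import "testing"

// TestParseIfHeaderSketch is an illustrative, non-exhaustive check of the two
// header shapes handled above: a bare parenthesised list and a tagged list.
func TestParseIfHeaderSketch(t *testing.T) {
    // No-tag list: one AND-list containing a single lock-token condition.
    h, ok := parseIfHeader(`(<opaquelocktoken:abc-123>)`)
    if !ok || len(h.lists) != 1 || h.lists[0].conditions[0].Token != "opaquelocktoken:abc-123" {
        t.Fatalf("no-tag list: got %+v, ok=%v", h, ok)
    }

    // Tagged list: the conditions apply to the tagged resource; "Not" and
    // square-bracketed ETags are recognised as well.
    h, ok = parseIfHeader(`<http://example.com/a> (Not <DAV:no-lock> ["v1"])`)
    if !ok || h.lists[0].resourceTag != "http://example.com/a" {
        t.Fatalf("tagged list: got %+v, ok=%v", h, ok)
    }
    if c := h.lists[0].conditions[0]; !c.Not || c.Token != "DAV:no-lock" {
        t.Fatalf("first condition: got %+v", c)
    }
}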

11
vendor/golang.org/x/net/webdav/internal/xml/README generated vendored Normal file
View File

@ -0,0 +1,11 @@
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
space behavior" made late in the lead-up to the Go 1.5 release.
The list of encoding/xml changes is at
https://go.googlesource.com/go/+log/master/src/encoding/xml
This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
released.
See http://golang.org/issue/11841

1223
vendor/golang.org/x/net/webdav/internal/xml/marshal.go generated vendored Normal file

File diff suppressed because it is too large

692
vendor/golang.org/x/net/webdav/internal/xml/read.go generated vendored Normal file
View File

@ -0,0 +1,692 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// * If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// * If the struct has a field named XMLName of type xml.Name,
// Unmarshal records the element name in that field.
//
// * If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// * If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// * If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// * If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// * If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// * If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// * If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// * If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// * An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// * A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an XML element to a slice by extending the length of
// the slice and mapping the element to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow.
//
// Unmarshal maps an XML element to an xml.Name by recording the
// element name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
//
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(bytes.NewReader(data)).Decode(v)
}
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v interface{}) error {
return d.DecodeElement(v, nil)
}
// DecodeElement works like xml.Unmarshal except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to Unmarshal for some elements.
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer passed to Unmarshal")
}
return d.unmarshal(val.Elem(), start)
}
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string
func (e UnmarshalError) Error() string { return string(e) }
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
UnmarshalXML(d *Decoder, start StartElement) error
}
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
func receiverType(val interface{}) string {
t := reflect.TypeOf(val)
if t.Name() != "" {
return t.String()
}
return "(" + t.String() + ")"
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
// Record that decoder must stop at end tag corresponding to start.
p.pushEOF()
p.unmarshalDepth++
err := val.UnmarshalXML(p, *start)
p.unmarshalDepth--
if err != nil {
p.popEOF()
return err
}
if !p.popEOF() {
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
}
return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
var buf []byte
depth := 1
for depth > 0 {
t, err := p.Token()
if err != nil {
return err
}
switch t := t.(type) {
case CharData:
if depth == 1 {
buf = append(buf, t...)
}
case StartElement:
depth++
case EndElement:
depth--
}
}
return val.UnmarshalText(buf)
}
// unmarshalAttr unmarshals a single XML attribute into val.
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
}
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
}
copyValue(val, []byte(attr.Value))
return nil
}
var (
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Unmarshal a single XML element into val.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
// Find start element if we need it.
if start == nil {
for {
tok, err := p.Token()
if err != nil {
return err
}
if t, ok := tok.(StartElement); ok {
start = &t
break
}
}
}
// Load value from interface, but only if the result will be
// usefully addressable.
if val.Kind() == reflect.Interface && !val.IsNil() {
e := val.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
val = e
}
}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
}
}
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
}
}
var (
data []byte
saveData reflect.Value
comment []byte
saveComment reflect.Value
saveXML reflect.Value
saveXMLIndex int
saveXMLData []byte
saveAny reflect.Value
sv reflect.Value
tinfo *typeInfo
err error
)
switch v := val; v.Kind() {
default:
return errors.New("unknown type " + v.Type().String())
case reflect.Interface:
// TODO: For now, simply ignore the field. In the near
// future we may choose to unmarshal the start
// element on it, if not nil.
return p.Skip()
case reflect.Slice:
typ := v.Type()
if typ.Elem().Kind() == reflect.Uint8 {
// []byte
saveData = v
break
}
// Slice of element values.
// Grow slice.
n := v.Len()
if n >= v.Cap() {
ncap := 2 * n
if ncap < 4 {
ncap = 4
}
new := reflect.MakeSlice(typ, n, ncap)
reflect.Copy(new, v)
v.Set(new)
}
v.SetLen(n + 1)
// Recur to read element into slice.
if err := p.unmarshal(v.Index(n), start); err != nil {
v.SetLen(n)
return err
}
return nil
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
saveData = v
case reflect.Struct:
typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
}
// Validate and assign element name.
if tinfo.xmlname != nil {
finfo := tinfo.xmlname
if finfo.name != "" && finfo.name != start.Name.Local {
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
}
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
if start.Name.Space == "" {
e += "no name space"
} else {
e += start.Name.Space
}
return UnmarshalError(e)
}
fv := finfo.value(sv)
if _, ok := fv.Interface().(Name); ok {
fv.Set(reflect.ValueOf(start.Name))
}
}
// Assign attributes.
// Also, determine whether we need to save character data or comments.
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
switch finfo.flags & fMode {
case fAttr:
strv := finfo.value(sv)
// Look for attribute.
for _, a := range start.Attr {
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
if err := p.unmarshalAttr(strv, a); err != nil {
return err
}
break
}
}
case fCharData:
if !saveData.IsValid() {
saveData = finfo.value(sv)
}
case fComment:
if !saveComment.IsValid() {
saveComment = finfo.value(sv)
}
case fAny, fAny | fElement:
if !saveAny.IsValid() {
saveAny = finfo.value(sv)
}
case fInnerXml:
if !saveXML.IsValid() {
saveXML = finfo.value(sv)
if p.saved == nil {
saveXMLIndex = 0
p.saved = new(bytes.Buffer)
} else {
saveXMLIndex = p.savedOffset()
}
}
}
}
}
// Find end element.
// Process sub-elements along the way.
Loop:
for {
var savedOffset int
if saveXML.IsValid() {
savedOffset = p.savedOffset()
}
tok, err := p.Token()
if err != nil {
return err
}
switch t := tok.(type) {
case StartElement:
consumed := false
if sv.IsValid() {
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
if err := p.unmarshal(saveAny, &t); err != nil {
return err
}
}
}
if !consumed {
if err := p.Skip(); err != nil {
return err
}
}
case EndElement:
if saveXML.IsValid() {
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
if saveXMLIndex == 0 {
p.saved = nil
}
}
break Loop
case CharData:
if saveData.IsValid() {
data = append(data, t...)
}
case Comment:
if saveComment.IsValid() {
comment = append(comment, t...)
}
}
}
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
if saveData.IsValid() && saveData.CanAddr() {
pv := saveData.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
}
if err := copyValue(saveData, data); err != nil {
return err
}
switch t := saveComment; t.Kind() {
case reflect.String:
t.SetString(string(comment))
case reflect.Slice:
t.Set(reflect.ValueOf(comment))
}
switch t := saveXML; t.Kind() {
case reflect.String:
t.SetString(string(saveXMLData))
case reflect.Slice:
t.Set(reflect.ValueOf(saveXMLData))
}
return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Ptr {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
continue
}
for j := range parents {
if parents[j] != finfo.parents[j] {
continue Loop
}
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
return true, p.unmarshal(finfo.value(sv), start)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
// since it's not ok for one field path to be itself
// the prefix for another field path.
recurse = true
// We can reuse the same slice as long as we
// don't try to append to it.
parents = finfo.parents[:len(parents)+1]
break
}
}
if !recurse {
// We have no business with this element.
return false, nil
}
// The element is not a perfect match for any field, but one
// or more fields have the path to this element as a parent
// prefix. Recurse and attempt to match these.
for {
var tok Token
tok, err = p.Token()
if err != nil {
return true, err
}
switch t := tok.(type) {
case StartElement:
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
if err != nil {
return true, err
}
if !consumed2 {
if err := p.Skip(); err != nil {
return true, err
}
}
case EndElement:
return true, nil
}
}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
if err := d.Skip(); err != nil {
return err
}
case EndElement:
return nil
}
}
}
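
The Unmarshal rules documented at the top of this file match the standard library's; the fork only restores the pre-Go-1.5 namespace handling that webdav depends on. Because it lives under internal/ it cannot be imported from outside golang.org/x/net/webdav, so the sketch below demonstrates the same mapping with the standard encoding/xml; the propfind struct is shaped after a WebDAV PROPFIND request body and is an assumption for illustration:

package main

import (
    "encoding/xml"
    "fmt"
)

// propfind: the XMLName tag pins the expected "DAV:"-namespaced element, and
// the pointer fields become non-nil exactly when the child element is present.
type propfind struct {
    XMLName  xml.Name  `xml:"DAV: propfind"`
    Allprop  *struct{} `xml:"DAV: allprop"`
    Propname *struct{} `xml:"DAV: propname"`
}

func main() {
    const body = `<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:"><D:allprop/></D:propfind>`

    var pf propfind
    if err := xml.Unmarshal([]byte(body), &pf); err != nil {
        fmt.Println("unmarshal:", err)
        return
    }
    fmt.Println("allprop requested:", pf.Allprop != nil)   // true
    fmt.Println("propname requested:", pf.Propname != nil) // false
}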

371
vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go generated vendored Normal file
View File

@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
xmlname *fieldInfo
fields []fieldInfo
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
idx []int
name string
xmlns string
flags fieldFlags
parents []string
}
type fieldFlags int
const (
fElement fieldFlags = 1 << iota
fAttr
fCharData
fInnerXml
fComment
fAny
fOmitEmpty
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
tinfoLock.RLock()
tinfo, ok := tinfoMap[typ]
tinfoLock.RUnlock()
if ok {
return tinfo, nil
}
tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
continue // Private field
}
// For embedded structs, embed its fields.
if f.Anonymous {
t := f.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct {
inner, err := getTypeInfo(t)
if err != nil {
return nil, err
}
if tinfo.xmlname == nil {
tinfo.xmlname = inner.xmlname
}
for _, finfo := range inner.fields {
finfo.idx = append([]int{i}, finfo.idx...)
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
return nil, err
}
}
continue
}
}
finfo, err := structFieldInfo(typ, &f)
if err != nil {
return nil, err
}
if f.Name == "XMLName" {
tinfo.xmlname = finfo
continue
}
// Add the field if it doesn't conflict with other fields.
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
return nil, err
}
}
}
tinfoLock.Lock()
tinfoMap[typ] = tinfo
tinfoLock.Unlock()
return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
finfo := &fieldInfo{idx: f.Index}
// Split the tag from the xml namespace if necessary.
tag := f.Tag.Get("xml")
if i := strings.Index(tag, " "); i >= 0 {
finfo.xmlns, tag = tag[:i], tag[i+1:]
}
// Parse flags.
tokens := strings.Split(tag, ",")
if len(tokens) == 1 {
finfo.flags = fElement
} else {
tag = tokens[0]
for _, flag := range tokens[1:] {
switch flag {
case "attr":
finfo.flags |= fAttr
case "chardata":
finfo.flags |= fCharData
case "innerxml":
finfo.flags |= fInnerXml
case "comment":
finfo.flags |= fComment
case "any":
finfo.flags |= fAny
case "omitempty":
finfo.flags |= fOmitEmpty
}
}
// Validate the flags used.
valid := true
switch mode := finfo.flags & fMode; mode {
case 0:
finfo.flags |= fElement
case fAttr, fCharData, fInnerXml, fComment, fAny:
if f.Name == "XMLName" || tag != "" && mode != fAttr {
valid = false
}
default:
// This will also catch multiple modes in a single field.
valid = false
}
if finfo.flags&fMode == fAny {
finfo.flags |= fElement
}
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
valid = false
}
if !valid {
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
}
// Use of xmlns without a name is not allowed.
if finfo.xmlns != "" && tag == "" {
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
if f.Name == "XMLName" {
// The XMLName field records the XML element name. Don't
// process it as usual because its name should default to
// empty rather than to the field name.
finfo.name = tag
return finfo, nil
}
if tag == "" {
// If the name part of the tag is completely empty, get
// default from XMLName of underlying struct if feasible,
// or field name otherwise.
if xmlname := lookupXMLName(f.Type); xmlname != nil {
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
} else {
finfo.name = f.Name
}
return finfo, nil
}
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
// If it's an element and no namespace is specified, get the default
// from the XMLName of enclosing struct if possible.
if xmlname := lookupXMLName(typ); xmlname != nil {
finfo.xmlns = xmlname.xmlns
}
}
// Prepare field name and parents.
parents := strings.Split(tag, ">")
if parents[0] == "" {
parents[0] = f.Name
}
if parents[len(parents)-1] == "" {
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
}
finfo.name = parents[len(parents)-1]
if len(parents) > 1 {
if (finfo.flags & fElement) == 0 {
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
}
finfo.parents = parents[:len(parents)-1]
}
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
// is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
if xmlname != nil && xmlname.name != finfo.name {
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
finfo.name, typ, f.Name, xmlname.name, ftyp)
}
}
return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return nil
}
for i, n := 0, typ.NumField(); i < n; i++ {
f := typ.Field(i)
if f.Name != "XMLName" {
continue
}
finfo, err := structFieldInfo(typ, &f)
if finfo.name != "" && err == nil {
return finfo
}
// Also consider errors as a non-existent field tag
// and let getTypeInfo itself report the error.
break
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
var conflicts []int
Loop:
// First, figure all conflicts. Most working code will have none.
for i := range tinfo.fields {
oldf := &tinfo.fields[i]
if oldf.flags&fMode != newf.flags&fMode {
continue
}
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
continue
}
minl := min(len(newf.parents), len(oldf.parents))
for p := 0; p < minl; p++ {
if oldf.parents[p] != newf.parents[p] {
continue Loop
}
}
if len(oldf.parents) > len(newf.parents) {
if oldf.parents[len(newf.parents)] == newf.name {
conflicts = append(conflicts, i)
}
} else if len(oldf.parents) < len(newf.parents) {
if newf.parents[len(oldf.parents)] == oldf.name {
conflicts = append(conflicts, i)
}
} else {
if newf.name == oldf.name {
conflicts = append(conflicts, i)
}
}
}
// Without conflicts, add the new field and return.
if conflicts == nil {
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// If any conflict is shallower, ignore the new field.
// This matches the Go field resolution on embedding.
for _, i := range conflicts {
if len(tinfo.fields[i].idx) < len(newf.idx) {
return nil
}
}
// Otherwise, if any of them is at the same depth level, it's an error.
for _, i := range conflicts {
oldf := &tinfo.fields[i]
if len(oldf.idx) == len(newf.idx) {
f1 := typ.FieldByIndex(oldf.idx)
f2 := typ.FieldByIndex(newf.idx)
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
}
}
// Otherwise, the new field is shallower, and thus takes precedence,
// so drop the conflicting fields from tinfo and append the new one.
for c := len(conflicts) - 1; c >= 0; c-- {
i := conflicts[c]
copy(tinfo.fields[i:], tinfo.fields[i+1:])
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
}
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
for i, x := range finfo.idx {
if i > 0 {
t := v.Type()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
v = v.Field(x)
}
return v
}
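
structFieldInfo is where the `xml:"..."` tag grammar is interpreted: an optional namespace before a space, a name that may be a parent>child chain, then comma-separated flags. The fork's grammar matches the standard library's, so a hedged illustration with encoding/xml works the same way (the entry type is invented for the example):

package main

import (
    "encoding/xml"
    "fmt"
)

// entry exercises the main tag forms parsed by structFieldInfo:
// an attribute, a nested parent>child element, character data, and "-".
type entry struct {
    XMLName xml.Name `xml:"entry"`
    ID      int      `xml:"id,attr"`    // fAttr
    Title   string   `xml:"meta>title"` // fElement with parents ["meta"]
    Body    string   `xml:",chardata"`  // fCharData
    Secret  string   `xml:"-"`          // never marshalled or unmarshalled
}

func main() {
    out, err := xml.Marshal(entry{ID: 7, Title: "hello", Body: "text", Secret: "hidden"})
    if err != nil {
        fmt.Println("marshal:", err)
        return
    }
    // Produces an <entry> element with an id attribute, a nested
    // <meta><title> element, the character data, and no trace of Secret.
    fmt.Println(string(out))
}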

1998
vendor/golang.org/x/net/webdav/internal/xml/xml.go generated vendored Normal file

File diff suppressed because it is too large

94
vendor/golang.org/x/net/webdav/litmus_test_server.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program is a server for the WebDAV 'litmus' compliance test at
http://www.webdav.org/neon/litmus/
To run the test:
go run litmus_test_server.go
and separately, from the downloaded litmus-xxx directory:
make URL=http://localhost:9999/ check
*/
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/url"
"golang.org/x/net/webdav"
)
var port = flag.Int("port", 9999, "server port")
func main() {
flag.Parse()
log.SetFlags(0)
h := &webdav.Handler{
FileSystem: webdav.NewMemFS(),
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
litmus := r.Header.Get("X-Litmus")
if len(litmus) > 19 {
litmus = litmus[:16] + "..."
}
switch r.Method {
case "COPY", "MOVE":
dst := ""
if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
dst = u.Path
}
o := r.Header.Get("Overwrite")
log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
default:
log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
}
},
}
// The next line would normally be:
// http.Handle("/", h)
// but we wrap that HTTP handler h to cater for a special case.
//
// The propfind_invalid2 litmus test case expects an empty namespace prefix
// declaration to be an error. The FAQ in the webdav litmus test says:
//
// "What does the "propfind_invalid2" test check for?...
//
// If a request was sent with an XML body which included an empty namespace
// prefix declaration (xmlns:ns1=""), then the server must reject that with
// a "400 Bad Request" response, as it is invalid according to the XML
// Namespace specification."
//
// On the other hand, the Go standard library's encoding/xml package
// accepts an empty xmlns namespace, as per the discussion at
// https://github.com/golang/go/issues/8068
//
// Empty namespaces seem disallowed in the second (2006) edition of the XML
// standard, but allowed in a later edition. The grammar differs between
// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
// http://www.w3.org/TR/REC-xml-names/#dt-prefix
//
// Thus, we assume that the propfind_invalid2 test is obsolete, and
// hard-code the 400 Bad Request response that the test expects.
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
http.Error(w, "400 Bad Request", http.StatusBadRequest)
return
}
h.ServeHTTP(w, r)
}))
addr := fmt.Sprintf(":%d", *port)
log.Printf("Serving %v", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}

445
vendor/golang.org/x/net/webdav/lock.go generated vendored Normal file
View File

@ -0,0 +1,445 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"container/heap"
"errors"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
// ErrForbidden is returned by a LockSystem's Unlock method.
ErrForbidden = errors.New("webdav: forbidden")
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
ErrLocked = errors.New("webdav: locked")
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
ErrNoSuchLock = errors.New("webdav: no such lock")
)
// Condition can match a WebDAV resource, based on a token or ETag.
// Exactly one of Token and ETag should be non-empty.
type Condition struct {
Not bool
Token string
ETag string
}
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
// Confirm confirms that the caller can claim all of the locks specified by
// the given conditions, and that holding the union of all of those locks
// gives exclusive access to all of the named resources. Up to two resources
// can be named. Empty names are ignored.
//
// Exactly one of release and err will be non-nil. If release is non-nil,
// all of the requested locks are held until release is called. Calling
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
// Confirm has confirmed that a lock claim is valid, that lock cannot be
// Confirmed again until it has been released.
//
// If Confirm returns ErrConfirmationFailed then the Handler will continue
// to try any other set of locks presented (a WebDAV HTTP request can
// present more than one set of locks). If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
// Create creates a lock with the given depth, duration, owner and root
// (name). The depth will either be negative (meaning infinite) or zero.
//
// If Create returns ErrLocked then the Handler will write a "423 Locked"
// HTTP status. If it returns any other non-nil error, the Handler will
// write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
//
// The token returned identifies the created lock. It should be an absolute
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
// contain whitespace.
Create(now time.Time, details LockDetails) (token string, err error)
// Refresh refreshes the lock with the given token.
//
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
// Unlock unlocks the lock with the given token.
//
// If Unlock returns ErrForbidden then the Handler will write a "403
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
// any other non-nil error, the Handler will write a "500 Internal Server
// Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
// when to use each error.
Unlock(now time.Time, token string) error
}
// LockDetails are a lock's metadata.
type LockDetails struct {
// Root is the root resource name being locked. For a zero-depth lock, the
// root is the only resource being locked.
Root string
// Duration is the lock timeout. A negative duration means infinite.
Duration time.Duration
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
//
// TODO: does the "verbatim" nature play well with XML namespaces?
// Does the OwnerXML field need to have more structure? See
// https://codereview.appspot.com/175140043/#msg2
OwnerXML string
// ZeroDepth is whether the lock has zero depth. If it does not have zero
// depth, it has infinite depth.
ZeroDepth bool
}
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
return &memLS{
byName: make(map[string]*memLSNode),
byToken: make(map[string]*memLSNode),
gen: uint64(time.Now().Unix()),
}
}
type memLS struct {
mu sync.Mutex
byName map[string]*memLSNode
byToken map[string]*memLSNode
gen uint64
// byExpiry only contains those nodes whose LockDetails have a finite
// Duration and are yet to expire.
byExpiry byExpiry
}
func (m *memLS) nextToken() string {
m.gen++
return strconv.FormatUint(m.gen, 10)
}
func (m *memLS) collectExpiredNodes(now time.Time) {
for len(m.byExpiry) > 0 {
if now.Before(m.byExpiry[0].expiry) {
break
}
m.remove(m.byExpiry[0])
}
}
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
var n0, n1 *memLSNode
if name0 != "" {
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
return nil, ErrConfirmationFailed
}
}
if name1 != "" {
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
return nil, ErrConfirmationFailed
}
}
// Don't hold the same node twice.
if n1 == n0 {
n1 = nil
}
if n0 != nil {
m.hold(n0)
}
if n1 != nil {
m.hold(n1)
}
return func() {
m.mu.Lock()
defer m.mu.Unlock()
if n1 != nil {
m.unhold(n1)
}
if n0 != nil {
m.unhold(n0)
}
}, nil
}
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
// TODO: support Condition.Not and Condition.ETag.
for _, c := range conditions {
n = m.byToken[c.Token]
if n == nil || n.held {
continue
}
if name == n.details.Root {
return n
}
if n.details.ZeroDepth {
continue
}
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
return n
}
}
return nil
}
func (m *memLS) hold(n *memLSNode) {
if n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = true
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func (m *memLS) unhold(n *memLSNode) {
if !n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = false
if n.details.Duration >= 0 {
heap.Push(&m.byExpiry, n)
}
}
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
details.Root = slashClean(details.Root)
if !m.canCreate(details.Root, details.ZeroDepth) {
return "", ErrLocked
}
n := m.create(details.Root)
n.token = m.nextToken()
m.byToken[n.token] = n
n.details = details
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.token, nil
}
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return LockDetails{}, ErrNoSuchLock
}
if n.held {
return LockDetails{}, ErrLocked
}
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
n.details.Duration = duration
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.details, nil
}
func (m *memLS) Unlock(now time.Time, token string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return ErrNoSuchLock
}
if n.held {
return ErrLocked
}
m.remove(n)
return nil
}
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
return walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
return true
}
if first {
if n.token != "" {
// The target node is already locked.
return false
}
if !zeroDepth {
// The requested lock depth is infinite, and the fact that n exists
// (n != nil) means that a descendant of the target node is locked.
return false
}
} else if n.token != "" && !n.details.ZeroDepth {
// An ancestor of the target node is locked with infinite depth.
return false
}
return true
})
}
func (m *memLS) create(name string) (ret *memLSNode) {
walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
n = &memLSNode{
details: LockDetails{
Root: name0,
},
byExpiryIndex: -1,
}
m.byName[name0] = n
}
n.refCount++
if first {
ret = n
}
return true
})
return ret
}
func (m *memLS) remove(n *memLSNode) {
delete(m.byToken, n.token)
n.token = ""
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
x := m.byName[name0]
x.refCount--
if x.refCount == 0 {
delete(m.byName, name0)
}
return true
})
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
for first := true; ; first = false {
if !f(name, first) {
return false
}
if name == "/" {
break
}
name = name[:strings.LastIndex(name, "/")]
if name == "" {
name = "/"
}
}
return true
}
type memLSNode struct {
// details are the lock metadata. Even if this node's name is not explicitly locked,
// details.Root will still equal the node's name.
details LockDetails
// token is the unique identifier for this node's lock. An empty token means that
// this node is not explicitly locked.
token string
// refCount is the number of self-or-descendant nodes that are explicitly locked.
refCount int
// expiry is when this node's lock expires.
expiry time.Time
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
// if this node does not expire, or has expired.
byExpiryIndex int
// held is whether this node's lock is actively held by a Confirm call.
held bool
}
type byExpiry []*memLSNode
func (b *byExpiry) Len() int {
return len(*b)
}
func (b *byExpiry) Less(i, j int) bool {
return (*b)[i].expiry.Before((*b)[j].expiry)
}
func (b *byExpiry) Swap(i, j int) {
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
(*b)[i].byExpiryIndex = i
(*b)[j].byExpiryIndex = j
}
func (b *byExpiry) Push(x interface{}) {
n := x.(*memLSNode)
n.byExpiryIndex = len(*b)
*b = append(*b, n)
}
func (b *byExpiry) Pop() interface{} {
i := len(*b) - 1
n := (*b)[i]
(*b)[i] = nil
n.byExpiryIndex = -1
*b = (*b)[:i]
return n
}
const infiniteTimeout = -1
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
if s == "" {
return infiniteTimeout, nil
}
if i := strings.IndexByte(s, ','); i >= 0 {
s = s[:i]
}
s = strings.TrimSpace(s)
if s == "Infinite" {
return infiniteTimeout, nil
}
const pre = "Second-"
if !strings.HasPrefix(s, pre) {
return 0, errInvalidTimeout
}
s = s[len(pre):]
if s == "" || s[0] < '0' || '9' < s[0] {
return 0, errInvalidTimeout
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || 1<<32-1 < n {
return 0, errInvalidTimeout
}
return time.Duration(n) * time.Second, nil
}
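// Illustrative sketch, not part of the upstream file: how parseTimeout reads
// a few representative Timeout header values (an empty header is handled
// above and maps to infiniteTimeout).
func exampleParseTimeout() {
    d1, _ := parseTimeout("Second-3600")        // 1 hour
    d2, _ := parseTimeout("Infinite")           // infiniteTimeout (-1)
    d3, _ := parseTimeout("Second-5, Infinite") // only the first list entry counts: 5 seconds
    _, err := parseTimeout("Minute-5")          // errInvalidTimeout: only "Second-N" and "Infinite" are recognized
    _, _, _, _ = d1, d2, d3, err
}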

418
vendor/golang.org/x/net/webdav/prop.go generated vendored Normal file
View File

@ -0,0 +1,418 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
"golang.org/x/net/context"
)
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
type Proppatch struct {
// Remove specifies whether this patch removes properties. If it does not
// remove them, it sets them.
Remove bool
// Props contains the properties to be set or removed.
Props []Property
}
// Propstat describes an XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type Propstat struct {
// Props contains the properties for which Status applies.
Props []Property
// Status defines the HTTP status code of the properties in Prop.
// Allowed values include, but are not limited to the WebDAV status
// code extensions for HTTP/1.1.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
Status int
// XMLError contains the XML representation of the optional error element.
// XML content within this field must not rely on any predefined
// namespace declarations or prefixes. If empty, the XML error element
// is omitted.
XMLError string
// ResponseDescription contains the contents of the optional
// responsedescription field. If empty, the XML element is omitted.
ResponseDescription string
}
// makePropstats returns a slice containing those of x and y whose Props slice
// is non-empty. If both are empty, it returns a slice containing an otherwise
// zero Propstat whose HTTP status code is 200 OK.
func makePropstats(x, y Propstat) []Propstat {
pstats := make([]Propstat, 0, 2)
if len(x.Props) != 0 {
pstats = append(pstats, x)
}
if len(y.Props) != 0 {
pstats = append(pstats, y)
}
if len(pstats) == 0 {
pstats = append(pstats, Propstat{
Status: http.StatusOK,
})
}
return pstats
}
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
type DeadPropsHolder interface {
// DeadProps returns a copy of the dead properties held.
DeadProps() (map[xml.Name]Property, error)
// Patch patches the dead properties held.
//
// Patching is atomic; either all or no patches succeed. It returns (nil,
// non-nil) if an internal server error occurred, otherwise the Propstats
// collectively contain one Property for each proposed patch Property. If
// all patches succeed, Patch returns a slice of length one and a Propstat
// element with a 200 OK HTTP status code. If none succeed, for reasons
// other than an internal server error, no Propstat has status 200 OK.
//
// For more details on when various HTTP status codes apply, see
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
Patch([]Proppatch) ([]Propstat, error)
}
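// Illustrative sketch, not part of the upstream file: a minimal in-memory
// DeadPropsHolder. In practice these methods would live on the concrete file
// type returned by a FileSystem's OpenFile; this toy holder only shows the
// shape of the contract, and it accepts every patch.
type exampleDeadProps struct {
    props map[xml.Name]Property
}
func (m *exampleDeadProps) DeadProps() (map[xml.Name]Property, error) {
    // Return a copy so callers cannot mutate the held properties.
    cp := make(map[xml.Name]Property, len(m.props))
    for k, v := range m.props {
        cp[k] = v
    }
    return cp, nil
}
func (m *exampleDeadProps) Patch(patches []Proppatch) ([]Propstat, error) {
    // Every patch succeeds, so the result is a single Propstat with a
    // 200 OK status naming each patched property.
    if m.props == nil {
        m.props = make(map[xml.Name]Property)
    }
    pstat := Propstat{Status: http.StatusOK}
    for _, patch := range patches {
        for _, p := range patch.Props {
            if patch.Remove {
                delete(m.props, p.XMLName)
            } else {
                m.props[p.XMLName] = p
            }
            pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
        }
    }
    return []Propstat{pstat}, nil
}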
// liveProps contains all supported, protected DAV: properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
{Space: "DAV:", Local: "resourcetype"}: {
findFn: findResourceType,
dir: true,
},
{Space: "DAV:", Local: "displayname"}: {
findFn: findDisplayName,
dir: true,
},
{Space: "DAV:", Local: "getcontentlength"}: {
findFn: findContentLength,
dir: false,
},
{Space: "DAV:", Local: "getlastmodified"}: {
findFn: findLastModified,
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
// suggests that getlastmodified should only apply to GETable
// resources, and this package does not support GET on directories.
//
// Nonetheless, some WebDAV clients expect child directories to be
// sortable by getlastmodified date, so this value is true, not false.
// See golang.org/issue/15334.
dir: true,
},
{Space: "DAV:", Local: "creationdate"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontentlanguage"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontenttype"}: {
findFn: findContentType,
dir: false,
},
{Space: "DAV:", Local: "getetag"}: {
findFn: findETag,
// findETag implements ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so we do not advertise getetag for DAV
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
// TODO(nigeltao) merge props and allprop?
// Props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pstatOK := Propstat{Status: http.StatusOK}
pstatNotFound := Propstat{Status: http.StatusNotFound}
for _, pn := range pnames {
// If this file has dead properties, check if they contain pn.
if dp, ok := deadProps[pn]; ok {
pstatOK.Props = append(pstatOK.Props, dp)
continue
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
if err != nil {
return nil, err
}
pstatOK.Props = append(pstatOK.Props, Property{
XMLName: pn,
InnerXML: []byte(innerXML),
})
} else {
pstatNotFound.Props = append(pstatNotFound.Props, Property{
XMLName: pn,
})
}
}
return makePropstats(pstatOK, pstatNotFound), nil
}
// Propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
for pn, prop := range liveProps {
if prop.findFn != nil && (prop.dir || !isDir) {
pnames = append(pnames, pn)
}
}
for pn := range deadProps {
pnames = append(pnames, pn)
}
return pnames, nil
}
// Allprop returns the properties defined for resource name and the properties
// named in include.
//
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
// within the RFC plus dead properties. Other live properties should only be
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, name)
if err != nil {
return nil, err
}
// Add names from include if they are not already covered in pnames.
nameset := make(map[xml.Name]bool)
for _, pn := range pnames {
nameset[pn] = true
}
for _, pn := range include {
if !nameset[pn] {
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, name, pnames)
}
// Patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
if err != nil {
return nil, err
}
defer f.Close()
if dph, ok := f.(DeadPropsHolder); ok {
ret, err := dph.Patch(patches)
if err != nil {
return nil, err
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
// "The contents of the prop XML element must only list the names of
// properties to which the result in the status element applies."
for _, pstat := range ret {
for i, p := range pstat.Props {
pstat.Props[i] = Property{XMLName: p.XMLName}
}
}
return ret, nil
}
// The file doesn't implement the optional DeadPropsHolder interface, so
// all patches are forbidden.
pstat := Propstat{Status: http.StatusForbidden}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
}
func escapeXML(s string) string {
for i := 0; i < len(s); i++ {
// As an optimization, if s contains only ASCII letters, digits or a
// few special characters, the escaped value is s itself and we don't
// need to allocate a buffer and convert between string and []byte.
switch c := s[i]; {
case c == ' ' || c == '_' ||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
('A' <= c && c <= 'Z') ||
('a' <= c && c <= 'z'):
continue
}
// Otherwise, go through the full escaping process.
var buf bytes.Buffer
xml.EscapeText(&buf, []byte(s))
return buf.String()
}
return s
}
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if slashClean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.Name()), nil
}
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return strconv.FormatInt(fi.Size(), 10), nil
}
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return fi.ModTime().Format(http.TimeFormat), nil
}
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return "", err
}
defer f.Close()
// This implementation is based on serveContent's code in the standard net/http package.
ctype := mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
return ctype, nil
}
// Read a chunk to decide between utf-8 text and binary.
var buf [512]byte
n, err := io.ReadFull(f, buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return "", err
}
ctype = http.DetectContentType(buf[:n])
// Rewind file.
_, err = f.Seek(0, os.SEEK_SET)
return ctype, err
}
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}
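// Illustrative sketch, not part of the upstream file: querying two live
// properties of the root collection of a freshly created in-memory file
// system. NewMemFS and NewMemLS are assumed to be the constructors defined
// elsewhere in this package.
func examplePropfindRoot() error {
    ctx := context.Background()
    pstats, err := props(ctx, NewMemFS(), NewMemLS(), "/", []xml.Name{
        {Space: "DAV:", Local: "resourcetype"},     // applies to directories
        {Space: "DAV:", Local: "getcontentlength"}, // files only, so 404 for "/"
    })
    if err != nil {
        return err
    }
    for _, ps := range pstats {
        fmt.Println(ps.Status, len(ps.Props)) // expected: "200 1" then "404 1"
    }
    return nil
}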

702
vendor/golang.org/x/net/webdav/webdav.go generated vendored Normal file
View File

@ -0,0 +1,702 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package webdav provides a WebDAV server implementation.
package webdav // import "golang.org/x/net/webdav"
import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
)
type Handler struct {
// Prefix is the URL path prefix to strip from WebDAV resource paths.
Prefix string
// FileSystem is the virtual file system.
FileSystem FileSystem
// LockSystem is the lock management system.
LockSystem LockSystem
// Logger is an optional error logger. If non-nil, it will be called
// for all HTTP requests.
Logger func(*http.Request, error)
}
func (h *Handler) stripPrefix(p string) (string, int, error) {
if h.Prefix == "" {
return p, http.StatusOK, nil
}
if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
return r, http.StatusOK, nil
}
return p, http.StatusNotFound, errPrefixMismatch
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, err := http.StatusBadRequest, errUnsupportedMethod
if h.FileSystem == nil {
status, err = http.StatusInternalServerError, errNoFileSystem
} else if h.LockSystem == nil {
status, err = http.StatusInternalServerError, errNoLockSystem
} else {
switch r.Method {
case "OPTIONS":
status, err = h.handleOptions(w, r)
case "GET", "HEAD", "POST":
status, err = h.handleGetHeadPost(w, r)
case "DELETE":
status, err = h.handleDelete(w, r)
case "PUT":
status, err = h.handlePut(w, r)
case "MKCOL":
status, err = h.handleMkcol(w, r)
case "COPY", "MOVE":
status, err = h.handleCopyMove(w, r)
case "LOCK":
status, err = h.handleLock(w, r)
case "UNLOCK":
status, err = h.handleUnlock(w, r)
case "PROPFIND":
status, err = h.handlePropfind(w, r)
case "PROPPATCH":
status, err = h.handleProppatch(w, r)
}
}
if status != 0 {
w.WriteHeader(status)
if status != http.StatusNoContent {
w.Write([]byte(StatusText(status)))
}
}
if h.Logger != nil {
h.Logger(r, err)
}
}
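// Illustrative sketch, not part of the upstream file: wiring a Handler into
// net/http. Dir (the disk-backed FileSystem) and NewMemLS are assumed to be
// the constructors defined elsewhere in this package; the prefix, directory
// and listen address are made up.
func exampleServeWebdav() error {
    h := &Handler{
        Prefix:     "/dav",
        FileSystem: Dir("/var/tmp/davroot"),
        LockSystem: NewMemLS(),
        Logger: func(r *http.Request, err error) {
            if err != nil {
                fmt.Printf("webdav: %s %s: %v\n", r.Method, r.URL.Path, err)
            }
        },
    }
    // Requests to /dav/... reach h; stripPrefix removes "/dav" before the
    // path is passed to the FileSystem and LockSystem.
    http.Handle("/dav/", h)
    return http.ListenAndServe(":8080", nil)
}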
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
token, err = h.LockSystem.Create(now, LockDetails{
Root: root,
Duration: infiniteTimeout,
ZeroDepth: true,
})
if err != nil {
if err == ErrLocked {
return "", StatusLocked, err
}
return "", http.StatusInternalServerError, err
}
return token, 0, nil
}
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
hdr := r.Header.Get("If")
if hdr == "" {
// An empty If header means that the client hasn't previously created locks.
// Even if this client doesn't care about locks, we still need to check that
// the resources aren't locked by another client, so we create temporary
// locks that would conflict with another client's locks. These temporary
// locks are unlocked at the end of the HTTP request.
now, srcToken, dstToken := time.Now(), "", ""
if src != "" {
srcToken, status, err = h.lock(now, src)
if err != nil {
return nil, status, err
}
}
if dst != "" {
dstToken, status, err = h.lock(now, dst)
if err != nil {
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
return nil, status, err
}
}
return func() {
if dstToken != "" {
h.LockSystem.Unlock(now, dstToken)
}
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
}, 0, nil
}
ih, ok := parseIfHeader(hdr)
if !ok {
return nil, http.StatusBadRequest, errInvalidIfHeader
}
// ih is a disjunction (OR) of ifLists, so any ifList will do.
for _, l := range ih.lists {
lsrc := l.resourceTag
if lsrc == "" {
lsrc = src
} else {
u, err := url.Parse(lsrc)
if err != nil {
continue
}
if u.Host != r.Host {
continue
}
lsrc, status, err = h.stripPrefix(u.Path)
if err != nil {
return nil, status, err
}
}
release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
if err == ErrConfirmationFailed {
continue
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return release, 0, nil
}
// Section 10.4.1 says that "If this header is evaluated and all state lists
// fail, then the request must fail with a 412 (Precondition Failed) status."
// We follow the spec even though the cond_put_corrupt_token test case from
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
return nil, http.StatusPreconditionFailed, ErrLocked
}
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := getContext(r)
allow := "OPTIONS, LOCK, PUT, MKCOL"
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
if fi.IsDir() {
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
} else {
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
}
}
w.Header().Set("Allow", allow)
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
w.Header().Set("DAV", "1, 2")
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
w.Header().Set("MS-Author-Via", "DAV")
return 0, nil
}
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
// TODO: check locks for read-only access??
ctx := getContext(r)
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
if err != nil {
return http.StatusNotFound, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return http.StatusNotFound, err
}
if fi.IsDir() {
return http.StatusMethodNotAllowed, nil
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
// Let ServeContent determine the Content-Type header.
http.ServeContent(w, r, reqPath, fi.ModTime(), f)
return 0, nil
}
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
// TODO: return MultiStatus where appropriate.
// "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
// returns nil (no error)." WebDAV semantics are that it should return a
// "404 Not Found". We therefore have to Stat before we RemoveAll.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
return http.StatusMethodNotAllowed, err
}
return http.StatusNoContent, nil
}
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
// comments in http.checkEtag.
ctx := getContext(r)
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return http.StatusNotFound, err
}
_, copyErr := io.Copy(f, r.Body)
fi, statErr := f.Stat()
closeErr := f.Close()
// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
if copyErr != nil {
return http.StatusMethodNotAllowed, copyErr
}
if statErr != nil {
return http.StatusMethodNotAllowed, statErr
}
if closeErr != nil {
return http.StatusMethodNotAllowed, closeErr
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
return http.StatusCreated, nil
}
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
if r.ContentLength > 0 {
return http.StatusUnsupportedMediaType, nil
}
if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusMethodNotAllowed, err
}
return http.StatusCreated, nil
}
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
hdr := r.Header.Get("Destination")
if hdr == "" {
return http.StatusBadRequest, errInvalidDestination
}
u, err := url.Parse(hdr)
if err != nil {
return http.StatusBadRequest, errInvalidDestination
}
if u.Host != r.Host {
return http.StatusBadGateway, errInvalidDestination
}
src, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
dst, status, err := h.stripPrefix(u.Path)
if err != nil {
return status, err
}
if dst == "" {
return http.StatusBadGateway, errInvalidDestination
}
if dst == src {
return http.StatusForbidden, errDestinationEqualsSource
}
ctx := getContext(r)
if r.Method == "COPY" {
// Section 7.5.1 says that a COPY only needs to lock the destination,
// not both destination and source. Strictly speaking, this is racy,
// even though a COPY doesn't modify the source, if a concurrent
// operation modifies the source. However, the litmus test explicitly
// checks that COPYing a locked-by-another source is OK.
release, status, err := h.confirmLocks(r, "", dst)
if err != nil {
return status, err
}
defer release()
// Section 9.8.3 says that "The COPY method on a collection without a Depth
// header must act as if a Depth header with value "infinity" was included".
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.8.3 says that "A client may submit a Depth header on a
// COPY on a collection with a value of "0" or "infinity"."
return http.StatusBadRequest, errInvalidDepth
}
}
return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
}
release, status, err := h.confirmLocks(r, src, dst)
if err != nil {
return status, err
}
defer release()
// Section 9.9.2 says that "The MOVE method on a collection must act as if
// a "Depth: infinity" header was used on it. A client must not submit a
// Depth header on a MOVE on a collection with any value but "infinity"."
if hdr := r.Header.Get("Depth"); hdr != "" {
if parseDepth(hdr) != infiniteDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
}
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
duration, err := parseTimeout(r.Header.Get("Timeout"))
if err != nil {
return http.StatusBadRequest, err
}
li, status, err := readLockInfo(r.Body)
if err != nil {
return status, err
}
ctx := getContext(r)
token, ld, now, created := "", LockDetails{}, time.Now(), false
if li == (lockInfo{}) {
// An empty lockInfo means to refresh the lock.
ih, ok := parseIfHeader(r.Header.Get("If"))
if !ok {
return http.StatusBadRequest, errInvalidIfHeader
}
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
token = ih.lists[0].conditions[0].Token
}
if token == "" {
return http.StatusBadRequest, errInvalidLockToken
}
ld, err = h.LockSystem.Refresh(now, token, duration)
if err != nil {
if err == ErrNoSuchLock {
return http.StatusPreconditionFailed, err
}
return http.StatusInternalServerError, err
}
} else {
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
// then the request MUST act as if a "Depth:infinity" had been submitted."
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.10.3 says that "Values other than 0 or infinity must not be
// used with the Depth header on a LOCK method".
return http.StatusBadRequest, errInvalidDepth
}
}
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ld = LockDetails{
Root: reqPath,
Duration: duration,
OwnerXML: li.Owner.InnerXML,
ZeroDepth: depth == 0,
}
token, err = h.LockSystem.Create(now, ld)
if err != nil {
if err == ErrLocked {
return StatusLocked, err
}
return http.StatusInternalServerError, err
}
defer func() {
if retErr != nil {
h.LockSystem.Unlock(now, token)
}
}()
// Create the resource if it didn't previously exist.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
// TODO: detect missing intermediate dirs and return http.StatusConflict?
return http.StatusInternalServerError, err
}
f.Close()
created = true
}
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We add angle brackets.
w.Header().Set("Lock-Token", "<"+token+">")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
if created {
// This is "w.WriteHeader(http.StatusCreated)" and not "return
// http.StatusCreated, nil" because we write our own (XML) response to w
// and Handler.ServeHTTP would otherwise write "Created".
w.WriteHeader(http.StatusCreated)
}
writeLockInfo(w, token, ld)
return 0, nil
}
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We strip its angle brackets.
t := r.Header.Get("Lock-Token")
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
return http.StatusBadRequest, errInvalidLockToken
}
t = t[1 : len(t)-1]
switch err = h.LockSystem.Unlock(time.Now(), t); err {
case nil:
return http.StatusNoContent, err
case ErrForbidden:
return http.StatusForbidden, err
case ErrLocked:
return StatusLocked, err
case ErrNoSuchLock:
return http.StatusConflict, err
default:
return http.StatusInternalServerError, err
}
}
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := getContext(r)
fi, err := h.FileSystem.Stat(ctx, reqPath)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth == invalidDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
pf, status, err := readPropfind(r.Body)
if err != nil {
return status, err
}
mw := multistatusWriter{w: w}
walkFn := func(reqPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
var pstats []Propstat
if pf.Propname != nil {
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
if err != nil {
return err
}
pstat := Propstat{Status: http.StatusOK}
for _, xmlname := range pnames {
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
}
pstats = append(pstats, pstat)
} else if pf.Allprop != nil {
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
} else {
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
}
if err != nil {
return err
}
return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))
}
walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
closeErr := mw.close()
if walkErr != nil {
return http.StatusInternalServerError, walkErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
patches, status, err := readProppatch(r.Body)
if err != nil {
return status, err
}
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
if err != nil {
return http.StatusInternalServerError, err
}
mw := multistatusWriter{w: w}
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
closeErr := mw.close()
if writeErr != nil {
return http.StatusInternalServerError, writeErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func makePropstatResponse(href string, pstats []Propstat) *response {
resp := response{
Href: []string{(&url.URL{Path: href}).EscapedPath()},
Propstat: make([]propstat, 0, len(pstats)),
}
for _, p := range pstats {
var xmlErr *xmlError
if p.XMLError != "" {
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
}
resp.Propstat = append(resp.Propstat, propstat{
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
Prop: p.Props,
ResponseDescription: p.ResponseDescription,
Error: xmlErr,
})
}
return &resp
}
const (
infiniteDepth = -1
invalidDepth = -2
)
// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
// infiniteDepth. Parsing any other string returns invalidDepth.
//
// Different WebDAV methods have further constraints on valid depths:
// - PROPFIND has no further restrictions, as per section 9.1.
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
// - MOVE accepts only "infinity", as per section 9.9.2.
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
switch s {
case "0":
return 0
case "1":
return 1
case "infinity":
return infiniteDepth
}
return invalidDepth
}
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
StatusMulti = 207
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusInsufficientStorage = 507
)
func StatusText(code int) string {
switch code {
case StatusMulti:
return "Multi-Status"
case StatusUnprocessableEntity:
return "Unprocessable Entity"
case StatusLocked:
return "Locked"
case StatusFailedDependency:
return "Failed Dependency"
case StatusInsufficientStorage:
return "Insufficient Storage"
}
return http.StatusText(code)
}
var (
errDestinationEqualsSource = errors.New("webdav: destination equals source")
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
errInvalidDepth = errors.New("webdav: invalid depth")
errInvalidDestination = errors.New("webdav: invalid destination")
errInvalidIfHeader = errors.New("webdav: invalid If header")
errInvalidLockInfo = errors.New("webdav: invalid lock info")
errInvalidLockToken = errors.New("webdav: invalid lock token")
errInvalidPropfind = errors.New("webdav: invalid propfind")
errInvalidProppatch = errors.New("webdav: invalid proppatch")
errInvalidResponse = errors.New("webdav: invalid response")
errInvalidTimeout = errors.New("webdav: invalid timeout")
errNoFileSystem = errors.New("webdav: no file system")
errNoLockSystem = errors.New("webdav: no lock system")
errNotADirectory = errors.New("webdav: not a directory")
errPrefixMismatch = errors.New("webdav: prefix mismatch")
errRecursionTooDeep = errors.New("webdav: recursion too deep")
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
errUnsupportedMethod = errors.New("webdav: unsupported method")
)

519
vendor/golang.org/x/net/webdav/xml.go generated vendored Normal file
View File

@ -0,0 +1,519 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The XML encoding is covered by Section 14.
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"time"
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
// in July 2015, this package uses an internal fork of the standard
// library's encoding/xml package, due to changes in the way namespaces
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
// rolled back in response to https://github.com/golang/go/issues/11841
//
// However, this package's exported API, specifically the Property and
// DeadPropsHolder types, need to refer to the standard library's version
// of the xml.Name type, as code that imports this package cannot refer to
// the internal version.
//
// This file therefore imports both the internal and external versions, as
// ixml and xml, and converts between them.
//
// In the long term, this package should use the standard library's version
// only, and the internal fork deleted, once
// https://github.com/golang/go/issues/13400 is resolved.
ixml "golang.org/x/net/webdav/internal/xml"
)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
type lockInfo struct {
XMLName ixml.Name `xml:"lockinfo"`
Exclusive *struct{} `xml:"lockscope>exclusive"`
Shared *struct{} `xml:"lockscope>shared"`
Write *struct{} `xml:"locktype>write"`
Owner owner `xml:"owner"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type owner struct {
InnerXML string `xml:",innerxml"`
}
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
c := &countingReader{r: r}
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to refresh the lock.
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
return lockInfo{}, 0, nil
}
err = errInvalidLockInfo
}
return lockInfo{}, http.StatusBadRequest, err
}
// We only support exclusive (non-shared) write locks. In practice, these are
// the only types of locks that seem to matter.
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
}
return li, 0, nil
}
type countingReader struct {
n int
r io.Reader
}
func (c *countingReader) Read(p []byte) (int, error) {
n, err := c.r.Read(p)
c.n += n
return n, err
}
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
depth := "infinity"
if ld.ZeroDepth {
depth = "0"
}
timeout := ld.Duration / time.Second
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
" <D:locktype><D:write/></D:locktype>\n"+
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
" <D:depth>%s</D:depth>\n"+
" <D:owner>%s</D:owner>\n"+
" <D:timeout>Second-%d</D:timeout>\n"+
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
"</D:activelock></D:lockdiscovery></D:prop>",
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
)
}
func escape(s string) string {
for i := 0; i < len(s); i++ {
switch s[i] {
case '"', '&', '\'', '<', '>':
b := bytes.NewBuffer(nil)
ixml.EscapeText(b, []byte(s))
return b.String()
}
}
return s
}
// Next returns the next token, if any, in the XML stream of d.
// RFC 4918 requires that comments, processing instructions
// and directives be ignored.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func next(d *ixml.Decoder) (ixml.Token, error) {
for {
t, err := d.Token()
if err != nil {
return t, err
}
switch t.(type) {
case ixml.Comment, ixml.Directive, ixml.ProcInst:
continue
default:
return t, nil
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
type propfindProps []xml.Name
// UnmarshalXML appends the property names enclosed within start to pn.
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
for {
t, err := next(d)
if err != nil {
return err
}
switch t.(type) {
case ixml.EndElement:
if len(*pn) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
name := t.(ixml.StartElement).Name
t, err = next(d)
if err != nil {
return err
}
if _, ok := t.(ixml.EndElement); !ok {
return fmt.Errorf("unexpected token %T", t)
}
*pn = append(*pn, xml.Name(name))
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
type propfind struct {
XMLName ixml.Name `xml:"DAV: propfind"`
Allprop *struct{} `xml:"DAV: allprop"`
Propname *struct{} `xml:"DAV: propname"`
Prop propfindProps `xml:"DAV: prop"`
Include propfindProps `xml:"DAV: include"`
}
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
c := countingReader{r: r}
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to propfind allprop.
// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
return propfind{Allprop: new(struct{})}, 0, nil
}
err = errInvalidPropfind
}
return propfind{}, http.StatusBadRequest, err
}
if pf.Allprop == nil && pf.Include != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Prop != nil && pf.Propname != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
return pf, 0, nil
}
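// Illustrative sketch, not part of the upstream file: what readPropfind makes
// of a typical PROPFIND body, and of the empty-body case handled above.
func exampleReadPropfind() {
    body := `<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:prop><D:displayname/><D:getcontentlength/></D:prop>
</D:propfind>`
    pf, status, err := readPropfind(bytes.NewReader([]byte(body)))
    fmt.Println(len(pf.Prop), status, err) // 2 0 <nil>
    pf, _, _ = readPropfind(bytes.NewReader(nil))
    fmt.Println(pf.Allprop != nil) // true: an empty body means allprop
}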
// Property represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type Property struct {
// XMLName is the fully qualified name that identifies this property.
XMLName xml.Name
// Lang is an optional xml:lang attribute.
Lang string `xml:"xml:lang,attr,omitempty"`
// InnerXML contains the XML representation of the property value.
// See http://www.webdav.org/specs/rfc4918.html#property_values
//
// Property values of complex type or mixed-content must have fully
// expanded XML namespaces or be self-contained with according
// XML namespace declarations. They must not rely on any XML
// namespace declarations within the scope of the XML document,
// even including the DAV: namespace.
InnerXML []byte `xml:",innerxml"`
}
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlProperty struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
XMLName ixml.Name `xml:"D:error"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
// See multistatusWriter for the "D:" namespace prefix.
type propstat struct {
Prop []Property `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlPropstat struct {
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
// Convert from a propstat to an ixmlPropstat.
ixmlPs := ixmlPropstat{
Prop: make([]ixmlProperty, len(ps.Prop)),
Status: ps.Status,
Error: ps.Error,
ResponseDescription: ps.ResponseDescription,
}
for k, prop := range ps.Prop {
ixmlPs.Prop[k] = ixmlProperty{
XMLName: ixml.Name(prop.XMLName),
Lang: prop.Lang,
InnerXML: prop.InnerXML,
}
}
for k, prop := range ixmlPs.Prop {
if prop.XMLName.Space == "DAV:" {
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
ixmlPs.Prop[k] = prop
}
}
// Distinct type to avoid infinite recursion of MarshalXML.
type newpropstat ixmlPropstat
return e.EncodeElement(newpropstat(ixmlPs), start)
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
type response struct {
XMLName ixml.Name `xml:"D:response"`
Href []string `xml:"D:href"`
Propstat []propstat `xml:"D:propstat"`
Status string `xml:"D:status,omitempty"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MultistatusWriter marshals one or more Responses into an XML
// multistatus response.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
// "DAV:" on this element, is prepended on the nested response, as well as on all
// its nested elements. All property names in the DAV: namespace are prefixed as
// well. This is because some versions of Mini-Redirector (on windows 7) ignore
// elements with a default namespace (no prefixed namespace). A less intrusive fix
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
type multistatusWriter struct {
// ResponseDescription contains the optional responsedescription
// of the multistatus XML element. Only the latest content before
// close will be emitted. Empty response descriptions are not
// written.
responseDescription string
w http.ResponseWriter
enc *ixml.Encoder
}
// Write validates and emits a DAV response as part of a multistatus response
// element.
//
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
// (Multi-Status) and populates the Content-Type header. If r is the
// first, valid response to be written, Write prepends the XML representation
// of r with a multistatus tag. Callers must call close after the last response
// has been written.
func (w *multistatusWriter) write(r *response) error {
switch len(r.Href) {
case 0:
return errInvalidResponse
case 1:
if len(r.Propstat) > 0 != (r.Status == "") {
return errInvalidResponse
}
default:
if len(r.Propstat) > 0 || r.Status == "" {
return errInvalidResponse
}
}
err := w.writeHeader()
if err != nil {
return err
}
return w.enc.Encode(r)
}
// writeHeader writes an XML multistatus start element on w's underlying
// http.ResponseWriter and returns the result of the write operation.
// After the first write attempt, writeHeader becomes a no-op.
func (w *multistatusWriter) writeHeader() error {
if w.enc != nil {
return nil
}
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
w.w.WriteHeader(StatusMulti)
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
if err != nil {
return err
}
w.enc = ixml.NewEncoder(w.w)
return w.enc.EncodeToken(ixml.StartElement{
Name: ixml.Name{
Space: "DAV:",
Local: "multistatus",
},
Attr: []ixml.Attr{{
Name: ixml.Name{Space: "xmlns", Local: "D"},
Value: "DAV:",
}},
})
}
// Close completes the marshalling of the multistatus response. It returns
// an error if the multistatus response could not be completed. If both the
// return value and field enc of w are nil, then no multistatus response has
// been written.
func (w *multistatusWriter) close() error {
if w.enc == nil {
return nil
}
var end []ixml.Token
if w.responseDescription != "" {
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
end = append(end,
ixml.StartElement{Name: name},
ixml.CharData(w.responseDescription),
ixml.EndElement{Name: name},
)
}
end = append(end, ixml.EndElement{
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
})
for _, t := range end {
err := w.enc.EncodeToken(t)
if err != nil {
return err
}
}
return w.enc.Flush()
}
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
func xmlLang(s ixml.StartElement, d string) string {
for _, attr := range s.Attr {
if attr.Name == xmlLangName {
return attr.Value
}
}
return d
}
type xmlValue []byte
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
// The XML value of a property can be arbitrary, mixed-content XML.
// To make sure that the unmarshalled value contains all required
// namespaces, we encode all the property value XML tokens into a
// buffer. This forces the encoder to redeclare any used namespaces.
var b bytes.Buffer
e := ixml.NewEncoder(&b)
for {
t, err := next(d)
if err != nil {
return err
}
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
break
}
if err = e.EncodeToken(t); err != nil {
return err
}
}
err := e.Flush()
if err != nil {
return err
}
*v = b.Bytes()
return nil
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property
// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
lang := xmlLang(start, "")
for {
t, err := next(d)
if err != nil {
return err
}
switch elem := t.(type) {
case ixml.EndElement:
if len(*ps) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
p := Property{
XMLName: xml.Name(t.(ixml.StartElement).Name),
Lang: xmlLang(t.(ixml.StartElement), lang),
}
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
if err != nil {
return err
}
*ps = append(*ps, p)
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
type setRemove struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
Prop proppatchProps `xml:"DAV: prop"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
XMLName ixml.Name `xml:"DAV: propertyupdate"`
Lang string `xml:"xml:lang,attr,omitempty"`
SetRemove []setRemove `xml:",any"`
}
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
var pu propertyupdate
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
return nil, http.StatusBadRequest, err
}
for _, op := range pu.SetRemove {
remove := false
switch op.XMLName {
case ixml.Name{Space: "DAV:", Local: "set"}:
// No-op.
case ixml.Name{Space: "DAV:", Local: "remove"}:
for _, p := range op.Prop {
if len(p.InnerXML) > 0 {
return nil, http.StatusBadRequest, errInvalidProppatch
}
}
remove = true
default:
return nil, http.StatusBadRequest, errInvalidProppatch
}
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
}
return patches, 0, nil
}
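// Illustrative sketch, not part of the upstream file: a PROPPATCH body with
// one set and one remove, as readProppatch sees it. The Z: namespace and the
// property names are made up.
func exampleReadProppatch() {
    body := `<?xml version="1.0" encoding="utf-8"?>
<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:">
  <D:set><D:prop><Z:author>Jane Doe</Z:author></D:prop></D:set>
  <D:remove><D:prop><Z:obsolete/></D:prop></D:remove>
</D:propertyupdate>`
    patches, status, err := readProppatch(bytes.NewReader([]byte(body)))
    fmt.Println(len(patches), status, err)            // 2 0 <nil>
    fmt.Println(patches[0].Remove, patches[1].Remove) // false true
}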