vendor: save all the vendor sources
Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
commit 81b5c05fd8 (parent 8fd3f36252)
656 changed files with 244782 additions and 0 deletions
36  vendor/github.com/eclipse/paho.mqtt.golang/.gitignore  (generated, vendored, normal file)
@@ -0,0 +1,36 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

*.msg
*.lok

samples/trivial
samples/trivial2
samples/sample
samples/reconnect
samples/ssl
samples/custom_store
samples/simple
samples/stdinpub
samples/stdoutsub
samples/routing
56  vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md  (generated, vendored, normal file)
|
@@ -0,0 +1,56 @@
|
|||
Contributing to Paho
|
||||
====================
|
||||
|
||||
Thanks for your interest in this project.
|
||||
|
||||
Project description:
|
||||
--------------------
|
||||
|
||||
The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
|
||||
Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
|
||||
|
||||
- https://projects.eclipse.org/projects/technology.paho
|
||||
|
||||
Developer resources:
|
||||
--------------------
|
||||
|
||||
Information regarding source code management, builds, coding standards, and more.
|
||||
|
||||
- https://projects.eclipse.org/projects/technology.paho/developer
|
||||
|
||||
Contributor License Agreement:
|
||||
------------------------------
|
||||
|
||||
Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
|
||||
|
||||
- http://www.eclipse.org/legal/CLA.php
|
||||
|
||||
Contributing Code:
|
||||
------------------
|
||||
|
||||
The Go client is developed on GitHub; see their documentation on the process of forking and pull requests: https://help.github.com/categories/collaborating-on-projects-using-pull-requests/
|
||||
|
||||
Git commit messages should follow the style described here:
|
||||
|
||||
http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
|
||||
|
||||
Contact:
|
||||
--------
|
||||
|
||||
Contact the project developers via the project's "dev" list.
|
||||
|
||||
- https://dev.eclipse.org/mailman/listinfo/paho-dev
|
||||
|
||||
Search for bugs:
|
||||
----------------
|
||||
|
||||
This project uses Github issues to track ongoing development and issues.
|
||||
|
||||
- https://github.com/eclipse/paho.mqtt.golang/issues
|
||||
|
||||
Create a new bug:
|
||||
-----------------
|
||||
|
||||
Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
|
||||
|
||||
- https://github.com/eclipse/paho.mqtt.golang/issues
|
15  vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION  (generated, vendored, normal file)
|
@@ -0,0 +1,15 @@
|
|||
|
||||
|
||||
Eclipse Distribution License - v 1.0
|
||||
|
||||
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
20  vendor/github.com/eclipse/paho.mqtt.golang/LICENSE  (generated, vendored, normal file)
|
@@ -0,0 +1,20 @@
|
|||
This project is dual licensed under the Eclipse Public License 1.0 and the
|
||||
Eclipse Distribution License 1.0 as described in the epl-v10 and edl-v10 files.
|
||||
|
||||
The EDL is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy).
|
||||
|
||||
****
|
||||
Eclipse Distribution License - v 1.0
|
||||
|
||||
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
177  vendor/github.com/eclipse/paho.mqtt.golang/README.md  (generated, vendored, normal file)
|
@@ -0,0 +1,177 @@
|
|||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/eclipse/paho.mqtt.golang)](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
|
||||
|
||||
Eclipse Paho MQTT Go client
|
||||
===========================
|
||||
|
||||
|
||||
This repository contains the source code for the [Eclipse Paho](https://eclipse.org/paho) MQTT 3.1/3.1.1 Go client library.
|
||||
|
||||
This code builds a library which enables applications to connect to an [MQTT](https://mqtt.org) broker to publish
|
||||
messages, and to subscribe to topics and receive published messages.
|
||||
|
||||
This library supports a fully asynchronous mode of operation.
|
||||
|
||||
A client supporting MQTT V5 is [also available](https://github.com/eclipse/paho.golang).
|
||||
|
||||
Installation and Build
|
||||
----------------------
|
||||
|
||||
The process depends upon whether you are using [modules](https://golang.org/ref/mod) (recommended) or `GOPATH`.
|
||||
|
||||
#### Modules
|
||||
|
||||
If you are using [modules](https://blog.golang.org/using-go-modules) then `import "github.com/eclipse/paho.mqtt.golang"`
|
||||
and start using it. The necessary packages will be downloaded automatically when you run `go build`.
|
||||
|
||||
Note that the latest release will be downloaded and changes may have been made since the release. If you have
|
||||
encountered an issue, or wish to try the latest code for another reason, then run
|
||||
`go get github.com/eclipse/paho.mqtt.golang@master` to get the latest commit.
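As a minimal illustration, importing the package is all `go build` needs in order to fetch it (the file below is a placeholder sketch, not part of the library):

```go
package main

import mqtt "github.com/eclipse/paho.mqtt.golang"

func main() {
	// Building this file is enough for the module to be downloaded and
	// recorded in go.mod and go.sum.
	_ = mqtt.NewClientOptions()
}
```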
|
||||
|
||||
#### GOPATH
|
||||
|
||||
Installation is as easy as:
|
||||
|
||||
```
|
||||
go get github.com/eclipse/paho.mqtt.golang
|
||||
```
|
||||
|
||||
The client depends on Google's [proxy](https://godoc.org/golang.org/x/net/proxy) package and the
|
||||
[websockets](https://godoc.org/github.com/gorilla/websocket) package, also easily installed with the commands:
|
||||
|
||||
```
|
||||
go get github.com/gorilla/websocket
|
||||
go get golang.org/x/net/proxy
|
||||
```
|
||||
|
||||
|
||||
Usage and API
|
||||
-------------
|
||||
|
||||
Detailed API documentation is available by using the godoc tool, or can be browsed online
|
||||
using the [pkg.go.dev](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) service.
|
||||
|
||||
Samples are available in the `cmd` directory for reference.
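For a quick orientation, here is a minimal, self-contained sketch of the usual connect/subscribe/publish flow (the broker address, topic, and client ID are placeholder assumptions; the programs in `cmd` remain the canonical samples):

```go
package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // assumed local broker
		SetClientID("sample-client")       // assumed client identifier

	c := mqtt.NewClient(opts)
	if t := c.Connect(); t.Wait() && t.Error() != nil {
		panic(t.Error())
	}

	// Subscribe before publishing so the handler sees the message.
	if t := c.Subscribe("sample/topic", 1, func(_ mqtt.Client, m mqtt.Message) {
		fmt.Printf("received: %s\n", m.Payload())
	}); t.Wait() && t.Error() != nil {
		panic(t.Error())
	}

	if t := c.Publish("sample/topic", 1, false, "hello"); t.Wait() && t.Error() != nil {
		fmt.Println(t.Error())
	}

	time.Sleep(time.Second) // give the handler a moment before disconnecting
	c.Disconnect(250)
}
```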
|
||||
|
||||
Note:
|
||||
|
||||
The library also supports using MQTT over websockets by using the `ws://` (unsecure) or `wss://` (secure) prefix in the
|
||||
URI. If the client is running behind a corporate http/https proxy then the following environment variables `HTTP_PROXY`,
|
||||
`HTTPS_PROXY` and `NO_PROXY` are taken into account when establishing the connection.
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
If you are new to MQTT and your application is not working as expected, reviewing the
|
||||
[MQTT specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), which this library implements,
|
||||
is a good first step. [MQTT.org](https://mqtt.org) has some [good resources](https://mqtt.org/getting-started/) that answer many
|
||||
common questions.
|
||||
|
||||
### Error Handling
|
||||
|
||||
The asynchronous nature of this library makes it easy to forget to check for errors. Consider using a go routine to
|
||||
log these:
|
||||
|
||||
```go
|
||||
t := client.Publish("topic", qos, retained, msg)
|
||||
go func() {
|
||||
_ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
|
||||
if t.Error() != nil {
|
||||
log.Error(t.Error()) // Use your preferred logging technique (or just fmt.Printf)
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
### Logging
|
||||
|
||||
If you are encountering issues then enabling logging, both within this library and on your broker, is a good way to
|
||||
begin troubleshooting. This library can produce various levels of log by assigning the logging endpoints, ERROR,
|
||||
CRITICAL, WARN and DEBUG. For example:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
mqtt.ERROR = log.New(os.Stdout, "[ERROR] ", 0)
|
||||
mqtt.CRITICAL = log.New(os.Stdout, "[CRIT] ", 0)
|
||||
mqtt.WARN = log.New(os.Stdout, "[WARN] ", 0)
|
||||
mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", 0)
|
||||
|
||||
// Connect, Subscribe, Publish etc..
|
||||
}
|
||||
```
|
||||
|
||||
### Common Problems
|
||||
|
||||
* Seemingly random disconnections may be caused by another client connecting to the broker with the same client
|
||||
identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc384800405).
|
||||
* Unless ordered delivery of messages is essential (and you have configured your broker to support this e.g.
|
||||
`max_inflight_messages=1` in mosquitto) then set `ClientOptions.SetOrderMatters(false)`. Doing so will avoid the
|
||||
below issue (deadlocks due to blocking message handlers).
|
||||
* A `MessageHandler` (called when a new message is received) must not block (unless
|
||||
`ClientOptions.SetOrderMatters(false)` is set; see the options sketch after this list). If you wish to perform a long-running task, or publish a message, then
|
||||
please use a go routine (blocking in the handler is a common cause of unexpected `pingresp
|
||||
not received, disconnecting` errors).
|
||||
* When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false it is possible that the broker will deliver retained
|
||||
messages before `Subscribe` can be called. To process these messages either configure a handler with `AddRoute` or
|
||||
set a `DefaultPublishHandler`.
|
||||
* Loss of network connectivity may not be detected immediately. If this is an issue then consider setting
|
||||
`ClientOptions.KeepAlive` (sends regular messages to check the link is active).
|
||||
* Brokers offer many configuration options; some settings may lead to unexpected results. If using Mosquitto check
|
||||
`max_inflight_messages`, `max_queued_messages`, `persistence` (the defaults may not be what you expect).
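A minimal sketch of the options mentioned in the list above (the broker address and the specific values are illustrative assumptions, not recommendations):

```go
opts := mqtt.NewClientOptions().
	AddBroker("tcp://localhost:1883"). // assumed broker
	SetClientID("sample-client").
	SetOrderMatters(false).         // lets message handlers block safely
	SetKeepAlive(30 * time.Second). // detect a lost connection sooner
	SetCleanSession(false).         // broker may queue QOS1+ messages while offline
	SetDefaultPublishHandler(func(_ mqtt.Client, m mqtt.Message) {
		// Catches messages the broker delivers before Subscribe is called.
		fmt.Printf("unexpected message on %s\n", m.Topic())
	})
client := mqtt.NewClient(opts)
```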
|
||||
|
||||
Reporting bugs
|
||||
--------------
|
||||
|
||||
Please report bugs by raising issues for this project on GitHub: https://github.com/eclipse/paho.mqtt.golang/issues
|
||||
|
||||
*A limited number of contributors monitor the issues section so if you have a general question please consider the
|
||||
resources in the [more information](#more-information) section (your question will be seen by more people, and you are
|
||||
likely to receive an answer more quickly).*
|
||||
|
||||
We welcome bug reports, but it is important they are actionable. A significant percentage of issues reported are not
|
||||
resolved due to a lack of information. If we cannot replicate the problem then it is unlikely we will be able to fix it.
|
||||
The information required will vary from issue to issue but consider including:
|
||||
|
||||
* Which version of the package you are using (tag or commit - this should be in your go.mod file)
|
||||
* A [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example). Providing an example
|
||||
is the best way to demonstrate the issue you are facing; it is important this includes all relevant information
|
||||
(including broker configuration). Docker (see `cmd/docker`) makes it relatively simple to provide a working end-to-end
|
||||
example.
|
||||
* A full, clear, description of the problem (detail what you are expecting vs what actually happens).
|
||||
* Details of your attempts to resolve the issue (what have you tried, what worked, what did not).
|
||||
* [Application Logs](#logging) covering the period the issue occurred. Unless you have isolated the root cause of the issue please include a link to a full log (including data from well before the problem arose).
|
||||
* Broker Logs covering the period the issue occurred.
|
||||
|
||||
It is important to remember that this library does not stand alone; it communicates with a broker and any issues you are
|
||||
seeing may be due to:
|
||||
|
||||
* Bugs in your code.
|
||||
* Bugs in this library.
|
||||
* The broker configuration.
|
||||
* Bugs in the broker.
|
||||
* Issues with whatever you are communicating with.
|
||||
|
||||
When submitting an issue, please ensure that you provide sufficient details to enable us to eliminate causes outside of
|
||||
this library.
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
We welcome pull requests but before your contribution can be accepted by the project, you need to create and
|
||||
electronically sign the Eclipse Contributor Agreement (ECA) and sign off on the Eclipse Foundation Certificate of Origin.
|
||||
|
||||
More information is available in the
|
||||
[Eclipse Development Resources](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git); please take special
|
||||
note of the requirement that the commit record contain a "Signed-off-by" entry.
|
||||
|
||||
More information
|
||||
----------------
|
||||
|
||||
Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
|
||||
|
||||
General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
|
||||
|
||||
There is much more information available via the [MQTT community site](http://mqtt.org).
|
||||
|
||||
[Stack Overflow](https://stackoverflow.com/questions/tagged/mqtt+go) has a range of questions covering many common
|
||||
issues (both relating to use of this library and MQTT in general).
|
41  vendor/github.com/eclipse/paho.mqtt.golang/about.html  (generated, vendored, normal file)
|
@@ -0,0 +1,41 @@
|
|||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml"><head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
|
||||
<title>About</title>
|
||||
</head>
|
||||
<body lang="EN-US">
|
||||
<h2>About This Content</h2>
|
||||
|
||||
<p><em>December 9, 2013</em></p>
|
||||
<h3>License</h3>
|
||||
|
||||
<p>The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
|
||||
indicated below, the Content is provided to you under the terms and conditions of the
|
||||
Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
|
||||
A copy of the EPL is available at
|
||||
<a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>
|
||||
and a copy of the EDL is available at
|
||||
<a href="http://www.eclipse.org/org/documents/edl-v10.php">http://www.eclipse.org/org/documents/edl-v10.php</a>.
|
||||
For purposes of the EPL, "Program" will mean the Content.</p>
|
||||
|
||||
<p>If you did not receive this Content directly from the Eclipse Foundation, the Content is
|
||||
being redistributed by another party ("Redistributor") and different terms and conditions may
|
||||
apply to your use of any object code in the Content. Check the Redistributor's license that was
|
||||
provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
|
||||
indicated below, the terms and conditions of the EPL still apply to any source code in the Content
|
||||
and such source code may be obtained at <a href="http://www.eclipse.org/">http://www.eclipse.org</a>.</p>
|
||||
|
||||
|
||||
<h3>Third Party Content</h3>
|
||||
<p>The Content includes items that have been sourced from third parties as set out below. If you
|
||||
did not receive this Content directly from the Eclipse Foundation, the following is provided
|
||||
for informational purposes only, and you should look to the Redistributor's license for
|
||||
terms and conditions of use.</p>
|
||||
<p><em>
|
||||
<strong>None</strong> <br><br>
|
||||
<br><br>
|
||||
</em></p>
|
||||
|
||||
|
||||
|
||||
</body></html>
|
1097  vendor/github.com/eclipse/paho.mqtt.golang/client.go  (generated, vendored, normal file)
File diff suppressed because it is too large
32  vendor/github.com/eclipse/paho.mqtt.golang/components.go  (generated, vendored, normal file)
@@ -0,0 +1,32 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

type component string

// Component names for debug output
const (
	NET component = "[net] "
	PNG component = "[pinger] "
	CLI component = "[client] "
	DEC component = "[decode] "
	MES component = "[message] "
	STR component = "[store] "
	MID component = "[msgids] "
	TST component = "[test] "
	STA component = "[state] "
	ERR component = "[error] "
	ROU component = "[router] "
)
15  vendor/github.com/eclipse/paho.mqtt.golang/edl-v10  (generated, vendored, normal file)
|
@@ -0,0 +1,15 @@
|
|||
|
||||
Eclipse Distribution License - v 1.0
|
||||
|
||||
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
70  vendor/github.com/eclipse/paho.mqtt.golang/epl-v10  (generated, vendored, normal file)
|
@@ -0,0 +1,70 @@
|
|||
Eclipse Public License - v 1.0
|
||||
|
||||
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
|
||||
|
||||
1. DEFINITIONS
|
||||
|
||||
"Contribution" means:
|
||||
|
||||
a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
|
||||
b) in the case of each subsequent Contributor:
|
||||
i) changes to the Program, and
|
||||
ii) additions to the Program;
|
||||
where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
|
||||
"Contributor" means any person or entity that distributes the Program.
|
||||
|
||||
"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
|
||||
|
||||
"Program" means the Contributions distributed in accordance with this Agreement.
|
||||
|
||||
"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
|
||||
|
||||
2. GRANT OF RIGHTS
|
||||
|
||||
a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
|
||||
b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
|
||||
c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
|
||||
d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
|
||||
3. REQUIREMENTS
|
||||
|
||||
A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
|
||||
|
||||
a) it complies with the terms and conditions of this Agreement; and
|
||||
b) its license agreement:
|
||||
i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
|
||||
ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
|
||||
iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
|
||||
iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
|
||||
When the Program is made available in source code form:
|
||||
|
||||
a) it must be made available under this Agreement; and
|
||||
b) a copy of this Agreement must be included with each copy of the Program.
|
||||
Contributors may not remove or alter any copyright notices contained within the Program.
|
||||
|
||||
Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
|
||||
|
||||
4. COMMERCIAL DISTRIBUTION
|
||||
|
||||
Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
|
||||
|
||||
For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
|
||||
|
||||
5. NO WARRANTY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
|
||||
|
||||
6. DISCLAIMER OF LIABILITY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
7. GENERAL
|
||||
|
||||
If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
|
||||
|
||||
If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
|
||||
|
||||
All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
|
||||
|
||||
Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
|
||||
|
||||
This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
|
257  vendor/github.com/eclipse/paho.mqtt.golang/filestore.go  (generated, vendored, normal file)
|
@@ -0,0 +1,257 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
const (
|
||||
msgExt = ".msg"
|
||||
tmpExt = ".tmp"
|
||||
corruptExt = ".CORRUPT"
|
||||
)
|
||||
|
||||
// FileStore implements the store interface using the filesystem to provide
|
||||
// true persistence, even across client failure. This is designed to use a
|
||||
// single directory per running client. If you are running multiple clients
|
||||
// on the same filesystem, you will need to be careful to specify unique
|
||||
// store directories for each.
|
||||
type FileStore struct {
|
||||
sync.RWMutex
|
||||
directory string
|
||||
opened bool
|
||||
}
|
||||
|
||||
// NewFileStore will create a new FileStore which stores its messages in the
|
||||
// directory provided.
|
||||
func NewFileStore(directory string) *FileStore {
|
||||
store := &FileStore{
|
||||
directory: directory,
|
||||
opened: false,
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// Open will allow the FileStore to be used.
|
||||
func (store *FileStore) Open() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
// if no store directory was specified in ClientOpts, by default use the
|
||||
// current working directory
|
||||
if store.directory == "" {
|
||||
store.directory, _ = os.Getwd()
|
||||
}
|
||||
|
||||
// if store dir exists, great, otherwise, create it
|
||||
if !exists(store.directory) {
|
||||
perms := os.FileMode(0770)
|
||||
merr := os.MkdirAll(store.directory, perms)
|
||||
chkerr(merr)
|
||||
}
|
||||
store.opened = true
|
||||
DEBUG.Println(STR, "store is opened at", store.directory)
|
||||
}
|
||||
|
||||
// Close will disallow the FileStore from being used.
|
||||
func (store *FileStore) Close() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.opened = false
|
||||
DEBUG.Println(STR, "store is closed")
|
||||
}
|
||||
|
||||
// Put will put a message into the store, associated with the provided
|
||||
// key value.
|
||||
func (store *FileStore) Put(key string, m packets.ControlPacket) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to use file store, but not open")
|
||||
return
|
||||
}
|
||||
full := fullpath(store.directory, key)
|
||||
write(store.directory, key, m)
|
||||
if !exists(full) {
|
||||
ERROR.Println(STR, "file not created:", full)
|
||||
}
|
||||
}
|
||||
|
||||
// Get will retrieve a message from the store, the one associated with
|
||||
// the provided key value.
|
||||
func (store *FileStore) Get(key string) packets.ControlPacket {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "trying to use file store, but not open")
|
||||
return nil
|
||||
}
|
||||
filepath := fullpath(store.directory, key)
|
||||
if !exists(filepath) {
|
||||
return nil
|
||||
}
|
||||
mfile, oerr := os.Open(filepath)
|
||||
chkerr(oerr)
|
||||
msg, rerr := packets.ReadPacket(mfile)
|
||||
chkerr(mfile.Close())
|
||||
|
||||
// Message was unreadable, return nil
|
||||
if rerr != nil {
|
||||
newpath := corruptpath(store.directory, key)
|
||||
WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
|
||||
if err := os.Rename(filepath, newpath); err != nil {
|
||||
ERROR.Println(STR, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// All will provide a list of all of the keys associated with messages
|
||||
// currently residing in the FileStore.
|
||||
func (store *FileStore) All() []string {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
return store.all()
|
||||
}
|
||||
|
||||
// Del will remove the persisted message associated with the provided
|
||||
// key from the FileStore.
|
||||
func (store *FileStore) Del(key string) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.del(key)
|
||||
}
|
||||
|
||||
// Reset will remove all persisted messages from the FileStore.
|
||||
func (store *FileStore) Reset() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
WARN.Println(STR, "FileStore Reset")
|
||||
for _, key := range store.all() {
|
||||
store.del(key)
|
||||
}
|
||||
}
|
||||
|
||||
// lockless
|
||||
func (store *FileStore) all() []string {
|
||||
var err error
|
||||
var keys []string
|
||||
var files fileInfos
|
||||
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "trying to use file store, but not open")
|
||||
return nil
|
||||
}
|
||||
|
||||
files, err = ioutil.ReadDir(store.directory)
|
||||
chkerr(err)
|
||||
sort.Sort(files)
|
||||
for _, f := range files {
|
||||
DEBUG.Println(STR, "file in All():", f.Name())
|
||||
name := f.Name()
|
||||
if name[len(name)-4:] != msgExt {
|
||||
DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
|
||||
continue
|
||||
}
|
||||
key := name[0 : len(name)-4] // remove file extension
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// lockless
|
||||
func (store *FileStore) del(key string) {
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "trying to use file store, but not open")
|
||||
return
|
||||
}
|
||||
DEBUG.Println(STR, "store del filepath:", store.directory)
|
||||
DEBUG.Println(STR, "store delete key:", key)
|
||||
filepath := fullpath(store.directory, key)
|
||||
DEBUG.Println(STR, "path of deletion:", filepath)
|
||||
if !exists(filepath) {
|
||||
WARN.Println(STR, "store could not delete key:", key)
|
||||
return
|
||||
}
|
||||
rerr := os.Remove(filepath)
|
||||
chkerr(rerr)
|
||||
DEBUG.Println(STR, "del msg:", key)
|
||||
if exists(filepath) {
|
||||
ERROR.Println(STR, "file not deleted:", filepath)
|
||||
}
|
||||
}
|
||||
|
||||
func fullpath(store string, key string) string {
|
||||
p := path.Join(store, key+msgExt)
|
||||
return p
|
||||
}
|
||||
|
||||
func tmppath(store string, key string) string {
|
||||
p := path.Join(store, key+tmpExt)
|
||||
return p
|
||||
}
|
||||
|
||||
func corruptpath(store string, key string) string {
|
||||
p := path.Join(store, key+corruptExt)
|
||||
return p
|
||||
}
|
||||
|
||||
// create file called "X.[messageid].tmp" located in the store
|
||||
// the contents of the file is the bytes of the message, then
|
||||
// rename it to "X.[messageid].msg", overwriting any existing
|
||||
// message with the same id
|
||||
// X will be 'i' for inbound messages, and 'o' for outbound messages
|
||||
func write(store, key string, m packets.ControlPacket) {
|
||||
temppath := tmppath(store, key)
|
||||
f, err := os.Create(temppath)
|
||||
chkerr(err)
|
||||
werr := m.Write(f)
|
||||
chkerr(werr)
|
||||
cerr := f.Close()
|
||||
chkerr(cerr)
|
||||
rerr := os.Rename(temppath, fullpath(store, key))
|
||||
chkerr(rerr)
|
||||
}
|
||||
|
||||
func exists(file string) bool {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
chkerr(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type fileInfos []os.FileInfo
|
||||
|
||||
func (f fileInfos) Len() int {
|
||||
return len(f)
|
||||
}
|
||||
|
||||
func (f fileInfos) Swap(i, j int) {
|
||||
f[i], f[j] = f[j], f[i]
|
||||
}
|
||||
|
||||
func (f fileInfos) Less(i, j int) bool {
|
||||
return f[i].ModTime().Before(f[j].ModTime())
|
||||
}
|
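FileStore is selected through the client options; by default the client uses the in-memory store defined in memstore.go. A brief sketch of opting in to file persistence (the directory path is an assumed placeholder, and each client should get its own directory):

```go
// Persist in-flight messages on disk so QOS1/QOS2 flows can survive a
// client restart.
opts := mqtt.NewClientOptions().
	AddBroker("tcp://localhost:1883"). // assumed broker
	SetClientID("durable-client").
	SetCleanSession(false).
	SetStore(mqtt.NewFileStore("/var/lib/myapp/mqtt-store")) // assumed path
client := mqtt.NewClient(opts)
```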
8  vendor/github.com/eclipse/paho.mqtt.golang/go.mod  (generated, vendored, normal file)
@@ -0,0 +1,8 @@
module github.com/eclipse/paho.mqtt.golang

go 1.14

require (
	github.com/gorilla/websocket v1.4.2
	golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0
)
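For context, a vendor tree like the one added in this change is normally generated from a module definition such as the one above; a sketch of the typical commands (the exact invocation used here is not recorded):

```
go mod tidy      # resolve the dependency set recorded in go.mod and go.sum
go mod vendor    # copy the module sources into ./vendor
git add vendor/  # commit the vendored tree
```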
8  vendor/github.com/eclipse/paho.mqtt.golang/go.sum  (generated, vendored, normal file)
@@ -0,0 +1,8 @@
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
138  vendor/github.com/eclipse/paho.mqtt.golang/memstore.go  (generated, vendored, normal file)
|
@@ -0,0 +1,138 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// MemoryStore implements the store interface to provide a "persistence"
|
||||
// mechanism wholly stored in memory. This is only useful for
|
||||
// as long as the client instance exists.
|
||||
type MemoryStore struct {
|
||||
sync.RWMutex
|
||||
messages map[string]packets.ControlPacket
|
||||
opened bool
|
||||
}
|
||||
|
||||
// NewMemoryStore returns a pointer to a new instance of
|
||||
// MemoryStore, the instance is not initialized and ready to
|
||||
// use until Open() has been called on it.
|
||||
func NewMemoryStore() *MemoryStore {
|
||||
store := &MemoryStore{
|
||||
messages: make(map[string]packets.ControlPacket),
|
||||
opened: false,
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// Open initializes a MemoryStore instance.
|
||||
func (store *MemoryStore) Open() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.opened = true
|
||||
DEBUG.Println(STR, "memorystore initialized")
|
||||
}
|
||||
|
||||
// Put takes a key and a pointer to a Message and stores the
|
||||
// message.
|
||||
func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to use memory store, but not open")
|
||||
return
|
||||
}
|
||||
store.messages[key] = message
|
||||
}
|
||||
|
||||
// Get takes a key and looks in the store for a matching Message
|
||||
// returning either the Message pointer or nil.
|
||||
func (store *MemoryStore) Get(key string) packets.ControlPacket {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to use memory store, but not open")
|
||||
return nil
|
||||
}
|
||||
mid := mIDFromKey(key)
|
||||
m := store.messages[key]
|
||||
if m == nil {
|
||||
CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
|
||||
} else {
|
||||
DEBUG.Println(STR, "memorystore get: message", mid, "found")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// All returns a slice of strings containing all the keys currently
|
||||
// in the MemoryStore.
|
||||
func (store *MemoryStore) All() []string {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to use memory store, but not open")
|
||||
return nil
|
||||
}
|
||||
var keys []string
|
||||
for k := range store.messages {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Del takes a key, searches the MemoryStore and if the key is found
|
||||
// deletes the Message pointer associated with it.
|
||||
func (store *MemoryStore) Del(key string) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to use memory store, but not open")
|
||||
return
|
||||
}
|
||||
mid := mIDFromKey(key)
|
||||
m := store.messages[key]
|
||||
if m == nil {
|
||||
WARN.Println(STR, "memorystore del: message", mid, "not found")
|
||||
} else {
|
||||
delete(store.messages, key)
|
||||
DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
|
||||
}
|
||||
}
|
||||
|
||||
// Close will disallow modifications to the state of the store.
|
||||
func (store *MemoryStore) Close() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to close memory store, but not open")
|
||||
return
|
||||
}
|
||||
store.opened = false
|
||||
DEBUG.Println(STR, "memorystore closed")
|
||||
}
|
||||
|
||||
// Reset eliminates all persisted message data in the store.
|
||||
func (store *MemoryStore) Reset() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
if !store.opened {
|
||||
ERROR.Println(STR, "Trying to reset memory store, but not open")
|
||||
}
|
||||
store.messages = make(map[string]packets.ControlPacket)
|
||||
WARN.Println(STR, "memorystore wiped")
|
||||
}
|
127  vendor/github.com/eclipse/paho.mqtt.golang/message.go  (generated, vendored, normal file)
|
@@ -0,0 +1,127 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// Message defines the externals that a message implementation must support
|
||||
// these are received messages that are passed to the callbacks, not internal
|
||||
// messages
|
||||
type Message interface {
|
||||
Duplicate() bool
|
||||
Qos() byte
|
||||
Retained() bool
|
||||
Topic() string
|
||||
MessageID() uint16
|
||||
Payload() []byte
|
||||
Ack()
|
||||
}
|
||||
|
||||
type message struct {
|
||||
duplicate bool
|
||||
qos byte
|
||||
retained bool
|
||||
topic string
|
||||
messageID uint16
|
||||
payload []byte
|
||||
once sync.Once
|
||||
ack func()
|
||||
}
|
||||
|
||||
func (m *message) Duplicate() bool {
|
||||
return m.duplicate
|
||||
}
|
||||
|
||||
func (m *message) Qos() byte {
|
||||
return m.qos
|
||||
}
|
||||
|
||||
func (m *message) Retained() bool {
|
||||
return m.retained
|
||||
}
|
||||
|
||||
func (m *message) Topic() string {
|
||||
return m.topic
|
||||
}
|
||||
|
||||
func (m *message) MessageID() uint16 {
|
||||
return m.messageID
|
||||
}
|
||||
|
||||
func (m *message) Payload() []byte {
|
||||
return m.payload
|
||||
}
|
||||
|
||||
func (m *message) Ack() {
|
||||
m.once.Do(m.ack)
|
||||
}
|
||||
|
||||
func messageFromPublish(p *packets.PublishPacket, ack func()) Message {
|
||||
return &message{
|
||||
duplicate: p.Dup,
|
||||
qos: p.Qos,
|
||||
retained: p.Retain,
|
||||
topic: p.TopicName,
|
||||
messageID: p.MessageID,
|
||||
payload: p.Payload,
|
||||
ack: ack,
|
||||
}
|
||||
}
|
||||
|
||||
func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket {
|
||||
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
||||
|
||||
m.CleanSession = options.CleanSession
|
||||
m.WillFlag = options.WillEnabled
|
||||
m.WillRetain = options.WillRetained
|
||||
m.ClientIdentifier = options.ClientID
|
||||
|
||||
if options.WillEnabled {
|
||||
m.WillQos = options.WillQos
|
||||
m.WillTopic = options.WillTopic
|
||||
m.WillMessage = options.WillPayload
|
||||
}
|
||||
|
||||
username := options.Username
|
||||
password := options.Password
|
||||
if broker.User != nil {
|
||||
username = broker.User.Username()
|
||||
if pwd, ok := broker.User.Password(); ok {
|
||||
password = pwd
|
||||
}
|
||||
}
|
||||
if options.CredentialsProvider != nil {
|
||||
username, password = options.CredentialsProvider()
|
||||
}
|
||||
|
||||
if username != "" {
|
||||
m.UsernameFlag = true
|
||||
m.Username = username
|
||||
// mustn't have password without user as well
|
||||
if password != "" {
|
||||
m.PasswordFlag = true
|
||||
m.Password = []byte(password)
|
||||
}
|
||||
}
|
||||
|
||||
m.Keepalive = uint16(options.KeepAlive)
|
||||
|
||||
return m
|
||||
}
|
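newConnectMsgFromOptions above resolves credentials in a fixed order: the static values from ClientOptions, then any user info embedded in the broker URL, then a CredentialsProvider if one is configured. A sketch of the two client-side ways to supply them (names and values are placeholders; fetchFreshToken is a hypothetical helper):

```go
// Static credentials...
opts := mqtt.NewClientOptions().
	AddBroker("tcp://broker.example.com:1883"). // assumed broker
	SetUsername("alice").
	SetPassword("secret")

// ...or a provider, useful when tokens rotate; it overrides the static
// values (and any user info in the broker URL) each time a CONNECT packet
// is built.
opts.SetCredentialsProvider(func() (string, string) {
	return "alice", fetchFreshToken()
})
```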
176  vendor/github.com/eclipse/paho.mqtt.golang/messageids.go  (generated, vendored, normal file)
|
@@ -0,0 +1,176 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MId is 16 bit message id as specified by the MQTT spec.
|
||||
// In general, these values should not be depended upon by
|
||||
// the client application.
|
||||
type MId uint16
|
||||
|
||||
type messageIds struct {
|
||||
sync.RWMutex
|
||||
index map[uint16]tokenCompletor
|
||||
|
||||
lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier)
|
||||
}
|
||||
|
||||
const (
|
||||
midMin uint16 = 1
|
||||
midMax uint16 = 65535
|
||||
)
|
||||
|
||||
func (mids *messageIds) cleanUp() {
|
||||
mids.Lock()
|
||||
for _, token := range mids.index {
|
||||
switch token.(type) {
|
||||
case *PublishToken:
|
||||
token.setError(fmt.Errorf("connection lost before Publish completed"))
|
||||
case *SubscribeToken:
|
||||
token.setError(fmt.Errorf("connection lost before Subscribe completed"))
|
||||
case *UnsubscribeToken:
|
||||
token.setError(fmt.Errorf("connection lost before Unsubscribe completed"))
|
||||
case nil:
|
||||
continue
|
||||
}
|
||||
token.flowComplete()
|
||||
}
|
||||
mids.index = make(map[uint16]tokenCompletor)
|
||||
mids.Unlock()
|
||||
DEBUG.Println(MID, "cleaned up")
|
||||
}
|
||||
|
||||
func (mids *messageIds) freeID(id uint16) {
|
||||
mids.Lock()
|
||||
delete(mids.index, id)
|
||||
mids.Unlock()
|
||||
}
|
||||
|
||||
func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
|
||||
mids.Lock()
|
||||
defer mids.Unlock()
|
||||
if _, ok := mids.index[id]; !ok {
|
||||
mids.index[id] = token
|
||||
} else {
|
||||
old := mids.index[id]
|
||||
old.flowComplete()
|
||||
mids.index[id] = token
|
||||
}
|
||||
if id > mids.lastIssuedID {
|
||||
mids.lastIssuedID = id
|
||||
}
|
||||
}
|
||||
|
||||
// getID will return an available id or 0 if none available
|
||||
// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler)
|
||||
func (mids *messageIds) getID(t tokenCompletor) uint16 {
|
||||
mids.Lock()
|
||||
defer mids.Unlock()
|
||||
i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 the map will be empty
|
||||
looped := false // uint16 will loop from 65535->0
|
||||
for {
|
||||
i++
|
||||
if i == 0 { // skip 0 because it's not a valid id (Control Packets MUST contain a non-zero 16-bit Packet Identifier [MQTT-2.3.1-1])
|
||||
i++
|
||||
looped = true
|
||||
}
|
||||
if _, ok := mids.index[i]; !ok {
|
||||
mids.index[i] = t
|
||||
mids.lastIssuedID = i
|
||||
return i
|
||||
}
|
||||
if (looped && i == mids.lastIssuedID) || (mids.lastIssuedID == 0 && i == midMax) { // lastIssuedID will be 0 at startup
|
||||
return 0 // no free ids
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mids *messageIds) getToken(id uint16) tokenCompletor {
|
||||
mids.RLock()
|
||||
defer mids.RUnlock()
|
||||
if token, ok := mids.index[id]; ok {
|
||||
return token
|
||||
}
|
||||
return &DummyToken{id: id}
|
||||
}
|
||||
|
||||
type DummyToken struct {
|
||||
id uint16
|
||||
}
|
||||
|
||||
// Wait implements the Token Wait method.
|
||||
func (d *DummyToken) Wait() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// WaitTimeout implements the Token WaitTimeout method.
|
||||
func (d *DummyToken) WaitTimeout(t time.Duration) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Done implements the Token Done method.
|
||||
func (d *DummyToken) Done() <-chan struct{} {
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (d *DummyToken) flowComplete() {
|
||||
ERROR.Printf("A lookup for token %d returned nil\n", d.id)
|
||||
}
|
||||
|
||||
func (d *DummyToken) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DummyToken) setError(e error) {}
|
||||
|
||||
// PlaceHolderToken does nothing and was implemented to allow a messageid to be reserved;
|
||||
// it differs from DummyToken in that calling flowComplete does not generate an error (it
|
||||
// is expected that flowComplete will be called when the token is overwritten with a real token)
|
||||
type PlaceHolderToken struct {
|
||||
id uint16
|
||||
}
|
||||
|
||||
// Wait implements the Token Wait method.
|
||||
func (p *PlaceHolderToken) Wait() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// WaitTimeout implements the Token WaitTimeout method.
|
||||
func (p *PlaceHolderToken) WaitTimeout(t time.Duration) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Done implements the Token Done method.
|
||||
func (p *PlaceHolderToken) Done() <-chan struct{} {
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (p *PlaceHolderToken) flowComplete() {
|
||||
}
|
||||
|
||||
func (p *PlaceHolderToken) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PlaceHolderToken) setError(e error) {}
|
464
vendor/github.com/eclipse/paho.mqtt.golang/net.go
generated
vendored
Normal file
|
@@ -0,0 +1,464 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
const closedNetConnErrorText = "use of closed network connection" // error string for closed conn (https://golang.org/src/net/error_test.go)
|
||||
|
||||
// ConnectMQTT takes a connected net.Conn and performs the initial MQTT handshake. Parameters are:
|
||||
// conn - Connected net.Conn
|
||||
// cm - Connect Packet with everything other than the protocol name/version populated (historical reasons)
|
||||
// protocolVersion - The protocol version to attempt to connect with
|
||||
//
|
||||
// Note that, for backward compatibility, ConnectMQTT() suppresses the actual connection error (compare to connectMQTT()).
|
||||
func ConnectMQTT(conn net.Conn, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool) {
|
||||
rc, sessionPresent, _ := connectMQTT(conn, cm, protocolVersion)
|
||||
return rc, sessionPresent
|
||||
}
|
||||
|
||||
func connectMQTT(conn io.ReadWriter, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool, error) {
|
||||
switch protocolVersion {
|
||||
case 3:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 3
|
||||
case 0x83:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 0x83
|
||||
case 0x84:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 0x84
|
||||
default:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 4
|
||||
}
|
||||
|
||||
if err := cm.Write(conn); err != nil {
|
||||
ERROR.Println(CLI, err)
|
||||
return packets.ErrNetworkError, false, err
|
||||
}
|
||||
|
||||
rc, sessionPresent, err := verifyCONNACK(conn)
|
||||
return rc, sessionPresent, err
|
||||
}
|
||||
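
For illustration only (not part of the vendored source): a minimal sketch of how a caller might drive the exported ConnectMQTT over a raw TCP connection. The broker address and client id are assumptions, and packets.Connect refers to the CONNECT packet type constant in the packets sub-package; a real client would normally set these fields via ClientOptions instead.

package main

import (
	"fmt"
	"net"

	mqtt "github.com/eclipse/paho.mqtt.golang"
	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	conn, err := net.Dial("tcp", "broker.example.com:1883") // hypothetical broker
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cm := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
	cm.ClientIdentifier = "example-client"
	cm.CleanSession = true
	cm.Keepalive = 30

	// protocolVersion 4 falls through to the default branch above (MQTT 3.1.1).
	rc, sessionPresent := mqtt.ConnectMQTT(conn, cm, 4)
	fmt.Println("return code:", rc, "session present:", sessionPresent)
}
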
|
||||
// This function is only used for receiving a connack
|
||||
// when the connection is first started.
|
||||
// This prevents receiving incoming data while resume
|
||||
// is in progress if clean session is false.
|
||||
func verifyCONNACK(conn io.Reader) (byte, bool, error) {
|
||||
DEBUG.Println(NET, "connect started")
|
||||
|
||||
ca, err := packets.ReadPacket(conn)
|
||||
if err != nil {
|
||||
ERROR.Println(NET, "connect got error", err)
|
||||
return packets.ErrNetworkError, false, err
|
||||
}
|
||||
|
||||
if ca == nil {
|
||||
ERROR.Println(NET, "received nil packet")
|
||||
return packets.ErrNetworkError, false, errors.New("nil CONNACK packet")
|
||||
}
|
||||
|
||||
msg, ok := ca.(*packets.ConnackPacket)
|
||||
if !ok {
|
||||
ERROR.Println(NET, "received msg that was not CONNACK")
|
||||
return packets.ErrNetworkError, false, errors.New("non-CONNACK first packet received")
|
||||
}
|
||||
|
||||
DEBUG.Println(NET, "received connack")
|
||||
return msg.ReturnCode, msg.SessionPresent, nil
|
||||
}
|
||||
|
||||
// inbound encapsulates the output from startIncoming.
|
||||
// err - If != nil then an error has occurred
|
||||
// cp - A control packet received over the network link
|
||||
type inbound struct {
|
||||
err error
|
||||
cp packets.ControlPacket
|
||||
}
|
||||
|
||||
// startIncoming initiates a goroutine that reads incoming messages off the wire and sends them to the channel (returned).
|
||||
// If there are any issues with the network connection then the returned channel will be closed and the goroutine will exit
|
||||
// (so closing the connection will terminate the goroutine)
|
||||
func startIncoming(conn io.Reader) <-chan inbound {
|
||||
var err error
|
||||
var cp packets.ControlPacket
|
||||
ibound := make(chan inbound)
|
||||
|
||||
DEBUG.Println(NET, "incoming started")
|
||||
|
||||
go func() {
|
||||
for {
|
||||
if cp, err = packets.ReadPacket(conn); err != nil {
|
||||
// We do not want to log the error if it is due to the network connection having been closed
|
||||
// elsewhere (i.e. after sending DisconnectPacket). Detecting this situation is the subject of
|
||||
// https://github.com/golang/go/issues/4373
|
||||
if !strings.Contains(err.Error(), closedNetConnErrorText) {
|
||||
ibound <- inbound{err: err}
|
||||
}
|
||||
close(ibound)
|
||||
DEBUG.Println(NET, "incoming complete")
|
||||
return
|
||||
}
|
||||
DEBUG.Println(NET, "startIncoming Received Message")
|
||||
ibound <- inbound{cp: cp}
|
||||
}
|
||||
}()
|
||||
|
||||
return ibound
|
||||
}
|
||||
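
The same read-loop-into-channel pattern, shown in isolation with text lines instead of MQTT packets (illustrative sketch only, not part of the vendored file): a goroutine reads until an error, sends results on a channel, and closes the channel so the consumer's range loop terminates cleanly.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// startLines reads lines from r in a goroutine and delivers them on the
// returned channel; the channel is closed when reading stops (EOF or error).
func startLines(r *bufio.Reader) <-chan string {
	out := make(chan string)
	go func() {
		defer close(out)
		for {
			line, err := r.ReadString('\n')
			if line != "" {
				out <- strings.TrimRight(line, "\n")
			}
			if err != nil { // io.EOF or a real error ends the goroutine
				return
			}
		}
	}()
	return out
}

func main() {
	r := bufio.NewReader(strings.NewReader("connect\npublish\n"))
	for msg := range startLines(r) {
		fmt.Println("received:", msg)
	}
}
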
|
||||
// incomingComms encapsulates the possible output of the incomingComms routine. If err != nil then an error has occurred and
|
||||
// the routine will have terminated; otherwise one of the other members should be non-nil
|
||||
type incomingComms struct {
|
||||
err error // If non-nil then there has been an error (ignore everything else)
|
||||
outbound *PacketAndToken // Packet (with token) that needs to be sent out (e.g. an acknowledgement)
|
||||
incomingPub *packets.PublishPacket // A new publish has been received; this will need to be passed on to our user
|
||||
}
|
||||
|
||||
// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming
|
||||
// messages.
|
||||
// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as
|
||||
// everything in the store has been sent.
|
||||
// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed)
|
||||
func startIncomingComms(conn io.Reader,
|
||||
c commsFns,
|
||||
inboundFromStore <-chan packets.ControlPacket,
|
||||
) <-chan incomingComms {
|
||||
ibound := startIncoming(conn) // Start goroutine that reads from network connection
|
||||
output := make(chan incomingComms)
|
||||
|
||||
DEBUG.Println(NET, "startIncomingComms started")
|
||||
go func() {
|
||||
for {
|
||||
if inboundFromStore == nil && ibound == nil {
|
||||
close(output)
|
||||
DEBUG.Println(NET, "startIncomingComms goroutine complete")
|
||||
return // As soon as ibound is closed we can exit (should have already processed an error)
|
||||
}
|
||||
DEBUG.Println(NET, "logic waiting for msg on ibound")
|
||||
|
||||
var msg packets.ControlPacket
|
||||
var ok bool
|
||||
select {
|
||||
case msg, ok = <-inboundFromStore:
|
||||
if !ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: inboundFromStore complete")
|
||||
inboundFromStore = nil // should happen quickly as this is only for persisted messages
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "startIncomingComms: got msg from store")
|
||||
case ibMsg, ok := <-ibound:
|
||||
if !ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: ibound complete")
|
||||
ibound = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "startIncomingComms: got msg on ibound")
|
||||
// If the inbound comms routine encounters any issues it will send us an error.
|
||||
if ibMsg.err != nil {
|
||||
output <- incomingComms{err: ibMsg.err}
|
||||
continue // Usually the channel will be closed immediately after sending an error but safer that we do not assume this
|
||||
}
|
||||
msg = ibMsg.cp
|
||||
|
||||
c.persistInbound(msg)
|
||||
c.UpdateLastReceived() // Notify keepalive logic that we recently received a packet
|
||||
}
|
||||
|
||||
switch m := msg.(type) {
|
||||
case *packets.PingrespPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received pingresp")
|
||||
c.pingRespReceived()
|
||||
case *packets.SubackPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received suback, id:", m.MessageID)
|
||||
token := c.getToken(m.MessageID)
|
||||
|
||||
if t, ok := token.(*SubscribeToken); ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: granted qoss", m.ReturnCodes)
|
||||
for i, qos := range m.ReturnCodes {
|
||||
t.subResult[t.subs[i]] = qos
|
||||
}
|
||||
}
|
||||
|
||||
token.flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.UnsubackPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received unsuback, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.PublishPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received publish, msgId:", m.MessageID)
|
||||
output <- incomingComms{incomingPub: m}
|
||||
case *packets.PubackPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received puback, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.PubrecPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubrec, id:", m.MessageID)
|
||||
prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
prel.MessageID = m.MessageID
|
||||
output <- incomingComms{outbound: &PacketAndToken{p: prel, t: nil}}
|
||||
case *packets.PubrelPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubrel, id:", m.MessageID)
|
||||
pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
|
||||
pc.MessageID = m.MessageID
|
||||
c.persistOutbound(pc)
|
||||
output <- incomingComms{outbound: &PacketAndToken{p: pc, t: nil}}
|
||||
case *packets.PubcompPacket:
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubcomp, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return output
|
||||
}
|
||||
|
||||
// startOutgoingComms initiates a go routine to transmit outgoing packets.
|
||||
// Pass in an open network connection and channels for outbound messages (including those triggered
|
||||
// directly from incoming comms).
|
||||
// Returns a channel that will receive details of any errors (closed when the goroutine exits)
|
||||
// This function will only terminate when all input channels are closed
|
||||
func startOutgoingComms(conn net.Conn,
|
||||
c commsFns,
|
||||
oboundp <-chan *PacketAndToken,
|
||||
obound <-chan *PacketAndToken,
|
||||
oboundFromIncoming <-chan *PacketAndToken,
|
||||
) <-chan error {
|
||||
errChan := make(chan error)
|
||||
DEBUG.Println(NET, "outgoing started")
|
||||
|
||||
go func() {
|
||||
for {
|
||||
DEBUG.Println(NET, "outgoing waiting for an outbound message")
|
||||
|
||||
// This goroutine will only exit when all of the input channels we receive on have been closed. This approach is taken to avoid any
|
||||
// deadlocks (if the connection goes down there are limited options as to what we can do with anything waiting on us and
|
||||
// throwing away the packets seems the best option)
|
||||
if oboundp == nil && obound == nil && oboundFromIncoming == nil {
|
||||
DEBUG.Println(NET, "outgoing comms stopping")
|
||||
close(errChan)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case pub, ok := <-obound:
|
||||
if !ok {
|
||||
obound = nil
|
||||
continue
|
||||
}
|
||||
msg := pub.p.(*packets.PublishPacket)
|
||||
DEBUG.Println(NET, "obound msg to write", msg.MessageID)
|
||||
|
||||
writeTimeout := c.getWriteTimeOut()
|
||||
if writeTimeout > 0 {
|
||||
if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
|
||||
ERROR.Println(NET, "SetWriteDeadline ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := msg.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing obound reporting error ", err)
|
||||
pub.t.setError(err)
|
||||
// report error if it's not due to the connection being closed elsewhere
|
||||
if !strings.Contains(err.Error(), closedNetConnErrorText) {
|
||||
errChan <- err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if writeTimeout > 0 {
|
||||
// If we successfully wrote, we don't want the timeout to happen during an idle period
|
||||
// so we reset it to infinite.
|
||||
if err := conn.SetWriteDeadline(time.Time{}); err != nil {
|
||||
ERROR.Println(NET, "SetWriteDeadline to 0 ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if msg.Qos == 0 {
|
||||
pub.t.flowComplete()
|
||||
}
|
||||
DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
|
||||
case msg, ok := <-oboundp:
|
||||
if !ok {
|
||||
oboundp = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
|
||||
if err := msg.p.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing oboundp reporting error ", err)
|
||||
if msg.t != nil {
|
||||
msg.t.setError(err)
|
||||
}
|
||||
errChan <- err
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := msg.p.(*packets.DisconnectPacket); ok {
|
||||
msg.t.(*DisconnectToken).flowComplete()
|
||||
DEBUG.Println(NET, "outbound wrote disconnect, closing connection")
|
||||
// As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection"
|
||||
// Closing the connection will cause the goroutines to end in sequence (starting with incoming comms)
|
||||
conn.Close()
|
||||
}
|
||||
case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket)
|
||||
if !ok {
|
||||
oboundFromIncoming = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "obound from incoming msg to write, type", reflect.TypeOf(msg.p), " ID ", msg.p.Details().MessageID)
|
||||
if err := msg.p.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing oboundFromIncoming reporting error", err)
|
||||
if msg.t != nil {
|
||||
msg.t.setError(err)
|
||||
}
|
||||
errChan <- err
|
||||
continue
|
||||
}
|
||||
}
|
||||
c.UpdateLastSent() // Record that a packet has been sent (for keepalive routine)
|
||||
}
|
||||
}()
|
||||
return errChan
|
||||
}
|
||||
|
||||
// commsFns provide access to the client state (messageids, requesting disconnection and updating timing)
|
||||
type commsFns interface {
|
||||
getToken(id uint16) tokenCompletor // Retrieve the token for the specified messageid (if none then a dummy token must be returned)
|
||||
freeID(id uint16) // Release the specified messageid (clearing out of any persistent store)
|
||||
UpdateLastReceived() // Must be called whenever a packet is received
|
||||
UpdateLastSent() // Must be called whenever a packet is successfully sent
|
||||
getWriteTimeOut() time.Duration // Return the writetimeout (or 0 if none)
|
||||
persistOutbound(m packets.ControlPacket) // add the packet to the outbound store
|
||||
persistInbound(m packets.ControlPacket) // add the packet to the inbound store
|
||||
pingRespReceived() // Called when a ping response is received
|
||||
}
|
||||
|
||||
// startComms initiates goroutines that handle communications over the network connection
|
||||
// Messages will be stored (via commsFns) and deleted from the store as necessary
|
||||
// It returns two channels:
|
||||
// packets.PublishPacket - Will receive publish packets received over the network.
|
||||
// Closed when incoming comms routines exit (on shutdown or if network link closed)
|
||||
// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down
|
||||
//
|
||||
// Note: The comms routines monitoring oboundp and obound will not shut down until those channels are both closed. Any messages received between the
|
||||
// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is
|
||||
// minimised.
|
||||
func startComms(conn net.Conn, // Network connection (must be active)
|
||||
c commsFns, // getters and setters to enable us to cleanly interact with client
|
||||
inboundFromStore <-chan packets.ControlPacket, // Inbound packets from the persistence store (should be closed relatively soon after startup)
|
||||
oboundp <-chan *PacketAndToken,
|
||||
obound <-chan *PacketAndToken) (
|
||||
<-chan *packets.PublishPacket, // Publish packets received over the network
|
||||
<-chan error, // Any errors (should generally trigger a disconnect)
|
||||
) {
|
||||
// Start inbound comms handler; this needs to be able to transmit messages so we start a go routine to add these to the priority outbound channel
|
||||
ibound := startIncomingComms(conn, c, inboundFromStore)
|
||||
outboundFromIncoming := make(chan *PacketAndToken) // Will accept outgoing messages triggered by startIncomingComms (e.g. acknowledgements)
|
||||
|
||||
// Start the outgoing handler. It is important to note that output from startIncomingComms is fed into startOutgoingComms (for ACK's)
|
||||
oboundErr := startOutgoingComms(conn, c, oboundp, obound, outboundFromIncoming)
|
||||
DEBUG.Println(NET, "startComms started")
|
||||
|
||||
// Run up go routines to handle the output from the above comms functions - these are handled in separate
|
||||
// go routines because they can interact (e.g. ibound triggers an ACK to obound which triggers an error)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
|
||||
outPublish := make(chan *packets.PublishPacket)
|
||||
outError := make(chan error)
|
||||
|
||||
// Any messages received get passed to the appropriate channel
|
||||
go func() {
|
||||
for ic := range ibound {
|
||||
if ic.err != nil {
|
||||
outError <- ic.err
|
||||
continue
|
||||
}
|
||||
if ic.outbound != nil {
|
||||
outboundFromIncoming <- ic.outbound
|
||||
continue
|
||||
}
|
||||
if ic.incomingPub != nil {
|
||||
outPublish <- ic.incomingPub
|
||||
continue
|
||||
}
|
||||
ERROR.Println(STR, "startComms received empty incomingComms msg")
|
||||
}
|
||||
// Close channels that will not be written to again (allowing other routines to exit)
|
||||
close(outboundFromIncoming)
|
||||
close(outPublish)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Any errors will be passed out to our caller
|
||||
go func() {
|
||||
for err := range oboundErr {
|
||||
outError <- err
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// outError is used by both routines so can only be closed when they are both complete
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(outError)
|
||||
DEBUG.Println(NET, "startComms closing outError")
|
||||
}()
|
||||
|
||||
return outPublish, outError
|
||||
}
|
||||
|
||||
// ackFunc acknowledges a packet
|
||||
// WARNING the function returned must not be called if the comms routine is shutting down or not running
|
||||
// (it needs outgoing comms in order to send the acknowledgement). Currently this is only called from
|
||||
// matchAndDispatch which will be shut down before the comms are
|
||||
func ackFunc(oboundP chan *PacketAndToken, persist Store, packet *packets.PublishPacket) func() {
|
||||
return func() {
|
||||
switch packet.Qos {
|
||||
case 2:
|
||||
pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
|
||||
pr.MessageID = packet.MessageID
|
||||
DEBUG.Println(NET, "putting pubrec msg on obound")
|
||||
oboundP <- &PacketAndToken{p: pr, t: nil}
|
||||
DEBUG.Println(NET, "done putting pubrec msg on obound")
|
||||
case 1:
|
||||
pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||
pa.MessageID = packet.MessageID
|
||||
DEBUG.Println(NET, "putting puback msg on obound")
|
||||
persistOutbound(persist, pa)
|
||||
oboundP <- &PacketAndToken{p: pa, t: nil}
|
||||
DEBUG.Println(NET, "done putting puback msg on obound")
|
||||
case 0:
|
||||
// do nothing, since there is no need to send an ack packet back
|
||||
}
|
||||
}
|
||||
}
|
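
For illustration only: the acknowledgement choice above, reduced to a standalone helper (ackFor is a hypothetical name, not part of this package). QoS 1 is answered with a PUBACK, QoS 2 starts the PUBREC/PUBREL/PUBCOMP exchange, and QoS 0 needs no reply.

package main

import (
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

// ackFor returns the packet that should be sent in response to an incoming
// publish, or nil for QoS 0.
func ackFor(pub *packets.PublishPacket) packets.ControlPacket {
	switch pub.Qos {
	case 2:
		pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
		pr.MessageID = pub.MessageID
		return pr
	case 1:
		pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
		pa.MessageID = pub.MessageID
		return pa
	}
	return nil
}

func main() {
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = 1
	pub.MessageID = 42
	fmt.Println(ackFor(pub)) // prints the PUBACK details
}
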
91
vendor/github.com/eclipse/paho.mqtt.golang/netconn.go
generated
vendored
Normal file
|
@@ -0,0 +1,91 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
//
|
||||
// This just establishes the network connection; once established the type of connection should be irrelevant
|
||||
//
|
||||
|
||||
// openConnection opens a network connection using the protocol indicated in the URL. Does not carry out any MQTT-specific handshakes
|
||||
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions) (net.Conn, error) {
|
||||
switch uri.Scheme {
|
||||
case "ws":
|
||||
conn, err := NewWebsocket(uri.String(), nil, timeout, headers, websocketOptions)
|
||||
return conn, err
|
||||
case "wss":
|
||||
conn, err := NewWebsocket(uri.String(), tlsc, timeout, headers, websocketOptions)
|
||||
return conn, err
|
||||
case "mqtt", "tcp":
|
||||
allProxy := os.Getenv("all_proxy")
|
||||
if len(allProxy) == 0 {
|
||||
conn, err := net.DialTimeout("tcp", uri.Host, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
proxyDialer := proxy.FromEnvironment()
|
||||
|
||||
conn, err := proxyDialer.Dial("tcp", uri.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
case "unix":
|
||||
conn, err := net.DialTimeout("unix", uri.Host, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
case "ssl", "tls", "mqtts", "mqtt+ssl", "tcps":
|
||||
allProxy := os.Getenv("all_proxy")
|
||||
if len(allProxy) == 0 {
|
||||
conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
proxyDialer := proxy.FromEnvironment()
|
||||
|
||||
conn, err := proxyDialer.Dial("tcp", uri.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tlsConn := tls.Client(conn, tlsc)
|
||||
|
||||
err = tlsConn.Handshake()
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tlsConn, nil
|
||||
}
|
||||
return nil, errors.New("unknown protocol")
|
||||
}
|
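
For illustration only: the plain-TCP branch above amounts to parsing the scheme and dialling with a timeout. A standalone sketch using only the standard library (the broker address is an assumption):

package main

import (
	"fmt"
	"net"
	"net/url"
	"time"
)

func main() {
	u, err := url.Parse("tcp://broker.example.com:1883") // hypothetical broker URL
	if err != nil {
		panic(err)
	}
	switch u.Scheme {
	case "mqtt", "tcp":
		// Mirrors the no-proxy path above: dial the host with a timeout.
		conn, err := net.DialTimeout("tcp", u.Host, 5*time.Second)
		if err != nil {
			fmt.Println("dial failed:", err)
			return
		}
		defer conn.Close()
		fmt.Println("connected to", conn.RemoteAddr())
	default:
		fmt.Println("unsupported scheme:", u.Scheme)
	}
}
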
108
vendor/github.com/eclipse/paho.mqtt.golang/notice.html
generated
vendored
Normal file
|
@@ -0,0 +1,108 @@
|
|||
<?xml version="1.0" encoding="ISO-8859-1" ?>
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
|
||||
<title>Eclipse Foundation Software User Agreement</title>
|
||||
</head>
|
||||
|
||||
<body lang="EN-US">
|
||||
<h2>Eclipse Foundation Software User Agreement</h2>
|
||||
<p>February 1, 2011</p>
|
||||
|
||||
<h3>Usage Of Content</h3>
|
||||
|
||||
<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
|
||||
(COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
|
||||
CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
|
||||
OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
|
||||
NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
|
||||
CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
|
||||
|
||||
<h3>Applicable Licenses</h3>
|
||||
|
||||
<p>Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
|
||||
("EPL"). A copy of the EPL is provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
|
||||
For purposes of the EPL, "Program" will mean the Content.</p>
|
||||
|
||||
<p>Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
|
||||
repository ("Repository") in software modules ("Modules") and made available as downloadable archives ("Downloads").</p>
|
||||
|
||||
<ul>
|
||||
<li>Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").</li>
|
||||
<li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java™ ARchive) in a directory named "plugins".</li>
|
||||
<li>A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of the Plug-ins
|
||||
and/or Fragments associated with that Feature.</li>
|
||||
<li>Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of Included Features.</li>
|
||||
</ul>
|
||||
|
||||
<p>The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The terms and conditions governing Features and
|
||||
Included Features should be contained in files named "license.html" ("Feature Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module
|
||||
including, but not limited to the following locations:</p>
|
||||
|
||||
<ul>
|
||||
<li>The top-level (root) directory</li>
|
||||
<li>Plug-in and Fragment directories</li>
|
||||
<li>Inside Plug-ins and Fragments packaged as JARs</li>
|
||||
<li>Sub-directories of the directory named "src" of certain Plug-ins</li>
|
||||
<li>Feature directories</li>
|
||||
</ul>
|
||||
|
||||
<p>Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license ("Feature Update License") during the
|
||||
installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
|
||||
inform you where you can locate them. Feature Update Licenses may be found in the "license" property of files named "feature.properties" found within a Feature.
|
||||
Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
|
||||
that directory.</p>
|
||||
|
||||
<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE
|
||||
OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
|
||||
|
||||
<ul>
|
||||
<li>Eclipse Distribution License Version 1.0 (available at <a href="http://www.eclipse.org/licenses/edl-v10.html">http://www.eclipse.org/licenses/edl-v1.0.html</a>)</li>
|
||||
<li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
|
||||
<li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
|
||||
<li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
|
||||
<li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
|
||||
<li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
|
||||
</ul>
|
||||
|
||||
<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please
|
||||
contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.</p>
|
||||
|
||||
|
||||
<h3>Use of Provisioning Technology</h3>
|
||||
|
||||
<p>The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
|
||||
Update Manager ("Provisioning Technology") for the purpose of allowing users to install software, documentation, information and/or
|
||||
other materials (collectively "Installable Software"). This capability is provided with the intent of allowing such users to
|
||||
install, extend and update Eclipse-based products. Information about packaging Installable Software is available at <a
|
||||
href="http://eclipse.org/equinox/p2/repository_packaging.html">http://eclipse.org/equinox/p2/repository_packaging.html</a>
|
||||
("Specification").</p>
|
||||
|
||||
<p>You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
|
||||
applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
|
||||
in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
|
||||
Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:</p>
|
||||
|
||||
<ol>
|
||||
<li>A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology
|
||||
on a machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based
|
||||
product.</li>
|
||||
<li>During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
|
||||
accessed and copied to the Target Machine.</li>
|
||||
<li>Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
|
||||
Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from the Target
|
||||
Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
|
||||
the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
|
||||
indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.</li>
|
||||
</ol>
|
||||
|
||||
<h3>Cryptography</h3>
|
||||
|
||||
<p>Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
|
||||
another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
|
||||
possession, or use, and re-export of encryption software, to see if this is permitted.</p>
|
||||
|
||||
<p><small>Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.</small></p>
|
||||
</body>
|
||||
</html>
|
21
vendor/github.com/eclipse/paho.mqtt.golang/oops.go
generated
vendored
Normal file
|
@@ -0,0 +1,21 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
func chkerr(e error) {
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
}
|
389
vendor/github.com/eclipse/paho.mqtt.golang/options.go
generated
vendored
Normal file
|
@@ -0,0 +1,389 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
* Måns Ansgariusson
|
||||
*/
|
||||
|
||||
// Portions copyright © 2018 TIBCO Software Inc.
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CredentialsProvider allows the username and password to be updated
|
||||
// before reconnecting. It should return the current username and password.
|
||||
type CredentialsProvider func() (username string, password string)
|
||||
|
||||
// MessageHandler is a callback type which can be set to be
|
||||
// executed upon the arrival of messages published to topics
|
||||
// to which the client is subscribed.
|
||||
type MessageHandler func(Client, Message)
|
||||
|
||||
// ConnectionLostHandler is a callback type which can be set to be
|
||||
// executed upon an unintended disconnection from the MQTT broker.
|
||||
// Disconnects caused by calling Disconnect or ForceDisconnect will
|
||||
// not cause an OnConnectionLost callback to execute.
|
||||
type ConnectionLostHandler func(Client, error)
|
||||
|
||||
// OnConnectHandler is a callback that is called when the client
|
||||
// state changes from unconnected/disconnected to connected. Both
|
||||
// at initial connection and on reconnection
|
||||
type OnConnectHandler func(Client)
|
||||
|
||||
// ReconnectHandler is invoked prior to reconnecting after
|
||||
// the initial connection is lost
|
||||
type ReconnectHandler func(Client, *ClientOptions)
|
||||
|
||||
// ClientOptions contains configurable options for a Client. Note that these should be set using the
|
||||
// relevant methods (e.g. AddBroker) rather than directly. See those functions for information on usage.
|
||||
type ClientOptions struct {
|
||||
Servers []*url.URL
|
||||
ClientID string
|
||||
Username string
|
||||
Password string
|
||||
CredentialsProvider CredentialsProvider
|
||||
CleanSession bool
|
||||
Order bool
|
||||
WillEnabled bool
|
||||
WillTopic string
|
||||
WillPayload []byte
|
||||
WillQos byte
|
||||
WillRetained bool
|
||||
ProtocolVersion uint
|
||||
protocolVersionExplicit bool
|
||||
TLSConfig *tls.Config
|
||||
KeepAlive int64
|
||||
PingTimeout time.Duration
|
||||
ConnectTimeout time.Duration
|
||||
MaxReconnectInterval time.Duration
|
||||
AutoReconnect bool
|
||||
ConnectRetryInterval time.Duration
|
||||
ConnectRetry bool
|
||||
Store Store
|
||||
DefaultPublishHandler MessageHandler
|
||||
OnConnect OnConnectHandler
|
||||
OnConnectionLost ConnectionLostHandler
|
||||
OnReconnecting ReconnectHandler
|
||||
WriteTimeout time.Duration
|
||||
MessageChannelDepth uint
|
||||
ResumeSubs bool
|
||||
HTTPHeaders http.Header
|
||||
WebsocketOptions *WebsocketOptions
|
||||
}
|
||||
|
||||
// NewClientOptions will create a new ClientOptions type with some
|
||||
// default values.
|
||||
// Port: 1883
|
||||
// CleanSession: True
|
||||
// Order: True (note: it is recommended that this be set to FALSE unless order is important)
|
||||
// KeepAlive: 30 (seconds)
|
||||
// ConnectTimeout: 30 (seconds)
|
||||
// MaxReconnectInterval 10 (minutes)
|
||||
// AutoReconnect: True
|
||||
func NewClientOptions() *ClientOptions {
|
||||
o := &ClientOptions{
|
||||
Servers: nil,
|
||||
ClientID: "",
|
||||
Username: "",
|
||||
Password: "",
|
||||
CleanSession: true,
|
||||
Order: true,
|
||||
WillEnabled: false,
|
||||
WillTopic: "",
|
||||
WillPayload: nil,
|
||||
WillQos: 0,
|
||||
WillRetained: false,
|
||||
ProtocolVersion: 0,
|
||||
protocolVersionExplicit: false,
|
||||
KeepAlive: 30,
|
||||
PingTimeout: 10 * time.Second,
|
||||
ConnectTimeout: 30 * time.Second,
|
||||
MaxReconnectInterval: 10 * time.Minute,
|
||||
AutoReconnect: true,
|
||||
ConnectRetryInterval: 30 * time.Second,
|
||||
ConnectRetry: false,
|
||||
Store: nil,
|
||||
OnConnect: nil,
|
||||
OnConnectionLost: DefaultConnectionLostHandler,
|
||||
WriteTimeout: 0, // 0 represents timeout disabled
|
||||
ResumeSubs: false,
|
||||
HTTPHeaders: make(map[string][]string),
|
||||
WebsocketOptions: &WebsocketOptions{},
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// AddBroker adds a broker URI to the list of brokers to be used. The format should be
|
||||
// scheme://host:port
|
||||
// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
|
||||
// and "port" is the port on which the broker is accepting connections.
|
||||
//
|
||||
// The default hostname is "127.0.0.1" and the default scheme is "tcp://".
|
||||
//
|
||||
// An example broker URI would look like: tcp://foobar.com:1883
|
||||
func (o *ClientOptions) AddBroker(server string) *ClientOptions {
|
||||
if len(server) > 0 && server[0] == ':' {
|
||||
server = "127.0.0.1" + server
|
||||
}
|
||||
if !strings.Contains(server, "://") {
|
||||
server = "tcp://" + server
|
||||
}
|
||||
brokerURI, err := url.Parse(server)
|
||||
if err != nil {
|
||||
ERROR.Println(CLI, "Failed to parse broker address:", server, err)
|
||||
return o
|
||||
}
|
||||
o.Servers = append(o.Servers, brokerURI)
|
||||
return o
|
||||
}
|
||||
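
For illustration only (not part of the vendored source): a typical way to combine AddBroker with the other fluent setters defined in this file and hand the result to the client. NewClient, Connect and Disconnect belong to the client API defined elsewhere in this package; the broker URL and client id are assumptions.

package main

import (
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883").
		SetClientID("example-client").
		SetCleanSession(true).
		SetKeepAlive(30 * time.Second).
		SetConnectTimeout(10 * time.Second)

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	defer client.Disconnect(250) // allow 250ms for in-flight work to finish
}
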
|
||||
// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting
|
||||
// but not reconnecting if CleanSession is false. Otherwise these messages are discarded.
|
||||
func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
|
||||
o.ResumeSubs = resume
|
||||
return o
|
||||
}
|
||||
|
||||
// SetClientID will set the client id to be used by this client when
|
||||
// connecting to the MQTT broker. According to the MQTT v3.1 specification,
|
||||
// a client id must be no longer than 23 characters.
|
||||
func (o *ClientOptions) SetClientID(id string) *ClientOptions {
|
||||
o.ClientID = id
|
||||
return o
|
||||
}
|
||||
|
||||
// SetUsername will set the username to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetUsername(u string) *ClientOptions {
|
||||
o.Username = u
|
||||
return o
|
||||
}
|
||||
|
||||
// SetPassword will set the password to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetPassword(p string) *ClientOptions {
|
||||
o.Password = p
|
||||
return o
|
||||
}
|
||||
|
||||
// SetCredentialsProvider will set a method to be called by this client when
|
||||
// connecting to the MQTT broker that provides the current username and password.
|
||||
// Note: without the use of SSL/TLS, this information will be sent
|
||||
// in plaintext across the wire.
|
||||
func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
|
||||
o.CredentialsProvider = p
|
||||
return o
|
||||
}
|
||||
|
||||
// SetCleanSession will set the "clean session" flag in the connect message
|
||||
// when this client connects to an MQTT broker. By setting this flag, you are
|
||||
// indicating that no messages saved by the broker for this client should be
|
||||
// delivered. Any messages that were going to be sent by this client before
|
||||
// disconnecting previously but didn't will not be sent upon connecting to the
|
||||
// broker.
|
||||
func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
|
||||
o.CleanSession = clean
|
||||
return o
|
||||
}
|
||||
|
||||
// SetOrderMatters will set the message routing to guarantee order within
|
||||
// each QoS level. By default, this value is true. If set to false (recommended),
|
||||
// this flag indicates that messages can be delivered asynchronously
|
||||
// from the client to the application and possibly arrive out of order.
|
||||
// Specifically, the message handler is called in its own go routine.
|
||||
// Note that setting this to true does not guarantee in-order delivery
|
||||
// (this is subject to broker settings like "max_inflight_messages=1" in mosquitto)
|
||||
// and if true then handlers must not block.
|
||||
func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
|
||||
o.Order = order
|
||||
return o
|
||||
}
|
||||
|
||||
// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
|
||||
// to an MQTT broker. Please read the official Go documentation for more
|
||||
// information.
|
||||
func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
|
||||
o.TLSConfig = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetStore will set the implementation of the Store interface
|
||||
// used to provide message persistence in cases where QoS levels
|
||||
// QoS_ONE or QoS_TWO are used. If no store is provided, then the
|
||||
// client will use MemoryStore by default.
|
||||
func (o *ClientOptions) SetStore(s Store) *ClientOptions {
|
||||
o.Store = s
|
||||
return o
|
||||
}
|
||||
|
||||
// SetKeepAlive will set the amount of time (in seconds) that the client
|
||||
// should wait before sending a PING request to the broker. This will
|
||||
// allow the client to know that a connection has not been lost with the
|
||||
// server.
|
||||
func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
|
||||
o.KeepAlive = int64(k / time.Second)
|
||||
return o
|
||||
}
|
||||
|
||||
// SetPingTimeout will set the amount of time (in seconds) that the client
|
||||
// will wait after sending a PING request to the broker, before deciding
|
||||
// that the connection has been lost. Default is 10 seconds.
|
||||
func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions {
|
||||
o.PingTimeout = k
|
||||
return o
|
||||
}
|
||||
|
||||
// SetProtocolVersion sets the MQTT version to be used to connect to the
|
||||
// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1
|
||||
func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
|
||||
if (pv >= 3 && pv <= 4) || (pv > 0x80) {
|
||||
o.ProtocolVersion = pv
|
||||
o.protocolVersionExplicit = true
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// UnsetWill will cause any set will message to be disregarded.
|
||||
func (o *ClientOptions) UnsetWill() *ClientOptions {
|
||||
o.WillEnabled = false
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWill accepts a string will message to be set. When the client connects,
|
||||
// it will give this will message to the broker, which will then publish the
|
||||
// provided payload (the will) to any clients that are subscribed to the provided
|
||||
// topic.
|
||||
func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
|
||||
o.SetBinaryWill(topic, []byte(payload), qos, retained)
|
||||
return o
|
||||
}
|
||||
|
||||
// SetBinaryWill accepts a []byte will message to be set. When the client connects,
|
||||
// it will give this will message to the broker, which will then publish the
|
||||
// provided payload (the will) to any clients that are subscribed to the provided
|
||||
// topic.
|
||||
func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
|
||||
o.WillEnabled = true
|
||||
o.WillTopic = topic
|
||||
o.WillPayload = payload
|
||||
o.WillQos = qos
|
||||
o.WillRetained = retained
|
||||
return o
|
||||
}
|
||||
|
||||
// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
|
||||
// is received that does not match any known subscriptions.
|
||||
//
|
||||
// If OrderMatters is true (the default) then the callback must not block or
|
||||
// call functions within this package that may block (e.g. Publish) other than in
|
||||
// a new go routine.
|
||||
// defaultHandler must be safe for concurrent use by multiple goroutines.
|
||||
func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
|
||||
o.DefaultPublishHandler = defaultHandler
|
||||
return o
|
||||
}
|
||||
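
Because handlers are invoked synchronously when ordering is enabled, a default handler that may block should hand its work to a goroutine. An illustrative sketch only (the handler body and broker URL are assumptions):

package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	handler := func(c mqtt.Client, m mqtt.Message) {
		go func() { // do any slow work off the calling goroutine
			fmt.Printf("topic=%s payload=%s\n", m.Topic(), m.Payload())
		}()
	}
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883").
		SetDefaultPublishHandler(handler)

	client := mqtt.NewClient(opts)
	_ = client // Connect/Subscribe would follow in real use
}
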
|
||||
// SetOnConnectHandler sets the function to be called when the client is connected. Both
|
||||
// at initial connection time and upon automatic reconnect.
|
||||
func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
|
||||
o.OnConnect = onConn
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
|
||||
// in the case where the client unexpectedly loses connection with the MQTT broker.
|
||||
func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
|
||||
o.OnConnectionLost = onLost
|
||||
return o
|
||||
}
|
||||
|
||||
// SetReconnectingHandler sets the OnReconnecting callback to be executed prior
|
||||
// to the client attempting a reconnect to the MQTT broker.
|
||||
func (o *ClientOptions) SetReconnectingHandler(cb ReconnectHandler) *ClientOptions {
|
||||
o.OnReconnecting = cb
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
|
||||
// timeout error. A duration of 0 never times out. The default is to never time out.
|
||||
func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
|
||||
o.WriteTimeout = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectTimeout limits how long the client will wait when trying to open a connection
|
||||
// to an MQTT server before timing out. A duration of 0 never times out.
|
||||
// Default 30 seconds. Currently only operational on TCP/TLS connections.
|
||||
func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
|
||||
o.ConnectTimeout = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
|
||||
// when the connection is lost
|
||||
func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
|
||||
o.MaxReconnectInterval = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetAutoReconnect sets whether the automatic reconnection logic should be used
|
||||
// when the connection is lost. Even if disabled, the ConnectionLostHandler is still
|
||||
// called
|
||||
func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
|
||||
o.AutoReconnect = a
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectRetryInterval sets the time that will be waited between connection attempts
|
||||
// when initially connecting if ConnectRetry is TRUE
|
||||
func (o *ClientOptions) SetConnectRetryInterval(t time.Duration) *ClientOptions {
|
||||
o.ConnectRetryInterval = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectRetry sets whether the connect function will automatically retry the connection
|
||||
// in the event of a failure (when true the token returned by the Connect function will
|
||||
// not complete until the connection is up or it is cancelled)
|
||||
// If ConnectRetry is true then subscriptions should be requested in OnConnect handler
|
||||
// Setting this to TRUE permits messages to be published before the connection is established
|
||||
func (o *ClientOptions) SetConnectRetry(a bool) *ClientOptions {
|
||||
o.ConnectRetry = a
|
||||
return o
|
||||
}
|
||||
|
||||
// SetMessageChannelDepth DEPRECATED The value set here no longer has any effect; this function
|
||||
// remains so the API is not altered.
|
||||
func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
|
||||
o.MessageChannelDepth = s
|
||||
return o
|
||||
}
|
||||
|
||||
// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket
|
||||
// opening handshake.
|
||||
func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
|
||||
o.HTTPHeaders = h
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWebsocketOptions sets the additional websocket options used in a WebSocket connection
|
||||
func (o *ClientOptions) SetWebsocketOptions(w *WebsocketOptions) *ClientOptions {
|
||||
o.WebsocketOptions = w
|
||||
return o
|
||||
}
|
167
vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
generated
vendored
Normal file
|
@@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized.
|
||||
type ClientOptionsReader struct {
|
||||
options *ClientOptions
|
||||
}
|
||||
|
||||
// Servers returns a slice of the servers defined in the ClientOptions
|
||||
func (r *ClientOptionsReader) Servers() []*url.URL {
|
||||
s := make([]*url.URL, len(r.options.Servers))
|
||||
|
||||
for i, u := range r.options.Servers {
|
||||
nu := *u
|
||||
s[i] = &nu
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// ResumeSubs returns true if resuming stored (un)sub is enabled
|
||||
func (r *ClientOptionsReader) ResumeSubs() bool {
|
||||
s := r.options.ResumeSubs
|
||||
return s
|
||||
}
|
||||
|
||||
// ClientID returns the set client id
|
||||
func (r *ClientOptionsReader) ClientID() string {
|
||||
s := r.options.ClientID
|
||||
return s
|
||||
}
|
||||
|
||||
// Username returns the set username
|
||||
func (r *ClientOptionsReader) Username() string {
|
||||
s := r.options.Username
|
||||
return s
|
||||
}
|
||||
|
||||
// Password returns the set password
|
||||
func (r *ClientOptionsReader) Password() string {
|
||||
s := r.options.Password
|
||||
return s
|
||||
}
|
||||
|
||||
// CleanSession returns whether CleanSession is set
|
||||
func (r *ClientOptionsReader) CleanSession() bool {
|
||||
s := r.options.CleanSession
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) Order() bool {
|
||||
s := r.options.Order
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WillEnabled() bool {
|
||||
s := r.options.WillEnabled
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WillTopic() string {
|
||||
s := r.options.WillTopic
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WillPayload() []byte {
|
||||
s := r.options.WillPayload
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WillQos() byte {
|
||||
s := r.options.WillQos
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WillRetained() bool {
|
||||
s := r.options.WillRetained
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) ProtocolVersion() uint {
|
||||
s := r.options.ProtocolVersion
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) TLSConfig() *tls.Config {
|
||||
s := r.options.TLSConfig
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) KeepAlive() time.Duration {
|
||||
s := time.Duration(r.options.KeepAlive * int64(time.Second))
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) PingTimeout() time.Duration {
|
||||
s := r.options.PingTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) ConnectTimeout() time.Duration {
|
||||
s := r.options.ConnectTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration {
|
||||
s := r.options.MaxReconnectInterval
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) AutoReconnect() bool {
|
||||
s := r.options.AutoReconnect
|
||||
return s
|
||||
}
|
||||
|
||||
// ConnectRetryInterval returns the delay between retries on the initial connection (if ConnectRetry true)
|
||||
func (r *ClientOptionsReader) ConnectRetryInterval() time.Duration {
|
||||
s := r.options.ConnectRetryInterval
|
||||
return s
|
||||
}
|
||||
|
||||
// ConnectRetry returns whether the initial connection request will be retried until connection established
|
||||
func (r *ClientOptionsReader) ConnectRetry() bool {
|
||||
s := r.options.ConnectRetry
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WriteTimeout() time.Duration {
|
||||
s := r.options.WriteTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) MessageChannelDepth() uint {
|
||||
s := r.options.MessageChannelDepth
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) HTTPHeaders() http.Header {
|
||||
h := r.options.HTTPHeaders
|
||||
return h
|
||||
}
|
||||
|
||||
// WebsocketOptions returns the currently configured WebSocket options
|
||||
func (r *ClientOptionsReader) WebsocketOptions() *WebsocketOptions {
|
||||
s := r.options.WebsocketOptions
|
||||
return s
|
||||
}
|
52
vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
generated
vendored
Normal file
|
@@ -0,0 +1,52 @@
|
|||
package packets

import (
	"bytes"
	"fmt"
	"io"
)

// ConnackPacket is an internal representation of the fields of the
// Connack MQTT packet
type ConnackPacket struct {
	FixedHeader
	SessionPresent bool
	ReturnCode     byte
}

func (ca *ConnackPacket) String() string {
	return fmt.Sprintf("%s sessionpresent: %t returncode: %d", ca.FixedHeader, ca.SessionPresent, ca.ReturnCode)
}

func (ca *ConnackPacket) Write(w io.Writer) error {
	var body bytes.Buffer
	var err error

	body.WriteByte(boolToByte(ca.SessionPresent))
	body.WriteByte(ca.ReturnCode)
	ca.FixedHeader.RemainingLength = 2
	packet := ca.FixedHeader.pack()
	packet.Write(body.Bytes())
	_, err = packet.WriteTo(w)

	return err
}

// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (ca *ConnackPacket) Unpack(b io.Reader) error {
	flags, err := decodeByte(b)
	if err != nil {
		return err
	}
	ca.SessionPresent = 1&flags > 0
	ca.ReturnCode, err = decodeByte(b)

	return err
}

// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket
func (ca *ConnackPacket) Details() Details {
	return Details{Qos: 0, MessageID: 0}
}
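As a rough illustration of how a ConnackPacket flows back through the generic reader: the bytes 0x20, 0x02, followed by the session-present flag and return code form a minimal CONNACK. A sketch, with the raw bytes made up for the example:

package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// 0x20 = CONNACK (packet type 2 in the high nibble), 0x02 = remaining length,
	// then the session-present flag and the return code.
	raw := []byte{0x20, 0x02, 0x01, 0x00}

	cp, err := packets.ReadPacket(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}

	ca := cp.(*packets.ConnackPacket)
	fmt.Println("session present:", ca.SessionPresent)                     // true
	fmt.Println("return code:", packets.ConnackReturnCodes[ca.ReturnCode]) // "Connection Accepted"
}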
151
vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ConnectPacket is an internal representation of the fields of the
|
||||
// Connect MQTT packet
|
||||
type ConnectPacket struct {
|
||||
FixedHeader
|
||||
ProtocolName string
|
||||
ProtocolVersion byte
|
||||
CleanSession bool
|
||||
WillFlag bool
|
||||
WillQos byte
|
||||
WillRetain bool
|
||||
UsernameFlag bool
|
||||
PasswordFlag bool
|
||||
ReservedBit byte
|
||||
Keepalive uint16
|
||||
|
||||
ClientIdentifier string
|
||||
WillTopic string
|
||||
WillMessage []byte
|
||||
Username string
|
||||
Password []byte
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) String() string {
|
||||
return fmt.Sprintf("%s protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.FixedHeader, c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeString(c.ProtocolName))
|
||||
body.WriteByte(c.ProtocolVersion)
|
||||
body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
|
||||
body.Write(encodeUint16(c.Keepalive))
|
||||
body.Write(encodeString(c.ClientIdentifier))
|
||||
if c.WillFlag {
|
||||
body.Write(encodeString(c.WillTopic))
|
||||
body.Write(encodeBytes(c.WillMessage))
|
||||
}
|
||||
if c.UsernameFlag {
|
||||
body.Write(encodeString(c.Username))
|
||||
}
|
||||
if c.PasswordFlag {
|
||||
body.Write(encodeBytes(c.Password))
|
||||
}
|
||||
c.FixedHeader.RemainingLength = body.Len()
|
||||
packet := c.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (c *ConnectPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
c.ProtocolName, err = decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.ProtocolVersion, err = decodeByte(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
options, err := decodeByte(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.ReservedBit = 1 & options
|
||||
c.CleanSession = 1&(options>>1) > 0
|
||||
c.WillFlag = 1&(options>>2) > 0
|
||||
c.WillQos = 3 & (options >> 3)
|
||||
c.WillRetain = 1&(options>>5) > 0
|
||||
c.PasswordFlag = 1&(options>>6) > 0
|
||||
c.UsernameFlag = 1&(options>>7) > 0
|
||||
c.Keepalive, err = decodeUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.ClientIdentifier, err = decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.WillFlag {
|
||||
c.WillTopic, err = decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.WillMessage, err = decodeBytes(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if c.UsernameFlag {
|
||||
c.Username, err = decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if c.PasswordFlag {
|
||||
c.Password, err = decodeBytes(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate performs validation of the fields of a Connect packet
|
||||
func (c *ConnectPacket) Validate() byte {
|
||||
if c.PasswordFlag && !c.UsernameFlag {
|
||||
return ErrRefusedBadUsernameOrPassword
|
||||
}
|
||||
if c.ReservedBit != 0 {
|
||||
// Bad reserved bit
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
|
||||
// Mismatched or unsupported protocol version
|
||||
return ErrRefusedBadProtocolVersion
|
||||
}
|
||||
if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
|
||||
// Bad protocol name
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
|
||||
// Bad size field
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if len(c.ClientIdentifier) == 0 && !c.CleanSession {
|
||||
// Bad client identifier
|
||||
return ErrRefusedIDRejected
|
||||
}
|
||||
return Accepted
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (c *ConnectPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
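A short sketch of building a CONNECT packet by hand, validating it, and serialising it with the Write method above; the client identifier is a placeholder:

package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	cp := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
	cp.ProtocolName = "MQTT"
	cp.ProtocolVersion = 4 // MQTT 3.1.1
	cp.CleanSession = true
	cp.ClientIdentifier = "example-client" // placeholder id
	cp.Keepalive = 30

	// Validate returns one of the CONNACK return-code constants; Accepted (0)
	// means the packet passes the checks above (protocol name/version, flags, sizes).
	if rc := cp.Validate(); rc != packets.Accepted {
		fmt.Println("invalid connect packet:", packets.ConnackReturnCodes[rc])
		return
	}

	var buf bytes.Buffer
	if err := cp.Write(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("encoded CONNECT is %d bytes\n", buf.Len())
}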
34
vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package packets

import (
	"io"
)

// DisconnectPacket is an internal representation of the fields of the
// Disconnect MQTT packet
type DisconnectPacket struct {
	FixedHeader
}

func (d *DisconnectPacket) String() string {
	return d.FixedHeader.String()
}

func (d *DisconnectPacket) Write(w io.Writer) error {
	packet := d.FixedHeader.pack()
	_, err := packet.WriteTo(w)

	return err
}

// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (d *DisconnectPacket) Unpack(b io.Reader) error {
	return nil
}

// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket
func (d *DisconnectPacket) Details() Details {
	return Details{Qos: 0, MessageID: 0}
}
356
vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
generated
vendored
Normal file
@@ -0,0 +1,356 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ControlPacket defines the interface for structs intended to hold
|
||||
// decoded MQTT packets, either from being read or before being
|
||||
// written
|
||||
type ControlPacket interface {
|
||||
Write(io.Writer) error
|
||||
Unpack(io.Reader) error
|
||||
String() string
|
||||
Details() Details
|
||||
}
|
||||
|
||||
// PacketNames maps the constants for each of the MQTT packet types
|
||||
// to a string representation of their name.
|
||||
var PacketNames = map[uint8]string{
|
||||
1: "CONNECT",
|
||||
2: "CONNACK",
|
||||
3: "PUBLISH",
|
||||
4: "PUBACK",
|
||||
5: "PUBREC",
|
||||
6: "PUBREL",
|
||||
7: "PUBCOMP",
|
||||
8: "SUBSCRIBE",
|
||||
9: "SUBACK",
|
||||
10: "UNSUBSCRIBE",
|
||||
11: "UNSUBACK",
|
||||
12: "PINGREQ",
|
||||
13: "PINGRESP",
|
||||
14: "DISCONNECT",
|
||||
}
|
||||
|
||||
// Below are the constants assigned to each of the MQTT packet types
|
||||
const (
|
||||
Connect = 1
|
||||
Connack = 2
|
||||
Publish = 3
|
||||
Puback = 4
|
||||
Pubrec = 5
|
||||
Pubrel = 6
|
||||
Pubcomp = 7
|
||||
Subscribe = 8
|
||||
Suback = 9
|
||||
Unsubscribe = 10
|
||||
Unsuback = 11
|
||||
Pingreq = 12
|
||||
Pingresp = 13
|
||||
Disconnect = 14
|
||||
)
|
||||
|
||||
// Below are the const definitions for error codes returned by
|
||||
// Connect()
|
||||
const (
|
||||
Accepted = 0x00
|
||||
ErrRefusedBadProtocolVersion = 0x01
|
||||
ErrRefusedIDRejected = 0x02
|
||||
ErrRefusedServerUnavailable = 0x03
|
||||
ErrRefusedBadUsernameOrPassword = 0x04
|
||||
ErrRefusedNotAuthorised = 0x05
|
||||
ErrNetworkError = 0xFE
|
||||
ErrProtocolViolation = 0xFF
|
||||
)
|
||||
|
||||
// ConnackReturnCodes is a map of the error codes constants for Connect()
|
||||
// to a string representation of the error
|
||||
var ConnackReturnCodes = map[uint8]string{
|
||||
0: "Connection Accepted",
|
||||
1: "Connection Refused: Bad Protocol Version",
|
||||
2: "Connection Refused: Client Identifier Rejected",
|
||||
3: "Connection Refused: Server Unavailable",
|
||||
4: "Connection Refused: Username or Password in unknown format",
|
||||
5: "Connection Refused: Not Authorised",
|
||||
254: "Connection Error",
|
||||
255: "Connection Refused: Protocol Violation",
|
||||
}
|
||||
|
||||
var (
|
||||
ErrorRefusedBadProtocolVersion = errors.New("unacceptable protocol version")
|
||||
ErrorRefusedIDRejected = errors.New("identifier rejected")
|
||||
ErrorRefusedServerUnavailable = errors.New("server Unavailable")
|
||||
ErrorRefusedBadUsernameOrPassword = errors.New("bad user name or password")
|
||||
ErrorRefusedNotAuthorised = errors.New("not Authorized")
|
||||
ErrorNetworkError = errors.New("network Error")
|
||||
ErrorProtocolViolation = errors.New("protocol Violation")
|
||||
)
|
||||
|
||||
// ConnErrors is a map of the errors codes constants for Connect()
|
||||
// to a Go error
|
||||
var ConnErrors = map[byte]error{
|
||||
Accepted: nil,
|
||||
ErrRefusedBadProtocolVersion: ErrorRefusedBadProtocolVersion,
|
||||
ErrRefusedIDRejected: ErrorRefusedIDRejected,
|
||||
ErrRefusedServerUnavailable: ErrorRefusedServerUnavailable,
|
||||
ErrRefusedBadUsernameOrPassword: ErrorRefusedBadUsernameOrPassword,
|
||||
ErrRefusedNotAuthorised: ErrorRefusedNotAuthorised,
|
||||
ErrNetworkError: ErrorNetworkError,
|
||||
ErrProtocolViolation: ErrorProtocolViolation,
|
||||
}
|
||||
|
||||
// ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
|
||||
// to read an MQTT packet from the stream. It returns a ControlPacket
|
||||
// representing the decoded MQTT packet and an error. One of these returns will
|
||||
// always be nil, a nil ControlPacket indicating an error occurred.
|
||||
func ReadPacket(r io.Reader) (ControlPacket, error) {
|
||||
var fh FixedHeader
|
||||
b := make([]byte, 1)
|
||||
|
||||
_, err := io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = fh.unpack(b[0], r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cp, err := NewControlPacketWithHeader(fh)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
packetBytes := make([]byte, fh.RemainingLength)
|
||||
n, err := io.ReadFull(r, packetBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n != fh.RemainingLength {
|
||||
return nil, errors.New("failed to read expected data")
|
||||
}
|
||||
|
||||
err = cp.Unpack(bytes.NewBuffer(packetBytes))
|
||||
return cp, err
|
||||
}
|
||||
|
||||
// NewControlPacket is used to create a new ControlPacket of the type specified
|
||||
// by packetType, this is usually done by reference to the packet type constants
|
||||
// defined in packets.go. The newly created ControlPacket is empty and a pointer
|
||||
// is returned.
|
||||
func NewControlPacket(packetType byte) ControlPacket {
|
||||
switch packetType {
|
||||
case Connect:
|
||||
return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}}
|
||||
case Connack:
|
||||
return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}}
|
||||
case Disconnect:
|
||||
return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}}
|
||||
case Publish:
|
||||
return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}}
|
||||
case Puback:
|
||||
return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}}
|
||||
case Pubrec:
|
||||
return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}}
|
||||
case Pubrel:
|
||||
return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}}
|
||||
case Pubcomp:
|
||||
return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}}
|
||||
case Subscribe:
|
||||
return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}}
|
||||
case Suback:
|
||||
return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}}
|
||||
case Unsubscribe:
|
||||
return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}}
|
||||
case Unsuback:
|
||||
return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}}
|
||||
case Pingreq:
|
||||
return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}}
|
||||
case Pingresp:
|
||||
return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewControlPacketWithHeader is used to create a new ControlPacket of the type
|
||||
// specified within the FixedHeader that is passed to the function.
|
||||
// The newly created ControlPacket is empty and a pointer is returned.
|
||||
func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
|
||||
switch fh.MessageType {
|
||||
case Connect:
|
||||
return &ConnectPacket{FixedHeader: fh}, nil
|
||||
case Connack:
|
||||
return &ConnackPacket{FixedHeader: fh}, nil
|
||||
case Disconnect:
|
||||
return &DisconnectPacket{FixedHeader: fh}, nil
|
||||
case Publish:
|
||||
return &PublishPacket{FixedHeader: fh}, nil
|
||||
case Puback:
|
||||
return &PubackPacket{FixedHeader: fh}, nil
|
||||
case Pubrec:
|
||||
return &PubrecPacket{FixedHeader: fh}, nil
|
||||
case Pubrel:
|
||||
return &PubrelPacket{FixedHeader: fh}, nil
|
||||
case Pubcomp:
|
||||
return &PubcompPacket{FixedHeader: fh}, nil
|
||||
case Subscribe:
|
||||
return &SubscribePacket{FixedHeader: fh}, nil
|
||||
case Suback:
|
||||
return &SubackPacket{FixedHeader: fh}, nil
|
||||
case Unsubscribe:
|
||||
return &UnsubscribePacket{FixedHeader: fh}, nil
|
||||
case Unsuback:
|
||||
return &UnsubackPacket{FixedHeader: fh}, nil
|
||||
case Pingreq:
|
||||
return &PingreqPacket{FixedHeader: fh}, nil
|
||||
case Pingresp:
|
||||
return &PingrespPacket{FixedHeader: fh}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType)
|
||||
}
|
||||
|
||||
// Details struct returned by the Details() function called on
|
||||
// ControlPackets to present details of the Qos and MessageID
|
||||
// of the ControlPacket
|
||||
type Details struct {
|
||||
Qos byte
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
// FixedHeader is a struct to hold the decoded information from
|
||||
// the fixed header of an MQTT ControlPacket
|
||||
type FixedHeader struct {
|
||||
MessageType byte
|
||||
Dup bool
|
||||
Qos byte
|
||||
Retain bool
|
||||
RemainingLength int
|
||||
}
|
||||
|
||||
func (fh FixedHeader) String() string {
|
||||
return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
|
||||
}
|
||||
|
||||
func boolToByte(b bool) byte {
|
||||
switch b {
|
||||
case true:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (fh *FixedHeader) pack() bytes.Buffer {
|
||||
var header bytes.Buffer
|
||||
header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
|
||||
header.Write(encodeLength(fh.RemainingLength))
|
||||
return header
|
||||
}
|
||||
|
||||
func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error {
|
||||
fh.MessageType = typeAndFlags >> 4
|
||||
fh.Dup = (typeAndFlags>>3)&0x01 > 0
|
||||
fh.Qos = (typeAndFlags >> 1) & 0x03
|
||||
fh.Retain = typeAndFlags&0x01 > 0
|
||||
|
||||
var err error
|
||||
fh.RemainingLength, err = decodeLength(r)
|
||||
return err
|
||||
}
|
||||
|
||||
func decodeByte(b io.Reader) (byte, error) {
|
||||
num := make([]byte, 1)
|
||||
_, err := b.Read(num)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return num[0], nil
|
||||
}
|
||||
|
||||
func decodeUint16(b io.Reader) (uint16, error) {
|
||||
num := make([]byte, 2)
|
||||
_, err := b.Read(num)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint16(num), nil
|
||||
}
|
||||
|
||||
func encodeUint16(num uint16) []byte {
|
||||
bytesResult := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(bytesResult, num)
|
||||
return bytesResult
|
||||
}
|
||||
|
||||
func encodeString(field string) []byte {
|
||||
return encodeBytes([]byte(field))
|
||||
}
|
||||
|
||||
func decodeString(b io.Reader) (string, error) {
|
||||
buf, err := decodeBytes(b)
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
func decodeBytes(b io.Reader) ([]byte, error) {
|
||||
fieldLength, err := decodeUint16(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
field := make([]byte, fieldLength)
|
||||
_, err = b.Read(field)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return field, nil
|
||||
}
|
||||
|
||||
func encodeBytes(field []byte) []byte {
|
||||
fieldLength := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
|
||||
return append(fieldLength, field...)
|
||||
}
|
||||
|
||||
func encodeLength(length int) []byte {
|
||||
var encLength []byte
|
||||
for {
|
||||
digit := byte(length % 128)
|
||||
length /= 128
|
||||
if length > 0 {
|
||||
digit |= 0x80
|
||||
}
|
||||
encLength = append(encLength, digit)
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return encLength
|
||||
}
|
||||
|
||||
func decodeLength(r io.Reader) (int, error) {
|
||||
var rLength uint32
|
||||
var multiplier uint32
|
||||
b := make([]byte, 1)
|
||||
for multiplier < 27 { // fix: Infinite '(digit & 128) == 1' will cause the dead loop
|
||||
_, err := io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
digit := b[0]
|
||||
rLength |= uint32(digit&127) << multiplier
|
||||
if (digit & 128) == 0 {
|
||||
break
|
||||
}
|
||||
multiplier += 7
|
||||
}
|
||||
return int(rLength), nil
|
||||
}
|
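A sketch of the full encode/decode round trip that packets.go enables: write any ControlPacket to a buffer with Write, then recover it with ReadPacket. The topic and payload are placeholders for the example:

package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = 1
	pub.TopicName = "sensors/temp" // placeholder topic
	pub.MessageID = 42
	pub.Payload = []byte("21.5")

	var buf bytes.Buffer
	if err := pub.Write(&buf); err != nil {
		panic(err)
	}

	// ReadPacket parses the fixed header, allocates the matching packet type and
	// unpacks the variable header and payload from the remaining bytes.
	cp, err := packets.ReadPacket(&buf)
	if err != nil {
		panic(err)
	}
	back := cp.(*packets.PublishPacket)
	fmt.Println(back.TopicName, back.Details().MessageID, string(back.Payload))
}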
34
vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package packets

import (
	"io"
)

// PingreqPacket is an internal representation of the fields of the
// Pingreq MQTT packet
type PingreqPacket struct {
	FixedHeader
}

func (pr *PingreqPacket) String() string {
	return pr.FixedHeader.String()
}

func (pr *PingreqPacket) Write(w io.Writer) error {
	packet := pr.FixedHeader.pack()
	_, err := packet.WriteTo(w)

	return err
}

// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (pr *PingreqPacket) Unpack(b io.Reader) error {
	return nil
}

// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket
func (pr *PingreqPacket) Details() Details {
	return Details{Qos: 0, MessageID: 0}
}
34
vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package packets

import (
	"io"
)

// PingrespPacket is an internal representation of the fields of the
// Pingresp MQTT packet
type PingrespPacket struct {
	FixedHeader
}

func (pr *PingrespPacket) String() string {
	return pr.FixedHeader.String()
}

func (pr *PingrespPacket) Write(w io.Writer) error {
	packet := pr.FixedHeader.pack()
	_, err := packet.WriteTo(w)

	return err
}

// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (pr *PingrespPacket) Unpack(b io.Reader) error {
	return nil
}

// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket
func (pr *PingrespPacket) Details() Details {
	return Details{Qos: 0, MessageID: 0}
}
42
vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package packets

import (
	"fmt"
	"io"
)

// PubackPacket is an internal representation of the fields of the
// Puback MQTT packet
type PubackPacket struct {
	FixedHeader
	MessageID uint16
}

func (pa *PubackPacket) String() string {
	return fmt.Sprintf("%s MessageID: %d", pa.FixedHeader, pa.MessageID)
}

func (pa *PubackPacket) Write(w io.Writer) error {
	var err error
	pa.FixedHeader.RemainingLength = 2
	packet := pa.FixedHeader.pack()
	packet.Write(encodeUint16(pa.MessageID))
	_, err = packet.WriteTo(w)

	return err
}

// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (pa *PubackPacket) Unpack(b io.Reader) error {
	var err error
	pa.MessageID, err = decodeUint16(b)

	return err
}

// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket
func (pa *PubackPacket) Details() Details {
	return Details{Qos: pa.Qos, MessageID: pa.MessageID}
}
42
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PubcompPacket is an internal representation of the fields of the
|
||||
// Pubcomp MQTT packet
|
||||
type PubcompPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", pc.FixedHeader, pc.MessageID)
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pc.FixedHeader.RemainingLength = 2
|
||||
packet := pc.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pc.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pc *PubcompPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pc.MessageID, err = decodeUint16(b)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pc *PubcompPacket) Details() Details {
|
||||
return Details{Qos: pc.Qos, MessageID: pc.MessageID}
|
||||
}
|
83
vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PublishPacket is an internal representation of the fields of the
|
||||
// Publish MQTT packet
|
||||
type PublishPacket struct {
|
||||
FixedHeader
|
||||
TopicName string
|
||||
MessageID uint16
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
func (p *PublishPacket) String() string {
|
||||
return fmt.Sprintf("%s topicName: %s MessageID: %d payload: %s", p.FixedHeader, p.TopicName, p.MessageID, string(p.Payload))
|
||||
}
|
||||
|
||||
func (p *PublishPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeString(p.TopicName))
|
||||
if p.Qos > 0 {
|
||||
body.Write(encodeUint16(p.MessageID))
|
||||
}
|
||||
p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
|
||||
packet := p.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
packet.Write(p.Payload)
|
||||
_, err = w.Write(packet.Bytes())
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (p *PublishPacket) Unpack(b io.Reader) error {
|
||||
var payloadLength = p.FixedHeader.RemainingLength
|
||||
var err error
|
||||
p.TopicName, err = decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.Qos > 0 {
|
||||
p.MessageID, err = decodeUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
payloadLength -= len(p.TopicName) + 4
|
||||
} else {
|
||||
payloadLength -= len(p.TopicName) + 2
|
||||
}
|
||||
if payloadLength < 0 {
|
||||
return fmt.Errorf("error unpacking publish, payload length < 0")
|
||||
}
|
||||
p.Payload = make([]byte, payloadLength)
|
||||
_, err = b.Read(p.Payload)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy creates a new PublishPacket with the same topic and payload
|
||||
// but an empty fixed header, useful for when you want to deliver
|
||||
// a message with different properties such as Qos but the same
|
||||
// content
|
||||
func (p *PublishPacket) Copy() *PublishPacket {
|
||||
newP := NewControlPacket(Publish).(*PublishPacket)
|
||||
newP.TopicName = p.TopicName
|
||||
newP.Payload = p.Payload
|
||||
|
||||
return newP
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (p *PublishPacket) Details() Details {
|
||||
return Details{Qos: p.Qos, MessageID: p.MessageID}
|
||||
}
|
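The Copy method only carries over the topic and payload; the fixed header (and therefore QoS, DUP and retain) starts out zeroed, so a redelivery at a different QoS has to set those fields itself. A brief sketch with made-up topic and payload values:

package main

import (
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	orig := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	orig.Qos = 2
	orig.TopicName = "alerts/high" // placeholder topic
	orig.Payload = []byte("overheat")

	// Copy keeps TopicName and Payload but returns a packet with a fresh
	// fixed header, so Qos is back to 0 until it is set explicitly.
	redelivery := orig.Copy()
	redelivery.Qos = 1
	redelivery.MessageID = 7

	fmt.Println(orig.Qos, redelivery.Qos) // 2 1
	fmt.Println(redelivery.TopicName, string(redelivery.Payload))
}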
42
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PubrecPacket is an internal representation of the fields of the
|
||||
// Pubrec MQTT packet
|
||||
type PubrecPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pr.FixedHeader.RemainingLength = 2
|
||||
packet := pr.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pr.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PubrecPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pr.MessageID, err = decodeUint16(b)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PubrecPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
42
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PubrelPacket is an internal representation of the fields of the
|
||||
// Pubrel MQTT packet
|
||||
type PubrelPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pr.FixedHeader.RemainingLength = 2
|
||||
packet := pr.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pr.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PubrelPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pr.MessageID, err = decodeUint16(b)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PubrelPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
57
vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// SubackPacket is an internal representation of the fields of the
|
||||
// Suback MQTT packet
|
||||
type SubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
ReturnCodes []byte
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", sa.FixedHeader, sa.MessageID)
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
body.Write(encodeUint16(sa.MessageID))
|
||||
body.Write(sa.ReturnCodes)
|
||||
sa.FixedHeader.RemainingLength = body.Len()
|
||||
packet := sa.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (sa *SubackPacket) Unpack(b io.Reader) error {
|
||||
var qosBuffer bytes.Buffer
|
||||
var err error
|
||||
sa.MessageID, err = decodeUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = qosBuffer.ReadFrom(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sa.ReturnCodes = qosBuffer.Bytes()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (sa *SubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: sa.MessageID}
|
||||
}
|
69
vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// SubscribePacket is an internal representation of the fields of the
|
||||
// Subscribe MQTT packet
|
||||
type SubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
Topics []string
|
||||
Qoss []byte
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d topics: %s", s.FixedHeader, s.MessageID, s.Topics)
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeUint16(s.MessageID))
|
||||
for i, topic := range s.Topics {
|
||||
body.Write(encodeString(topic))
|
||||
body.WriteByte(s.Qoss[i])
|
||||
}
|
||||
s.FixedHeader.RemainingLength = body.Len()
|
||||
packet := s.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (s *SubscribePacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
s.MessageID, err = decodeUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
payloadLength := s.FixedHeader.RemainingLength - 2
|
||||
for payloadLength > 0 {
|
||||
topic, err := decodeString(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Topics = append(s.Topics, topic)
|
||||
qos, err := decodeByte(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Qoss = append(s.Qoss, qos)
|
||||
payloadLength -= 2 + len(topic) + 1 // 2 bytes of string length, plus string, plus 1 byte for Qos
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (s *SubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: s.MessageID}
|
||||
}
|
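A sketch of assembling a SUBSCRIBE by hand: Topics and Qoss are parallel slices, so each topic filter needs a matching requested-QoS byte. The filters here are placeholders:

package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
	sub.MessageID = 1
	sub.Topics = []string{"devices/+/status", "devices/alerts/#"} // placeholder filters
	sub.Qoss = []byte{1, 2}                                       // one requested QoS per topic

	var buf bytes.Buffer
	if err := sub.Write(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("encoded SUBSCRIBE is %d bytes\n", buf.Len())
}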
42
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// UnsubackPacket is an internal representation of the fields of the
|
||||
// Unsuback MQTT packet
|
||||
type UnsubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", ua.FixedHeader, ua.MessageID)
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
ua.FixedHeader.RemainingLength = 2
|
||||
packet := ua.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(ua.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (ua *UnsubackPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
ua.MessageID, err = decodeUint16(b)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (ua *UnsubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: ua.MessageID}
|
||||
}
|
56
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// UnsubscribePacket is an internal representation of the fields of the
|
||||
// Unsubscribe MQTT packet
|
||||
type UnsubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
Topics []string
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) String() string {
|
||||
return fmt.Sprintf("%s MessageID: %d", u.FixedHeader, u.MessageID)
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
body.Write(encodeUint16(u.MessageID))
|
||||
for _, topic := range u.Topics {
|
||||
body.Write(encodeString(topic))
|
||||
}
|
||||
u.FixedHeader.RemainingLength = body.Len()
|
||||
packet := u.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (u *UnsubscribePacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
u.MessageID, err = decodeUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) {
|
||||
u.Topics = append(u.Topics, topic)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (u *UnsubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: u.MessageID}
|
||||
}
|
74
vendor/github.com/eclipse/paho.mqtt.golang/ping.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// keepalive - Send ping when connection unused for set period
|
||||
// connection passed in to avoid race condition on shutdown
|
||||
func keepalive(c *client, conn io.Writer) {
|
||||
defer c.workers.Done()
|
||||
DEBUG.Println(PNG, "keepalive starting")
|
||||
var checkInterval int64
|
||||
var pingSent time.Time
|
||||
|
||||
if c.options.KeepAlive > 10 {
|
||||
checkInterval = 5
|
||||
} else {
|
||||
checkInterval = c.options.KeepAlive / 2
|
||||
}
|
||||
|
||||
intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second)))
|
||||
defer intervalTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(PNG, "keepalive stopped")
|
||||
return
|
||||
case <-intervalTicker.C:
|
||||
lastSent := c.lastSent.Load().(time.Time)
|
||||
lastReceived := c.lastReceived.Load().(time.Time)
|
||||
|
||||
DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds())
|
||||
if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) {
|
||||
if atomic.LoadInt32(&c.pingOutstanding) == 0 {
|
||||
DEBUG.Println(PNG, "keepalive sending ping")
|
||||
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||
// We don't want to wait behind large messages being sent, the Write call
|
||||
// will block until it it able to send the packet.
|
||||
atomic.StoreInt32(&c.pingOutstanding, 1)
|
||||
if err := ping.Write(conn); err != nil {
|
||||
ERROR.Println(PNG, err)
|
||||
}
|
||||
c.lastSent.Store(time.Now())
|
||||
pingSent = time.Now()
|
||||
}
|
||||
}
|
||||
if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Since(pingSent) >= c.options.PingTimeout {
|
||||
CRITICAL.Println(PNG, "pingresp not received, disconnecting")
|
||||
c.internalConnLost(errors.New("pingresp not received, disconnecting")) // no harm in calling this if the connection is already down (or shutdown is in progress)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
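The check interval above is derived from the configured keep alive: anything over ten seconds is polled every five seconds, anything shorter at half the keep-alive period. A standalone restatement of that rule; the checkInterval helper below is illustrative only, not part of the library:

package main

import (
	"fmt"
	"time"
)

// checkInterval mirrors the rule used by keepalive above: poll every 5s for
// keep-alive periods over 10s, otherwise at half the keep-alive period.
// This is an illustrative re-statement, not an exported library function.
func checkInterval(keepAliveSeconds int64) time.Duration {
	if keepAliveSeconds > 10 {
		return 5 * time.Second
	}
	return time.Duration(keepAliveSeconds/2) * time.Second
}

func main() {
	for _, ka := range []int64{60, 10, 4} {
		fmt.Printf("keepalive %ds -> check every %s\n", ka, checkInterval(ka))
	}
}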
182
vendor/github.com/eclipse/paho.mqtt.golang/router.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// route is a type which associates MQTT Topic strings with a
|
||||
// callback to be executed upon the arrival of a message associated
|
||||
// with a subscription to that topic.
|
||||
type route struct {
|
||||
topic string
|
||||
callback MessageHandler
|
||||
}
|
||||
|
||||
// match takes a slice of strings which represent the route being tested having been split on '/'
|
||||
// separators, and a slice of strings representing the topic string in the published message, similarly
|
||||
// split.
|
||||
// The function determines if the topic string matches the route according to the MQTT topic rules
|
||||
// and returns a boolean of the outcome
|
||||
func match(route []string, topic []string) bool {
|
||||
if len(route) == 0 {
|
||||
return len(topic) == 0
|
||||
}
|
||||
|
||||
if len(topic) == 0 {
|
||||
return route[0] == "#"
|
||||
}
|
||||
|
||||
if route[0] == "#" {
|
||||
return true
|
||||
}
|
||||
|
||||
if (route[0] == "+") || (route[0] == topic[0]) {
|
||||
return match(route[1:], topic[1:])
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func routeIncludesTopic(route, topic string) bool {
|
||||
return match(routeSplit(route), strings.Split(topic, "/"))
|
||||
}
|
||||
|
||||
// removes $share and sharename when splitting the route to allow
|
||||
// shared subscription routes to correctly match the topic
|
||||
func routeSplit(route string) []string {
|
||||
var result []string
|
||||
if strings.HasPrefix(route, "$share") {
|
||||
result = strings.Split(route, "/")[2:]
|
||||
} else {
|
||||
result = strings.Split(route, "/")
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// match takes the topic string of the published message and does a basic compare to the
|
||||
// string of the current Route, if they match it returns true
|
||||
func (r *route) match(topic string) bool {
|
||||
return r.topic == topic || routeIncludesTopic(r.topic, topic)
|
||||
}
|
||||
|
||||
type router struct {
|
||||
sync.RWMutex
|
||||
routes *list.List
|
||||
defaultHandler MessageHandler
|
||||
messages chan *packets.PublishPacket
|
||||
}
|
||||
|
||||
// newRouter returns a new instance of a Router and channel which can be used to tell the Router
|
||||
// to stop
|
||||
func newRouter() *router {
|
||||
router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket)}
|
||||
return router
|
||||
}
|
||||
|
||||
// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
|
||||
// routes to see if there is already a matching Route. If there is it replaces the current
|
||||
// callback with the new one. If not it add a new entry to the list of Routes.
|
||||
func (r *router) addRoute(topic string, callback MessageHandler) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).topic == topic {
|
||||
r := e.Value.(*route)
|
||||
r.callback = callback
|
||||
return
|
||||
}
|
||||
}
|
||||
r.routes.PushBack(&route{topic: topic, callback: callback})
|
||||
}
|
||||
|
||||
// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If
|
||||
// found it removes the Route from the list.
|
||||
func (r *router) deleteRoute(topic string) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).topic == topic {
|
||||
r.routes.Remove(e)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setDefaultHandler assigns a default callback that will be called if no matching Route
|
||||
// is found for an incoming Publish.
|
||||
func (r *router) setDefaultHandler(handler MessageHandler) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.defaultHandler = handler
|
||||
}
|
||||
|
||||
// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that
|
||||
// takes messages off the channel, matches them against the internal route list and calls the
|
||||
// associated callback (or the defaultHandler, if one exists and no other route matched). If
|
||||
// anything is sent down the stop channel the function will end.
|
||||
func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) <-chan *PacketAndToken {
|
||||
ackChan := make(chan *PacketAndToken)
|
||||
go func() {
|
||||
for message := range messages {
|
||||
// DEBUG.Println(ROU, "matchAndDispatch received message")
|
||||
sent := false
|
||||
r.RLock()
|
||||
m := messageFromPublish(message, ackFunc(ackChan, client.persist, message))
|
||||
var handlers []MessageHandler
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(message.TopicName) {
|
||||
if order {
|
||||
handlers = append(handlers, e.Value.(*route).callback)
|
||||
} else {
|
||||
hd := e.Value.(*route).callback
|
||||
go func() {
|
||||
hd(client, m)
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
sent = true
|
||||
}
|
||||
}
|
||||
if !sent {
|
||||
if r.defaultHandler != nil {
|
||||
if order {
|
||||
handlers = append(handlers, r.defaultHandler)
|
||||
} else {
|
||||
go func() {
|
||||
r.defaultHandler(client, m)
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
} else {
|
||||
DEBUG.Println(ROU, "matchAndDispatch received message and no handler was available. Message will NOT be acknowledged.")
|
||||
}
|
||||
}
|
||||
r.RUnlock()
|
||||
for _, handler := range handlers {
|
||||
handler(client, m)
|
||||
m.Ack()
|
||||
}
|
||||
// DEBUG.Println(ROU, "matchAndDispatch handled message")
|
||||
}
|
||||
close(ackChan)
|
||||
DEBUG.Println(ROU, "matchAndDispatch exiting")
|
||||
}()
|
||||
return ackChan
|
||||
}
|
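The match/routeSplit pair above implements standard MQTT wildcard matching plus the $share prefix stripping. Both are unexported, so the sketch below re-implements the same rules purely for illustration of the expected behaviour:

package main

import (
	"fmt"
	"strings"
)

// match and routeSplit restate the unexported helpers in router.go so the
// behaviour can be exercised outside the mqtt package; they are illustrative copies.
func match(route []string, topic []string) bool {
	if len(route) == 0 {
		return len(topic) == 0
	}
	if len(topic) == 0 {
		return route[0] == "#"
	}
	if route[0] == "#" {
		return true
	}
	if route[0] == "+" || route[0] == topic[0] {
		return match(route[1:], topic[1:])
	}
	return false
}

func routeSplit(route string) []string {
	if strings.HasPrefix(route, "$share") {
		return strings.Split(route, "/")[2:] // drop "$share" and the share name
	}
	return strings.Split(route, "/")
}

func main() {
	cases := [][2]string{
		{"sensors/+/temp", "sensors/kitchen/temp"},                // true: '+' matches one level
		{"sensors/#", "sensors"},                                  // true: '#' matches the absent level
		{"$share/group1/sensors/+/temp", "sensors/kitchen/temp"},  // true: share prefix is stripped
		{"sensors/+/temp", "sensors/kitchen/humidity"},            // false
	}
	for _, c := range cases {
		fmt.Println(c[0], "matches", c[1], "=", match(routeSplit(c[0]), strings.Split(c[1], "/")))
	}
}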
136
vendor/github.com/eclipse/paho.mqtt.golang/store.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
const (
|
||||
inboundPrefix = "i."
|
||||
outboundPrefix = "o."
|
||||
)
|
||||
|
||||
// Store is an interface which can be used to provide implementations
|
||||
// for message persistence.
|
||||
// Because we may have to store distinct messages with the same
|
||||
// message ID, we need a unique key for each message. This is
|
||||
// possible by prepending "i." or "o." to each message id
|
||||
type Store interface {
|
||||
Open()
|
||||
Put(key string, message packets.ControlPacket)
|
||||
Get(key string) packets.ControlPacket
|
||||
All() []string
|
||||
Del(key string)
|
||||
Close()
|
||||
Reset()
|
||||
}
|
||||
|
||||
// A key MUST have the form "X.[messageid]"
|
||||
// where X is 'i' or 'o'
|
||||
func mIDFromKey(key string) uint16 {
|
||||
s := key[2:]
|
||||
i, err := strconv.Atoi(s)
|
||||
chkerr(err)
|
||||
return uint16(i)
|
||||
}
|
||||
|
||||
// Return true if key prefix is outbound
|
||||
func isKeyOutbound(key string) bool {
|
||||
return key[:2] == outboundPrefix
|
||||
}
|
||||
|
||||
// Return true if key prefix is inbound
|
||||
func isKeyInbound(key string) bool {
|
||||
return key[:2] == inboundPrefix
|
||||
}
|
||||
|
||||
// Return a string of the form "i.[id]"
|
||||
func inboundKeyFromMID(id uint16) string {
|
||||
return fmt.Sprintf("%s%d", inboundPrefix, id)
|
||||
}
|
||||
|
||||
// Return a string of the form "o.[id]"
|
||||
func outboundKeyFromMID(id uint16) string {
|
||||
return fmt.Sprintf("%s%d", outboundPrefix, id)
|
||||
}
|
||||
|
||||
// govern which outgoing messages are persisted
|
||||
func persistOutbound(s Store, m packets.ControlPacket) {
|
||||
switch m.Details().Qos {
|
||||
case 0:
|
||||
switch m.(type) {
|
||||
case *packets.PubackPacket, *packets.PubcompPacket:
|
||||
// Sending puback. delete matching publish
|
||||
// from ibound
|
||||
s.Del(inboundKeyFromMID(m.Details().MessageID))
|
||||
}
|
||||
case 1:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
|
||||
// Sending publish. store in obound
|
||||
// until puback received
|
||||
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
ERROR.Println(STR, "Asked to persist an invalid message type")
|
||||
}
|
||||
case 2:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket:
|
||||
// Sending publish. store in obound
|
||||
// until pubrel received
|
||||
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
ERROR.Println(STR, "Asked to persist an invalid message type")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// govern which incoming messages are persisted
|
||||
func persistInbound(s Store, m packets.ControlPacket) {
|
||||
switch m.Details().Qos {
|
||||
case 0:
|
||||
switch m.(type) {
|
||||
case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
|
||||
// Received a puback. delete matching publish
|
||||
// from obound
|
||||
s.Del(outboundKeyFromMID(m.Details().MessageID))
|
||||
case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
|
||||
default:
|
||||
ERROR.Println(STR, "Asked to persist an invalid messages type")
|
||||
}
|
||||
case 1:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket, *packets.PubrelPacket:
|
||||
// Received a publish. store it in ibound
|
||||
// until puback sent
|
||||
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
ERROR.Println(STR, "Asked to persist an invalid messages type")
|
||||
}
|
||||
case 2:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket:
|
||||
// Received a publish. store it in ibound
|
||||
// until pubrel received
|
||||
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
ERROR.Println(STR, "Asked to persist an invalid messages type")
|
||||
}
|
||||
}
|
||||
}
|
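The persistence helpers key every message as "i.<id>" or "o.<id>" so an inbound and an outbound message with the same MQTT message ID never collide. A sketch using the package's MemoryStore, which is assumed here to be the in-memory Store implementation shipped alongside this file; the topic and key values are placeholders:

package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	store := mqtt.NewMemoryStore()
	store.Open()
	defer store.Close()

	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = 1
	pub.TopicName = "jobs/42" // placeholder topic
	pub.MessageID = 42

	// "o.42" marks this as an outbound message awaiting its PUBACK; the matching
	// inbound key for the same ID would be "i.42".
	store.Put("o.42", pub)
	fmt.Println("stored keys:", store.All())
}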
200
vendor/github.com/eclipse/paho.mqtt.golang/token.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
/*
|
||||
* Copyright (c) 2014 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Allan Stockdill-Mander
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// PacketAndToken is a struct that contains both a ControlPacket and a
|
||||
// Token. This struct is passed via channels between the client interface
|
||||
// code and the underlying code responsible for sending and receiving
|
||||
// MQTT messages.
|
||||
type PacketAndToken struct {
|
||||
p packets.ControlPacket
|
||||
t tokenCompletor
|
||||
}
|
||||
|
||||
// Token defines the interface for the tokens used to indicate when
|
||||
// actions have completed.
|
||||
type Token interface {
|
||||
// Wait will wait indefinitely for the Token to complete, ie the Publish
|
||||
// to be sent and confirmed receipt from the broker.
|
||||
Wait() bool
|
||||
|
||||
// WaitTimeout takes a time.Duration to wait for the flow associated with the
|
||||
// Token to complete, returns true if it returned before the timeout or
|
||||
// returns false if the timeout occurred. In the case of a timeout the Token
|
||||
// does not have an error set in case the caller wishes to wait again.
|
||||
WaitTimeout(time.Duration) bool
|
||||
|
||||
// Done returns a channel that is closed when the flow associated
|
||||
// with the Token completes. Clients should call Error after the
|
||||
// channel is closed to check if the flow completed successfully.
|
||||
//
|
||||
// Done is provided for use in select statements. Simple use cases may
|
||||
// use Wait or WaitTimeout.
|
||||
Done() <-chan struct{}
|
||||
|
||||
Error() error
|
||||
}
|
||||
|
||||
type TokenErrorSetter interface {
|
||||
setError(error)
|
||||
}
|
||||
|
||||
type tokenCompletor interface {
|
||||
Token
|
||||
TokenErrorSetter
|
||||
flowComplete()
|
||||
}
|
||||
|
||||
type baseToken struct {
|
||||
m sync.RWMutex
|
||||
complete chan struct{}
|
||||
err error
|
||||
}
|
||||
|
||||
// Wait implements the Token Wait method.
|
||||
func (b *baseToken) Wait() bool {
|
||||
<-b.complete
|
||||
return true
|
||||
}
|
||||
|
||||
// WaitTimeout implements the Token WaitTimeout method.
|
||||
func (b *baseToken) WaitTimeout(d time.Duration) bool {
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-b.complete:
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
return true
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Done implements the Token Done method.
|
||||
func (b *baseToken) Done() <-chan struct{} {
|
||||
return b.complete
|
||||
}
|
||||
|
||||
func (b *baseToken) flowComplete() {
|
||||
select {
|
||||
case <-b.complete:
|
||||
default:
|
||||
close(b.complete)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseToken) Error() error {
|
||||
b.m.RLock()
|
||||
defer b.m.RUnlock()
|
||||
return b.err
|
||||
}
|
||||
|
||||
func (b *baseToken) setError(e error) {
|
||||
b.m.Lock()
|
||||
b.err = e
|
||||
b.flowComplete()
|
||||
b.m.Unlock()
|
||||
}
|
||||
|
||||
func newToken(tType byte) tokenCompletor {
|
||||
switch tType {
|
||||
case packets.Connect:
|
||||
return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Subscribe:
|
||||
return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)}
|
||||
case packets.Publish:
|
||||
return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Unsubscribe:
|
||||
return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Disconnect:
|
||||
return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectToken is an extension of Token containing the extra fields
|
||||
// required to provide information about calls to Connect()
|
||||
type ConnectToken struct {
|
||||
baseToken
|
||||
returnCode byte
|
||||
sessionPresent bool
|
||||
}
|
||||
|
||||
// ReturnCode returns the acknowledgement code in the connack sent
|
||||
// in response to a Connect()
|
||||
func (c *ConnectToken) ReturnCode() byte {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
return c.returnCode
|
||||
}
|
||||
|
||||
// SessionPresent returns a bool representing the value of the
|
||||
// session present field in the connack sent in response to a Connect()
|
||||
func (c *ConnectToken) SessionPresent() bool {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
return c.sessionPresent
|
||||
}
|
||||
|
||||
// PublishToken is an extension of Token containing the extra fields
|
||||
// required to provide information about calls to Publish()
|
||||
type PublishToken struct {
|
||||
baseToken
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
// MessageID returns the MQTT message ID that was assigned to the
|
||||
// Publish packet when it was sent to the broker
|
||||
func (p *PublishToken) MessageID() uint16 {
|
||||
return p.messageID
|
||||
}
|
||||
|
||||
// SubscribeToken is an extension of Token containing the extra fields
|
||||
// required to provide information about calls to Subscribe()
|
||||
type SubscribeToken struct {
|
||||
baseToken
|
||||
subs []string
|
||||
subResult map[string]byte
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
// Result returns a map of topics that were subscribed to along with
|
||||
// the matching return code from the broker. This is either the Qos
|
||||
// value of the subscription or an error code.
|
||||
func (s *SubscribeToken) Result() map[string]byte {
|
||||
s.m.RLock()
|
||||
defer s.m.RUnlock()
|
||||
return s.subResult
|
||||
}
|
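A minimal sketch (not part of the vendored source) of reading the per-topic result after subscribing; the broker address is hypothetical and the helper function is illustrative only:

```go
package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// printSubscribeResults subscribes to the given filters and prints the
// broker's per-topic return code (the granted QoS, or an error code).
func printSubscribeResults(c mqtt.Client, filters map[string]byte) {
	token := c.SubscribeMultiple(filters, nil)
	token.Wait()
	if st, ok := token.(*mqtt.SubscribeToken); ok {
		for topic, code := range st.Result() {
			fmt.Printf("%s -> %d\n", topic, code)
		}
	}
}

func main() {
	c := mqtt.NewClient(mqtt.NewClientOptions().AddBroker("tcp://broker.example.com:1883")) // hypothetical broker
	if t := c.Connect(); t.Wait() && t.Error() == nil {
		printSubscribeResults(c, map[string]byte{"sensors/#": 1})
	}
}
```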
||||
|
||||
// UnsubscribeToken is an extension of Token containing the extra fields
|
||||
// required to provide information about calls to Unsubscribe()
|
||||
type UnsubscribeToken struct {
|
||||
baseToken
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
// DisconnectToken is an extension of Token containing the extra fields
|
||||
// required to provide information about calls to Disconnect()
|
||||
type DisconnectToken struct {
|
||||
baseToken
|
||||
}
|
86
vendor/github.com/eclipse/paho.mqtt.golang/topic.go
generated
vendored
Normal file
|
@@ -0,0 +1,86 @@
|
|||
/*
|
||||
* Copyright (c) 2014 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrInvalidQos is the error returned when a packet is to be sent
|
||||
// with an invalid Qos value
|
||||
var ErrInvalidQos = errors.New("invalid QoS")
|
||||
|
||||
// ErrInvalidTopicEmptyString is the error returned when a topic string
|
||||
// is passed in that is 0 length
|
||||
var ErrInvalidTopicEmptyString = errors.New("invalid Topic; empty string")
|
||||
|
||||
// ErrInvalidTopicMultilevel is the error returned when a topic string
|
||||
// is passed in that has the multi level wildcard in any position but
|
||||
// the last
|
||||
var ErrInvalidTopicMultilevel = errors.New("invalid Topic; multi-level wildcard must be last level")
|
||||
|
||||
// Topic Names and Topic Filters
|
||||
// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
|
||||
// to the validity of Topic strings.
|
||||
// - A Topic must be between 1 and 65535 bytes.
|
||||
// - A Topic is case sensitive.
|
||||
// - A Topic may contain whitespace.
|
||||
// - A Topic containing a leading forward slash is different than a Topic without.
|
||||
// - A Topic may be "/" (two levels, both empty string).
|
||||
// - A Topic must be UTF-8 encoded.
|
||||
// - A Topic may contain any number of levels.
|
||||
// - A Topic may contain an empty level (two forward slashes in a row).
|
||||
// - A TopicName may not contain a wildcard.
|
||||
// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
|
||||
// - A TopicFilter may contain any number of + (single-level) wildcards.
|
||||
// - A TopicFilter with a # will match the absence of a level
|
||||
// Example: a subscription to "foo/#" will match messages published to "foo".
|
||||
|
||||
func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
|
||||
if len(subs) == 0 {
|
||||
return nil, nil, errors.New("invalid subscription; subscribe map must not be empty")
|
||||
}
|
||||
|
||||
var topics []string
|
||||
var qoss []byte
|
||||
for topic, qos := range subs {
|
||||
if err := validateTopicAndQos(topic, qos); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
topics = append(topics, topic)
|
||||
qoss = append(qoss, qos)
|
||||
}
|
||||
|
||||
return topics, qoss, nil
|
||||
}
|
||||
|
||||
func validateTopicAndQos(topic string, qos byte) error {
|
||||
if len(topic) == 0 {
|
||||
return ErrInvalidTopicEmptyString
|
||||
}
|
||||
|
||||
levels := strings.Split(topic, "/")
|
||||
for i, level := range levels {
|
||||
if level == "#" && i != len(levels)-1 {
|
||||
return ErrInvalidTopicMultilevel
|
||||
}
|
||||
}
|
||||
|
||||
if qos > 2 {
|
||||
return ErrInvalidQos
|
||||
}
|
||||
return nil
|
||||
}
|
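To make the wildcard rules listed above concrete, here is a small standalone sketch (not the library's API; validateTopicAndQos itself is unexported) that mirrors the multi-level wildcard check:

```go
package main

import (
	"fmt"
	"strings"
)

// validTopicFilter mirrors the documented rules: the filter must be
// non-empty, and the multi-level wildcard "#" may only be the last level.
func validTopicFilter(topic string) bool {
	if len(topic) == 0 {
		return false
	}
	levels := strings.Split(topic, "/")
	for i, level := range levels {
		if level == "#" && i != len(levels)-1 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validTopicFilter("foo/#"))     // true
	fmt.Println(validTopicFilter("foo/#/bar")) // false
	fmt.Println(validTopicFilter(""))          // false
}
```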
40
vendor/github.com/eclipse/paho.mqtt.golang/trace.go
generated
vendored
Normal file
|
@@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
type (
|
||||
// Logger interface allows implementations to provide to this package any
|
||||
// object that implements the methods defined in it.
|
||||
Logger interface {
|
||||
Println(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// NOOPLogger implements a logger that performs no operations
|
||||
// by default. This allows us to efficiently discard unwanted messages.
|
||||
NOOPLogger struct{}
|
||||
)
|
||||
|
||||
func (NOOPLogger) Println(v ...interface{}) {}
|
||||
func (NOOPLogger) Printf(format string, v ...interface{}) {}
|
||||
|
||||
// Internal levels of library output that are initialised to not print
|
||||
// anything but can be overridden by the programmer
|
||||
var (
|
||||
ERROR Logger = NOOPLogger{}
|
||||
CRITICAL Logger = NOOPLogger{}
|
||||
WARN Logger = NOOPLogger{}
|
||||
DEBUG Logger = NOOPLogger{}
|
||||
)
|
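Since *log.Logger provides both Println and Printf, an application can override these package-level variables with standard library loggers. A minimal sketch (not part of the vendored source), assuming the package is imported as mqtt:

```go
package main

import (
	"log"
	"os"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Route the library's ERROR and DEBUG output to standard library loggers,
	// which satisfy the Println/Printf Logger interface defined above.
	mqtt.ERROR = log.New(os.Stderr, "[ERROR] ", log.LstdFlags)
	mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", log.LstdFlags)
}
```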
119
vendor/github.com/eclipse/paho.mqtt.golang/websocket.go
generated
vendored
Normal file
|
@@ -0,0 +1,119 @@
|
|||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// WebsocketOptions are config options for a websocket dialer
|
||||
type WebsocketOptions struct {
|
||||
ReadBufferSize int
|
||||
WriteBufferSize int
|
||||
Proxy ProxyFunction
|
||||
}
|
||||
|
||||
type ProxyFunction func(req *http.Request) (*url.URL, error)
|
||||
|
||||
// NewWebsocket opens a new websocket connection and returns it as a net.Conn compatible interface, using the gorilla/websocket package
|
||||
func NewWebsocket(host string, tlsc *tls.Config, timeout time.Duration, requestHeader http.Header, options *WebsocketOptions) (net.Conn, error) {
|
||||
if timeout == 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
|
||||
if options == nil {
|
||||
// Apply default options
|
||||
options = &WebsocketOptions{}
|
||||
}
|
||||
if options.Proxy == nil {
|
||||
options.Proxy = http.ProxyFromEnvironment
|
||||
}
|
||||
dialer := &websocket.Dialer{
|
||||
Proxy: options.Proxy,
|
||||
HandshakeTimeout: timeout,
|
||||
EnableCompression: false,
|
||||
TLSClientConfig: tlsc,
|
||||
Subprotocols: []string{"mqtt"},
|
||||
ReadBufferSize: options.ReadBufferSize,
|
||||
WriteBufferSize: options.WriteBufferSize,
|
||||
}
|
||||
|
||||
ws, resp, err := dialer.Dial(host, requestHeader)
|
||||
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
WARN.Println(CLI, fmt.Sprintf("Websocket handshake failure. StatusCode: %d. Body: %s", resp.StatusCode, resp.Body))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wrapper := &websocketConnector{
|
||||
Conn: ws,
|
||||
}
|
||||
return wrapper, err
|
||||
}
|
||||
|
||||
// websocketConnector wraps a websocket connection so that it satisfies the net.Conn interface and can serve as a
|
||||
// drop-in replacement for the golang.org/x/net/websocket package.
|
||||
// Implementation guide taken from https://github.com/gorilla/websocket/issues/282
|
||||
type websocketConnector struct {
|
||||
*websocket.Conn
|
||||
r io.Reader
|
||||
rio sync.Mutex
|
||||
wio sync.Mutex
|
||||
}
|
||||
|
||||
// SetDeadline sets both the read and write deadlines
|
||||
func (c *websocketConnector) SetDeadline(t time.Time) error {
|
||||
if err := c.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
err := c.SetWriteDeadline(t)
|
||||
return err
|
||||
}
|
||||
|
||||
// Write writes data to the websocket
|
||||
func (c *websocketConnector) Write(p []byte) (int, error) {
|
||||
c.wio.Lock()
|
||||
defer c.wio.Unlock()
|
||||
|
||||
err := c.WriteMessage(websocket.BinaryMessage, p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Read reads the current websocket frame
|
||||
func (c *websocketConnector) Read(p []byte) (int, error) {
|
||||
c.rio.Lock()
|
||||
defer c.rio.Unlock()
|
||||
for {
|
||||
if c.r == nil {
|
||||
// Advance to next message.
|
||||
var err error
|
||||
_, c.r, err = c.NextReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err := c.r.Read(p)
|
||||
if err == io.EOF {
|
||||
// At end of message.
|
||||
c.r = nil
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
// No data read, continue to next message.
|
||||
continue
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
}
|
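A brief sketch (not part of the vendored source) of dialing a broker over websockets with NewWebsocket; the URL is hypothetical, and nil is passed for the TLS config and options so the defaults above apply:

```go
package main

import (
	"log"
	"net/http"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// NewWebsocket negotiates the "mqtt" subprotocol and returns a net.Conn
	// that MQTT packets can be read from and written to directly.
	conn, err := mqtt.NewWebsocket("ws://broker.example.com:8080/mqtt", nil, 30*time.Second, http.Header{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```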
5
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
Normal file
|
@@ -0,0 +1,5 @@
|
|||
root = true
|
||||
|
||||
[*]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
|
@@ -0,0 +1,6 @@
|
|||
# Setup a Global .gitignore for OS and editor generated files:
|
||||
# https://help.github.com/articles/ignoring-files
|
||||
# git config --global core.excludesfile ~/.gitignore_global
|
||||
|
||||
.vagrant
|
||||
*.sublime-project
|
30
vendor/github.com/fsnotify/fsnotify/.travis.yml
generated
vendored
Normal file
|
@@ -0,0 +1,30 @@
|
|||
sudo: false
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
before_script:
|
||||
- go get -u github.com/golang/lint/golint
|
||||
|
||||
script:
|
||||
- go test -v --race ./...
|
||||
|
||||
after_script:
|
||||
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
|
||||
- test -z "$(golint ./... | tee /dev/stderr)"
|
||||
- go vet ./...
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
notifications:
|
||||
email: false
|
52
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
Normal file
|
@@ -0,0 +1,52 @@
|
|||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron L <aaron@bettercoder.net>
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Anmol Sethi <me@anmol.io>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <chris@howey.me> <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Nickolai Zeldovich <nickolai@csail.mit.edu>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Rodrigo Chiossi <rodrigochiossi@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Vahe Khachikyan <vahe@live.ca>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
317
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
|
@@ -0,0 +1,317 @@
|
|||
# Changelog
|
||||
|
||||
## v1.4.7 / 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## v1.4.2 / 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## v1.4.1 / 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## v1.4.0 / 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## v1.3.1 / 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## v1.3.0 / 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## v1.2.10 / 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## v1.2.9 / 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## v1.2.8 / 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## v1.2.5 / 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## v1.2.1 / 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
|
@@ -0,0 +1,77 @@
|
|||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
28
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
|
@@ -0,0 +1,28 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
79
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,79 @@
|
|||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
|
||||
|Adapter |OS |Status |
|
||||
|----------|----------|----------|
|
||||
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|
||||
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|
||||
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|
||||
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|
||||
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|
||||
|fanotify |Linux 2.6.37+ | |
|
||||
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|
||||
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
37
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
|
@@ -0,0 +1,37 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
66
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
|
@@ -0,0 +1,66 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
Remove
|
||||
Rename
|
||||
Chmod
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
if buffer.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
return buffer.String()[1:] // Strip leading pipe
|
||||
}
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||
}
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var ErrEventOverflow = errors.New("fsnotify queue overflow")
|
337
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
|
@@ -0,0 +1,337 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error; we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// EOF was received. This should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
select {
|
||||
case w.Errors <- ErrEventOverflow:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel, such as events marked ignored by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
|
@@ -0,0 +1,187 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux

package fsnotify

import (
	"errors"

	"golang.org/x/sys/unix"
)

type fdPoller struct {
	fd   int    // File descriptor (as returned by the inotify_init() syscall)
	epfd int    // Epoll file descriptor
	pipe [2]int // Pipe for waking up
}

func emptyPoller(fd int) *fdPoller {
	poller := new(fdPoller)
	poller.fd = fd
	poller.epfd = -1
	poller.pipe[0] = -1
	poller.pipe[1] = -1
	return poller
}

// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
	var errno error
	poller := emptyPoller(fd)
	defer func() {
		if errno != nil {
			poller.close()
		}
	}()
	poller.fd = fd

	// Create epoll fd
	poller.epfd, errno = unix.EpollCreate1(0)
	if poller.epfd == -1 {
		return nil, errno
	}
	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
	if errno != nil {
		return nil, errno
	}

	// Register inotify fd with epoll
	event := unix.EpollEvent{
		Fd:     int32(poller.fd),
		Events: unix.EPOLLIN,
	}
	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
	if errno != nil {
		return nil, errno
	}

	// Register pipe fd with epoll
	event = unix.EpollEvent{
		Fd:     int32(poller.pipe[0]),
		Events: unix.EPOLLIN,
	}
	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
	if errno != nil {
		return nil, errno
	}

	return poller, nil
}

// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
	// I don't know whether epoll_wait returns the number of events returned,
	// or the total number of events ready.
	// I decided to catch both by making the buffer one larger than the maximum.
	events := make([]unix.EpollEvent, 7)
	for {
		n, errno := unix.EpollWait(poller.epfd, events, -1)
		if n == -1 {
			if errno == unix.EINTR {
				continue
			}
			return false, errno
		}
		if n == 0 {
			// If there are no events, try again.
			continue
		}
		if n > 6 {
			// This should never happen. More events were returned than should be possible.
			return false, errors.New("epoll_wait returned more events than I know what to do with")
		}
		ready := events[:n]
		epollhup := false
		epollerr := false
		epollin := false
		for _, event := range ready {
			if event.Fd == int32(poller.fd) {
				if event.Events&unix.EPOLLHUP != 0 {
					// This should not happen, but if it does, treat it as a wakeup.
					epollhup = true
				}
				if event.Events&unix.EPOLLERR != 0 {
					// If an error is waiting on the file descriptor, we should pretend
					// something is ready to read, and let unix.Read pick up the error.
					epollerr = true
				}
				if event.Events&unix.EPOLLIN != 0 {
					// There is data to read.
					epollin = true
				}
			}
			if event.Fd == int32(poller.pipe[0]) {
				if event.Events&unix.EPOLLHUP != 0 {
					// Write pipe descriptor was closed, by us. This means we're closing down the
					// watcher, and we should wake up.
				}
				if event.Events&unix.EPOLLERR != 0 {
					// If an error is waiting on the pipe file descriptor.
					// This is an absolute mystery, and should never ever happen.
					return false, errors.New("Error on the pipe descriptor.")
				}
				if event.Events&unix.EPOLLIN != 0 {
					// This is a regular wakeup, so we have to clear the buffer.
					err := poller.clearWake()
					if err != nil {
						return false, err
					}
				}
			}
		}

		if epollhup || epollerr || epollin {
			return true, nil
		}
		return false, nil
	}
}

// Close the write end of the poller.
func (poller *fdPoller) wake() error {
	buf := make([]byte, 1)
	n, errno := unix.Write(poller.pipe[1], buf)
	if n == -1 {
		if errno == unix.EAGAIN {
			// Buffer is full, poller will wake.
			return nil
		}
		return errno
	}
	return nil
}

func (poller *fdPoller) clearWake() error {
	// You have to be woken up a LOT in order to get to 100!
	buf := make([]byte, 100)
	n, errno := unix.Read(poller.pipe[0], buf)
	if n == -1 {
		if errno == unix.EAGAIN {
			// Buffer is empty, someone else cleared our wake.
			return nil
		}
		return errno
	}
	return nil
}

// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
	if poller.pipe[1] != -1 {
		unix.Close(poller.pipe[1])
	}
	if poller.pipe[0] != -1 {
		unix.Close(poller.pipe[0])
	}
	if poller.epfd != -1 {
		unix.Close(poller.epfd)
	}
}
521
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,521 @@
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// copy paths to remove while locked
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// send a "quit" message to the reader goroutine
|
||||
close(w.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
loop:
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
break loop
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do a rm -fr on a recursively watched folder and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark it as a delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel.
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryChangeEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
select {
|
||||
case w.Events <- newCreateEvent(filePath):
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd openbsd netbsd dragonfly

package fsnotify

import "golang.org/x/sys/unix"

const openMode = unix.O_NONBLOCK | unix.O_RDONLY
12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin

package fsnotify

import "golang.org/x/sys/unix"

// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY
561
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
@@ -0,0 +1,561 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
sysFSONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sysFSACCESS = 0x1
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCLOSE = 0x18
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
|
||||
// Special events
|
||||
sysFSIGNORED = 0x8000
|
||||
sysFSQOVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
25
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
9
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>

22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

  Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
@@ -0,0 +1,64 @@
# Gorilla WebSocket

[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)

Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.

### Documentation

* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)

### Status

The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.

### Installation

    go get github.com/gorilla/websocket

### Protocol Compliance

The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).

### Gorilla WebSocket compared with other packages

<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</tr></td>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>

Notes:

1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
   a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
   function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
   Read returns when the input buffer is full or a frame boundary is
   encountered. Each call to Write sends a single frame message. The Gorilla
   io.Reader and io.WriteCloser operate on a single WebSocket message.

395
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
@@ -0,0 +1,395 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
		ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
		defer cancel()
	}

	// Get network dial function.
	var netDial func(network, add string) (net.Conn, error)

	if d.NetDialContext != nil {
		netDial = func(network, addr string) (net.Conn, error) {
			return d.NetDialContext(ctx, network, addr)
		}
	} else if d.NetDial != nil {
		netDial = d.NetDial
	} else {
		netDialer := &net.Dialer{}
		netDial = func(network, addr string) (net.Conn, error) {
			return netDialer.DialContext(ctx, network, addr)
		}
	}

	// If needed, wrap the dial function to set the connection deadline.
	if deadline, ok := ctx.Deadline(); ok {
		forwardDial := netDial
		netDial = func(network, addr string) (net.Conn, error) {
			c, err := forwardDial(network, addr)
			if err != nil {
				return nil, err
			}
			err = c.SetDeadline(deadline)
			if err != nil {
				c.Close()
				return nil, err
			}
			return c, nil
		}
	}

	// If needed, wrap the dial function to connect through a proxy.
	if d.Proxy != nil {
		proxyURL, err := d.Proxy(req)
		if err != nil {
			return nil, nil, err
		}
		if proxyURL != nil {
			dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
			if err != nil {
				return nil, nil, err
			}
			netDial = dialer.Dial
		}
	}

	hostPort, hostNoPort := hostPortNoPort(u)
	trace := httptrace.ContextClientTrace(ctx)
	if trace != nil && trace.GetConn != nil {
		trace.GetConn(hostPort)
	}

	netConn, err := netDial("tcp", hostPort)
	if trace != nil && trace.GotConn != nil {
		trace.GotConn(httptrace.GotConnInfo{
			Conn: netConn,
		})
	}
	if err != nil {
		return nil, nil, err
	}

	defer func() {
		if netConn != nil {
			netConn.Close()
		}
	}()

	if u.Scheme == "https" {
		cfg := cloneTLSConfig(d.TLSClientConfig)
		if cfg.ServerName == "" {
			cfg.ServerName = hostNoPort
		}
		tlsConn := tls.Client(netConn, cfg)
		netConn = tlsConn

		var err error
		if trace != nil {
			err = doHandshakeWithTrace(trace, tlsConn, cfg)
		} else {
			err = doHandshake(tlsConn, cfg)
		}

		if err != nil {
			return nil, nil, err
		}
	}

	conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)

	if err := req.Write(netConn); err != nil {
		return nil, nil, err
	}

	if trace != nil && trace.GotFirstResponseByte != nil {
		if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
			trace.GotFirstResponseByte()
		}
	}

	resp, err := http.ReadResponse(conn.br, req)
	if err != nil {
		return nil, nil, err
	}

	if d.Jar != nil {
		if rc := resp.Cookies(); len(rc) > 0 {
			d.Jar.SetCookies(u, rc)
		}
	}

	if resp.StatusCode != 101 ||
		!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
		!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
		resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
		// Before closing the network connection on return from this
		// function, slurp up some of the response to aid application
		// debugging.
		buf := make([]byte, 1024)
		n, _ := io.ReadFull(resp.Body, buf)
		resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
		return nil, resp, ErrBadHandshake
	}

	for _, ext := range parseExtensions(resp.Header) {
		if ext[""] != "permessage-deflate" {
			continue
		}
		_, snct := ext["server_no_context_takeover"]
		_, cnct := ext["client_no_context_takeover"]
		if !snct || !cnct {
			return nil, resp, errInvalidCompression
		}
		conn.newCompressionWriter = compressNoContextTakeover
		conn.newDecompressionReader = decompressNoContextTakeover
		break
	}

	resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
	conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")

	netConn.SetDeadline(time.Time{})
	netConn = nil // to avoid close in defer.
	return conn, resp, nil
}

func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
	if err := tlsConn.Handshake(); err != nil {
		return err
	}
	if !cfg.InsecureSkipVerify {
		if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
			return err
		}
	}
	return nil
}
16
vendor/github.com/gorilla/websocket/client_clone.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
38
vendor/github.com/gorilla/websocket/client_clone_legacy.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
148
vendor/github.com/gorilla/websocket/compression.go
generated
vendored
Normal file
|
@@ -0,0 +1,148 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
1201
vendor/github.com/gorilla/websocket/conn.go
generated
vendored
Normal file
File diff suppressed because it is too large
15
vendor/github.com/gorilla/websocket/conn_write.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "net"

func (c *Conn) writeBufs(bufs ...[]byte) error {
	b := net.Buffers(bufs)
	_, err := b.WriteTo(c.conn)
	return err
}
18
vendor/github.com/gorilla/websocket/conn_write_legacy.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
227
vendor/github.com/gorilla/websocket/doc.go
generated
vendored
Normal file
|
@@ -0,0 +1,227 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
|
||||
// a value less than the maximum expected message size can greatly reduce memory
|
||||
// use with a small impact on performance. Here's an example: If 99% of the
|
||||
// messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number
|
||||
// writes over a large number of connections. when buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
3
vendor/github.com/gorilla/websocket/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/gorilla/websocket

go 1.12
0
vendor/github.com/gorilla/websocket/go.sum
generated
vendored
Normal file
42
vendor/github.com/gorilla/websocket/join.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"io"
	"strings"
)

// JoinMessages concatenates received messages to create a single io.Reader.
// The string term is appended to each message. The returned reader does not
// support concurrent calls to the Read method.
func JoinMessages(c *Conn, term string) io.Reader {
	return &joinReader{c: c, term: term}
}

type joinReader struct {
	c    *Conn
	term string
	r    io.Reader
}

func (r *joinReader) Read(p []byte) (int, error) {
	if r.r == nil {
		var err error
		_, r.r, err = r.c.NextReader()
		if err != nil {
			return 0, err
		}
		if r.term != "" {
			r.r = io.MultiReader(r.r, strings.NewReader(r.term))
		}
	}
	n, err := r.r.Read(p)
	if err == io.EOF {
		err = nil
		r.r = nil
	}
	return n, err
}
60
vendor/github.com/gorilla/websocket/json.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"encoding/json"
	"io"
)

// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}

// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
	w, err := c.NextWriter(TextMessage)
	if err != nil {
		return err
	}
	err1 := json.NewEncoder(w).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
	_, r, err := c.NextReader()
	if err != nil {
		return err
	}
	err = json.NewDecoder(r).Decode(v)
	if err == io.EOF {
		// One value is expected in the message.
		err = io.ErrUnexpectedEOF
	}
	return err
}
54
vendor/github.com/gorilla/websocket/mask.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build !appengine

package websocket

import "unsafe"

const wordSize = int(unsafe.Sizeof(uintptr(0)))

func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
15
vendor/github.com/gorilla/websocket/mask_safe.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
102
vendor/github.com/gorilla/websocket/prepared.go
generated
vendored
Normal file
|
@@ -0,0 +1,102 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
|
||||
// it to connection using WritePreparedMessage method. Valid wire
|
||||
// representation will be calculated lazily only once for a set of current
|
||||
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan struct{}, 1)
|
||||
mu <- struct{}{}
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
77
vendor/github.com/gorilla/websocket/proxy.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bufio"
	"encoding/base64"
	"errors"
	"net"
	"net/http"
	"net/url"
	"strings"
)

type netDialerFunc func(network, addr string) (net.Conn, error)

func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
	return fn(network, addr)
}

func init() {
	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
	})
}

type httpProxyDialer struct {
	proxyURL    *url.URL
	forwardDial func(network, addr string) (net.Conn, error)
}

func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
	hostPort, _ := hostPortNoPort(hpd.proxyURL)
	conn, err := hpd.forwardDial(network, hostPort)
	if err != nil {
		return nil, err
	}

	connectHeader := make(http.Header)
	if user := hpd.proxyURL.User; user != nil {
		proxyUser := user.Username()
		if proxyPassword, passwordSet := user.Password(); passwordSet {
			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
		}
	}

	connectReq := &http.Request{
		Method: "CONNECT",
		URL:    &url.URL{Opaque: addr},
		Host:   addr,
		Header: connectHeader,
	}

	if err := connectReq.Write(conn); err != nil {
		conn.Close()
		return nil, err
	}

	// Read response. It's OK to use and discard buffered reader here becaue
	// the remote server does not speak until spoken to.
	br := bufio.NewReader(conn)
	resp, err := http.ReadResponse(br, connectReq)
	if err != nil {
		conn.Close()
		return nil, err
	}

	if resp.StatusCode != 200 {
		conn.Close()
		f := strings.SplitN(resp.Status, " ", 2)
		return nil, errors.New(f[1])
	}
	return conn, nil
}
363
vendor/github.com/gorilla/websocket/server.go
generated
vendored
Normal file
|
@@ -0,0 +1,363 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize size returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it vio
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
19
vendor/github.com/gorilla/websocket/trace.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
// +build go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	if trace.TLSHandshakeStart != nil {
		trace.TLSHandshakeStart()
	}
	err := doHandshake(tlsConn, cfg)
	if trace.TLSHandshakeDone != nil {
		trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
	}
	return err
}
12
vendor/github.com/gorilla/websocket/trace_17.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
// +build !go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	return doHandshake(tlsConn, cfg)
}
283
vendor/github.com/gorilla/websocket/util.go
generated
vendored
Normal file
|
@@ -0,0 +1,283 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
473
vendor/github.com/gorilla/websocket/x_net_proxy.go
generated
vendored
Normal file
|
@@ -0,0 +1,473 @@
|
|||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
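
A usage sketch (not part of the bundled file) of how the environment-driven dialer and the PerHost bypass rules fit together; the proxy address and bypass entries are made up for illustration.

```go
// Hypothetical in-package sketch. Note that the environment variables are read
// only once per process (via proxy_envOnce below), so they must be set before
// the first call.
func exampleFromEnvironment() error {
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080")
	os.Setenv("NO_PROXY", "10.0.0.0/8,*.internal.example.com,localhost")

	// SOCKS5 for most destinations, direct for anything matching NO_PROXY.
	d := proxy_FromEnvironment()

	conn, err := d.Dial("tcp", "example.com:443")
	if err != nil {
		return err
	}
	return conn.Close()
}
```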
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
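
A sketch (not part of the bundled file) of the RegisterDialerType/FromURL hookup described above; the "example" scheme and its trivial dialer are assumptions for illustration.

```go
// Hypothetical in-package sketch: a custom scheme routed through FromURL.
func exampleRegisterDialerType() (proxy_Dialer, error) {
	proxy_RegisterDialerType("example", func(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
		// A trivial dialer that just delegates to the forwarding dialer.
		return forward, nil
	})
	// FromURL now recognizes the "example" scheme via the registry.
	return proxy_FromURL(&url.URL{Scheme: "example", Host: "unused:0"}, proxy_Direct)
}
```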
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
9
vendor/github.com/hashicorp/hcl/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
y.output
|
||||
|
||||
# ignore intellij files
|
||||
.idea
|
||||
*.iml
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
*.test
|
13
vendor/github.com/hashicorp/hcl/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make test
|
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,354 @@
|
|||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
18
vendor/github.com/hashicorp/hcl/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
TEST?=./...
|
||||
|
||||
default: test
|
||||
|
||||
fmt: generate
|
||||
go fmt ./...
|
||||
|
||||
test: generate
|
||||
go get -t ./...
|
||||
go test $(TEST) $(TESTARGS)
|
||||
|
||||
generate:
|
||||
go generate ./...
|
||||
|
||||
updatedeps:
|
||||
go get -u golang.org/x/tools/cmd/stringer
|
||||
|
||||
.PHONY: default generate test updatedeps
|
125
vendor/github.com/hashicorp/hcl/README.md
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
|||
# HCL
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
|
||||
|
||||
HCL (HashiCorp Configuration Language) is a configuration language built
|
||||
by HashiCorp. The goal of HCL is to build a structured configuration language
|
||||
that is both human and machine friendly for use with command-line tools, but
|
||||
specifically targeted towards DevOps tools, servers, etc.
|
||||
|
||||
HCL is also fully JSON compatible. That is, JSON can be used as completely
|
||||
valid input to a system expecting HCL. This helps make systems
|
||||
interoperable with other systems.
|
||||
|
||||
HCL is heavily inspired by
|
||||
[libucl](https://github.com/vstakhov/libucl),
|
||||
nginx configuration, and other similar languages.
|
||||
|
||||
## Why?
|
||||
|
||||
A common question when viewing HCL is to ask the question: why not
|
||||
JSON, YAML, etc.?
|
||||
|
||||
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
|
||||
used a variety of configuration languages from full programming languages
|
||||
such as Ruby to complete data structure languages such as JSON. What we
|
||||
learned is that some people wanted human-friendly configuration languages
|
||||
and some people wanted machine-friendly languages.
|
||||
|
||||
JSON fits a nice balance in this, but is fairly verbose and most
|
||||
importantly doesn't support comments. With YAML, we found that beginners
|
||||
had a really hard time determining what the actual structure was, and
|
||||
ended up guessing more often than not whether to use a hyphen, colon, etc.
|
||||
in order to represent some configuration key.
|
||||
|
||||
Full programming languages such as Ruby enable complex behavior
|
||||
a configuration language shouldn't usually allow, and also force
|
||||
people to learn some set of Ruby.
|
||||
|
||||
Because of this, we decided to create our own configuration language
|
||||
that is JSON-compatible. Our configuration language (HCL) is designed
|
||||
to be written and modified by humans. The API for HCL allows JSON
|
||||
as an input so that it is also machine-friendly (machines can generate
|
||||
JSON instead of trying to generate HCL).
|
||||
|
||||
Our goal with HCL is not to alienate other configuration languages.
|
||||
It is instead to provide HCL as a specialized language for our tools,
|
||||
and JSON as the interoperability layer.
|
||||
|
||||
## Syntax
|
||||
|
||||
For a complete grammar, please see the parser itself. A high-level overview
|
||||
of the syntax and grammar is listed here.
|
||||
|
||||
* Single line comments start with `#` or `//`
|
||||
|
||||
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
|
||||
are not allowed. A multi-line comment (also known as a block comment)
|
||||
terminates at the first `*/` found.
|
||||
|
||||
* Values are assigned with the syntax `key = value` (whitespace doesn't
|
||||
matter). The value can be any primitive: a string, number, boolean,
|
||||
object, or list.
|
||||
|
||||
* Strings are double-quoted and can contain any UTF-8 characters.
|
||||
Example: `"Hello, World"`
|
||||
|
||||
* Multi-line strings start with `<<EOF` at the end of a line, and end
|
||||
with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
|
||||
Any text may be used in place of `EOF`. Example:
|
||||
```
|
||||
<<FOO
|
||||
hello
|
||||
world
|
||||
FOO
|
||||
```
|
||||
|
||||
* Numbers are assumed to be base 10. If you prefix a number with 0x,
|
||||
it is treated as a hexadecimal. If it is prefixed with 0, it is
|
||||
treated as an octal. Numbers can be in scientific notation: "1e10".
|
||||
|
||||
* Boolean values: `true`, `false`
|
||||
|
||||
* Arrays can be made by wrapping values in `[]`. Example:
|
||||
`["foo", "bar", 42]`. Arrays can contain primitives,
|
||||
other arrays, and objects. As an alternative, lists
|
||||
of objects can be created with repeated blocks, using
|
||||
this structure:
|
||||
|
||||
```hcl
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
```
|
||||
|
||||
Objects and nested objects are created using the structure shown below:
|
||||
|
||||
```
|
||||
variable "ami" {
|
||||
description = "the AMI to use"
|
||||
}
|
||||
```
|
||||
This would be equivalent to the following JSON:
|
||||
``` json
|
||||
{
|
||||
"variable": {
|
||||
"ami": {
|
||||
"description": "the AMI to use"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks to:
|
||||
|
||||
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
|
||||
and syntax that HCL was based off of.
|
||||
|
||||
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
|
||||
in pure Go (no goyacc) and support for a printer.
|
19
vendor/github.com/hashicorp/hcl/appveyor.yml
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
version: "build-{branch}-{build}"
|
||||
image: Visual Studio 2015
|
||||
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
init:
|
||||
- git config --global core.autocrlf false
|
||||
install:
|
||||
- cmd: >-
|
||||
echo %Path%
|
||||
|
||||
go version
|
||||
|
||||
go env
|
||||
|
||||
go get -t ./...
|
||||
|
||||
build_script:
|
||||
- cmd: go test -v ./...
|
729
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
Normal file
|
@ -0,0 +1,729 @@
|
|||
package hcl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/parser"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// This is the tag to use with structures to have settings for HCL
|
||||
const tagName = "hcl"
|
||||
|
||||
var (
|
||||
// nodeType holds a reference to the type of ast.Node
|
||||
nodeType reflect.Type = findNodeType()
|
||||
)
|
||||
|
||||
// Unmarshal accepts a byte slice as input and writes the
|
||||
// data to the value pointed to by v.
|
||||
func Unmarshal(bs []byte, v interface{}) error {
|
||||
root, err := parse(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(v, root)
|
||||
}
|
||||
|
||||
// Decode reads the given input and decodes it into the structure
|
||||
// given by `out`.
|
||||
func Decode(out interface{}, in string) error {
|
||||
obj, err := Parse(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(out, obj)
|
||||
}
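
A brief usage sketch (not part of the vendored file) of Decode with `hcl` struct tags; the struct shapes and field names are illustrative assumptions.

```go
// Hypothetical in-package sketch: repeated `service` blocks decode into the
// slice field tagged "service".
type exampleService struct {
	Key string `hcl:"key"`
}

type exampleConfig struct {
	Services []exampleService `hcl:"service"`
}

func exampleDecode() (*exampleConfig, error) {
	input := `
service { key = "a" }
service { key = "b" }
`
	var cfg exampleConfig
	if err := Decode(&cfg, input); err != nil {
		return nil, err
	}
	// cfg.Services now holds two entries, with Key "a" and "b".
	return &cfg, nil
}
```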
|
||||
|
||||
// DecodeObject is a lower-level version of Decode. It decodes a
|
||||
// raw Object into the given output.
|
||||
func DecodeObject(out interface{}, n ast.Node) error {
|
||||
val := reflect.ValueOf(out)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
// If we have the file, we really decode the root node
|
||||
if f, ok := n.(*ast.File); ok {
|
||||
n = f.Node
|
||||
}
|
||||
|
||||
var d decoder
|
||||
return d.decode("root", n, val.Elem())
|
||||
}
|
||||
|
||||
type decoder struct {
|
||||
stack []reflect.Kind
|
||||
}
|
||||
|
||||
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
|
||||
k := result
|
||||
|
||||
// If we have an interface with a valid value, we use that
|
||||
// for the check.
|
||||
if result.Kind() == reflect.Interface {
|
||||
elem := result.Elem()
|
||||
if elem.IsValid() {
|
||||
k = elem
|
||||
}
|
||||
}
|
||||
|
||||
// Push current onto stack unless it is an interface.
|
||||
if k.Kind() != reflect.Interface {
|
||||
d.stack = append(d.stack, k.Kind())
|
||||
|
||||
// Schedule a pop
|
||||
defer func() {
|
||||
d.stack = d.stack[:len(d.stack)-1]
|
||||
}()
|
||||
}
|
||||
|
||||
switch k.Kind() {
|
||||
case reflect.Bool:
|
||||
return d.decodeBool(name, node, result)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return d.decodeFloat(name, node, result)
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
return d.decodeInt(name, node, result)
|
||||
case reflect.Interface:
|
||||
// When we see an interface, we make our own thing
|
||||
return d.decodeInterface(name, node, result)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(name, node, result)
|
||||
case reflect.Ptr:
|
||||
return d.decodePtr(name, node, result)
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(name, node, result)
|
||||
case reflect.String:
|
||||
return d.decodeString(name, node, result)
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(name, node, result)
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.BOOL {
|
||||
v, err := strconv.ParseBool(n.Token.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
|
||||
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
case token.STRING:
|
||||
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
|
||||
// When we see an ast.Node, we retain the value to enable deferred decoding.
|
||||
// Very useful in situations where we want to preserve ast.Node information
|
||||
// like Pos
|
||||
if result.Type() == nodeType && result.CanSet() {
|
||||
result.Set(reflect.ValueOf(node))
|
||||
return nil
|
||||
}
|
||||
|
||||
var set reflect.Value
|
||||
redecode := true
|
||||
|
||||
// For testing types, ObjectType should just be treated as a list. We
|
||||
// set this to a temporary var because we want to pass in the real node.
|
||||
testNode := node
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
testNode = ot.List
|
||||
}
|
||||
|
||||
switch n := testNode.(type) {
|
||||
case *ast.ObjectList:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
|
||||
set = result
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
|
||||
set = result
|
||||
}
|
||||
case *ast.ListType:
|
||||
var temp []interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
|
||||
set = result
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.BOOL:
|
||||
var result bool
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.FLOAT:
|
||||
var result float64
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.NUMBER:
|
||||
var result int
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.STRING, token.HEREDOC:
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"%s: cannot decode into interface: %T",
|
||||
name, node)
|
||||
}
|
||||
|
||||
// Set the result to what it's supposed to be, then reset
|
||||
// result so we don't reflect into this method anymore.
|
||||
result.Set(set)
|
||||
|
||||
if redecode {
|
||||
// Revisit the node so that we can use the newly instantiated
|
||||
// thing and populate it.
|
||||
if err := d.decode(name, node, result); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
|
||||
if item, ok := node.(*ast.ObjectItem); ok {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
n, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the map itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
resultKeyType := resultType.Key()
|
||||
if resultKeyType.Kind() != reflect.String {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Make a map if it is nil
|
||||
resultMap := result
|
||||
if result.IsNil() {
|
||||
resultMap = reflect.MakeMap(
|
||||
reflect.MapOf(resultKeyType, resultElemType))
|
||||
}
|
||||
|
||||
// Go through each element and decode it.
|
||||
done := make(map[string]struct{})
|
||||
for _, item := range n.Items {
|
||||
if item.Val == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// github.com/hashicorp/terraform/issue/5740
|
||||
if len(item.Keys) == 0 {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Get the key we're dealing with, which is the first item
|
||||
keyStr := item.Keys[0].Token.Value().(string)
|
||||
|
||||
// If we've already processed this key, then ignore it
|
||||
if _, ok := done[keyStr]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine the value. If we have more than one key, then we
|
||||
// get the objectlist of only these keys.
|
||||
itemVal := item.Val
|
||||
if len(item.Keys) > 1 {
|
||||
itemVal = n.Filter(keyStr)
|
||||
done[keyStr] = struct{}{}
|
||||
}
|
||||
|
||||
// Make the field name
|
||||
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
|
||||
|
||||
// Get the key/value as reflection values
|
||||
key := reflect.ValueOf(keyStr)
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// If we have a pre-existing value in the map, use that
|
||||
oldVal := resultMap.MapIndex(key)
|
||||
if oldVal.IsValid() {
|
||||
val.Set(oldVal)
|
||||
}
|
||||
|
||||
// Decode!
|
||||
if err := d.decode(fieldName, itemVal, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the value on the map
|
||||
resultMap.SetMapIndex(key, val)
|
||||
}
|
||||
|
||||
// Set the final map if we can
|
||||
set.Set(resultMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
val := reflect.New(resultElemType)
|
||||
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the slice itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
// Create the slice if it is nil
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
if result.IsNil() {
|
||||
resultSliceType := reflect.SliceOf(resultElemType)
|
||||
result = reflect.MakeSlice(
|
||||
resultSliceType, 0, 0)
|
||||
}
|
||||
|
||||
// Figure out the items we'll be copying into the slice
|
||||
var items []ast.Node
|
||||
switch n := node.(type) {
|
||||
case *ast.ObjectList:
|
||||
items = make([]ast.Node, len(n.Items))
|
||||
for i, item := range n.Items {
|
||||
items[i] = item
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
items = []ast.Node{n}
|
||||
case *ast.ListType:
|
||||
items = n.List
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("unknown slice type: %T", node),
|
||||
}
|
||||
}
|
||||
|
||||
for i, item := range items {
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
|
||||
// Decode
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// if item is an object that was decoded from ambiguous JSON and
|
||||
// flattened, make sure it's expanded if it needs to decode into a
|
||||
// defined structure.
|
||||
item := expandObject(item, val)
|
||||
|
||||
if err := d.decode(fieldName, item, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append it onto the slice
|
||||
result = reflect.Append(result, val)
|
||||
}
|
||||
|
||||
set.Set(result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// expandObject detects if an ambiguous JSON object was flattened to a List which
|
||||
// should be decoded into a struct, and expands the ast to properly decode.
|
||||
func expandObject(node ast.Node, result reflect.Value) ast.Node {
|
||||
item, ok := node.(*ast.ObjectItem)
|
||||
if !ok {
|
||||
return node
|
||||
}
|
||||
|
||||
elemType := result.Type()
|
||||
|
||||
// our target type must be a struct
|
||||
switch elemType.Kind() {
|
||||
case reflect.Ptr:
|
||||
switch elemType.Elem().Kind() {
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
|
||||
// A list value will have a key and field name. If it had more fields,
|
||||
// it wouldn't have been flattened.
|
||||
if len(item.Keys) != 2 {
|
||||
return node
|
||||
}
|
||||
|
||||
keyToken := item.Keys[0].Token
|
||||
item.Keys = item.Keys[1:]
|
||||
|
||||
// we need to un-flatten the ast enough to decode
|
||||
newNode := &ast.ObjectItem{
|
||||
Keys: []*ast.ObjectKey{
|
||||
&ast.ObjectKey{
|
||||
Token: keyToken,
|
||||
},
|
||||
},
|
||||
Val: &ast.ObjectType{
|
||||
List: &ast.ObjectList{
|
||||
Items: []*ast.ObjectItem{item},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return newNode
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
|
||||
return nil
|
||||
case token.STRING, token.HEREDOC:
|
||||
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
|
||||
var item *ast.ObjectItem
|
||||
if it, ok := node.(*ast.ObjectItem); ok {
|
||||
item = it
|
||||
node = it.Val
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
// Handle the special case where the object itself is a literal. Previously
|
||||
// the yacc parser would always ensure top-level elements were arrays. The new
|
||||
// parser does not make the same guarantees, thus we need to convert any
|
||||
// top-level literal elements into a list.
|
||||
if _, ok := node.(*ast.LiteralType); ok && item != nil {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
list, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
// There can be more than one struct if there are embedded structs
|
||||
// that are squashed.
|
||||
structs := make([]reflect.Value, 1, 5)
|
||||
structs[0] = result
|
||||
|
||||
// Compile the list of all the fields that we're going to be decoding
|
||||
// from all the structs.
|
||||
type field struct {
|
||||
field reflect.StructField
|
||||
val reflect.Value
|
||||
}
|
||||
fields := []field{}
|
||||
for len(structs) > 0 {
|
||||
structVal := structs[0]
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
||||
|
||||
// Ignore fields with tag name "-"
|
||||
if tagParts[0] == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
if fieldType.Anonymous {
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
if fieldKind != reflect.Struct {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unsupported type to struct: %s",
|
||||
fieldType.Name, fieldKind),
|
||||
}
|
||||
}
|
||||
|
||||
// We have an embedded field. We "squash" the fields down
|
||||
// if specified in the tag.
|
||||
squash := false
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
structs = append(
|
||||
structs, result.FieldByName(fieldType.Name))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
fields = append(fields, field{fieldType, structVal.Field(i)})
|
||||
}
|
||||
}
|
||||
|
||||
usedKeys := make(map[string]struct{})
|
||||
decodedFields := make([]string, 0, len(fields))
|
||||
decodedFieldsVal := make([]reflect.Value, 0)
|
||||
unusedKeysVal := make([]reflect.Value, 0)
|
||||
for _, f := range fields {
|
||||
field, fieldValue := f.field, f.val
|
||||
if !fieldValue.IsValid() {
|
||||
// This should never happen
|
||||
panic("field is not valid")
|
||||
}
|
||||
|
||||
// If we can't set the field, then it is unexported or something,
|
||||
// and we just continue onwards.
|
||||
if !fieldValue.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := field.Name
|
||||
|
||||
tagValue := field.Tag.Get(tagName)
|
||||
tagParts := strings.SplitN(tagValue, ",", 2)
|
||||
if len(tagParts) >= 2 {
|
||||
switch tagParts[1] {
|
||||
case "decodedFields":
|
||||
decodedFieldsVal = append(decodedFieldsVal, fieldValue)
|
||||
continue
|
||||
case "key":
|
||||
if item == nil {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
|
||||
name, fieldName),
|
||||
}
|
||||
}
|
||||
|
||||
fieldValue.SetString(item.Keys[0].Token.Value().(string))
|
||||
continue
|
||||
case "unusedKeys":
|
||||
unusedKeysVal = append(unusedKeysVal, fieldValue)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tagParts[0] != "" {
|
||||
fieldName = tagParts[0]
|
||||
}
|
||||
|
||||
// Determine the element we'll use to decode. If it is a single
|
||||
// match (only object with the field), then we decode it exactly.
|
||||
// If it is a prefix match, then we decode the matches.
|
||||
filter := list.Filter(fieldName)
|
||||
|
||||
prefixMatches := filter.Children()
|
||||
matches := filter.Elem()
|
||||
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Track the used key
|
||||
usedKeys[fieldName] = struct{}{}
|
||||
|
||||
// Create the field name and decode. We range over the elements
|
||||
// because we actually want the value.
|
||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||
if len(prefixMatches.Items) > 0 {
|
||||
if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, match := range matches.Items {
|
||||
var decodeNode ast.Node = match.Val
|
||||
if ot, ok := decodeNode.(*ast.ObjectType); ok {
|
||||
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
||||
}
|
||||
|
||||
if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
decodedFields = append(decodedFields, field.Name)
|
||||
}
|
||||
|
||||
if len(decodedFieldsVal) > 0 {
|
||||
// Sort it so that it is deterministic
|
||||
sort.Strings(decodedFields)
|
||||
|
||||
for _, v := range decodedFieldsVal {
|
||||
v.Set(reflect.ValueOf(decodedFields))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findNodeType returns the type of ast.Node
|
||||
func findNodeType() reflect.Type {
|
||||
var nodeContainer struct {
|
||||
Node ast.Node
|
||||
}
|
||||
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
|
||||
return value.Type()
|
||||
}
|
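The tail of the decoder above handles struct tags such as ",key", ",decodedFields", ",unusedKeys" and the "squash" option for embedded structs. Below is a minimal, hedged sketch of how those tags behave through the package's public Decode helper; the struct, field names and sample HCL are illustrative assumptions, not part of this diff.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Service illustrates the tag handling implemented in decodeStruct: ",key"
// receives the block label, and ",decodedFields" receives the sorted names of
// the struct fields that were actually decoded.
type Service struct {
	Name    string   `hcl:",key"`
	Port    int      `hcl:"port"`
	Decoded []string `hcl:",decodedFields"`
}

type Config struct {
	Services []Service `hcl:"service"`
}

func main() {
	const src = `
service "web" {
  port = 8080
}
`
	var cfg Config
	if err := hcl.Decode(&cfg, src); err != nil {
		log.Fatal(err)
	}
	// Expected shape (illustrative): Name carries the "web" label and Decoded
	// lists the fields set from the HCL body, e.g. [Port].
	fmt.Printf("%+v\n", cfg.Services[0])
}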
3
vendor/github.com/hashicorp/hcl/go.mod
generated
vendored
Normal file
3
vendor/github.com/hashicorp/hcl/go.mod
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
module github.com/hashicorp/hcl
|
||||
|
||||
require github.com/davecgh/go-spew v1.1.1
|
2
vendor/github.com/hashicorp/hcl/go.sum
generated
vendored
Normal file
2
vendor/github.com/hashicorp/hcl/go.sum
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
// Package hcl decodes HCL into usable Go structures.
|
||||
//
|
||||
// hcl input can come in either pure HCL format or JSON format.
|
||||
// It can be parsed into an AST, and then decoded into a structure,
|
||||
// or it can be decoded directly from a string into a structure.
|
||||
//
|
||||
// If you choose to parse HCL into a raw AST, the benefit is that you
|
||||
// can write custom visitor implementations to implement custom
|
||||
// semantic checks. By default, HCL does not perform any semantic
|
||||
// checks.
|
||||
package hcl
|
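A short usage sketch of the two paths the package comment describes: decoding a string directly, and parsing to an AST first (for custom semantic checks) before decoding the node. Decode, Parse and DecodeObject are public helpers of this package defined outside this hunk; the input below is an assumption for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type App struct {
	Name  string `hcl:"name"`
	Debug bool   `hcl:"debug"`
}

func main() {
	const src = `name = "demo"
debug = true`

	// Path 1: decode directly from a string into a structure.
	var direct App
	if err := hcl.Decode(&direct, src); err != nil {
		log.Fatal(err)
	}

	// Path 2: parse into an AST first, then decode the root node. The AST
	// could be inspected or rewritten before decoding.
	file, err := hcl.Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	var viaAST App
	if err := hcl.DecodeObject(&viaAST, file.Node); err != nil {
		log.Fatal(err)
	}

	fmt.Println(direct, viaAST)
}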
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
|
@ -0,0 +1,219 @@
|
|||
// Package ast declares the types used to represent syntax trees for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Node is an element in the abstract syntax tree.
|
||||
type Node interface {
|
||||
node()
|
||||
Pos() token.Pos
|
||||
}
|
||||
|
||||
func (File) node() {}
|
||||
func (ObjectList) node() {}
|
||||
func (ObjectKey) node() {}
|
||||
func (ObjectItem) node() {}
|
||||
func (Comment) node() {}
|
||||
func (CommentGroup) node() {}
|
||||
func (ObjectType) node() {}
|
||||
func (LiteralType) node() {}
|
||||
func (ListType) node() {}
|
||||
|
||||
// File represents a single HCL file
|
||||
type File struct {
|
||||
Node Node // usually a *ObjectList
|
||||
Comments []*CommentGroup // list of all comments in the source
|
||||
}
|
||||
|
||||
func (f *File) Pos() token.Pos {
|
||||
return f.Node.Pos()
|
||||
}
|
||||
|
||||
// ObjectList represents a list of ObjectItems. An HCL file itself is an
|
||||
// ObjectList.
|
||||
type ObjectList struct {
|
||||
Items []*ObjectItem
|
||||
}
|
||||
|
||||
func (o *ObjectList) Add(item *ObjectItem) {
|
||||
o.Items = append(o.Items, item)
|
||||
}
|
||||
|
||||
// Filter filters out the objects with the given key list as a prefix.
|
||||
//
|
||||
// The returned list of objects contains ObjectItems where the keys have
|
||||
// this prefix already stripped off. This might result in objects with
|
||||
// zero-length key lists if they have no children.
|
||||
//
|
||||
// If no matches are found, an empty ObjectList (non-nil) is returned.
|
||||
func (o *ObjectList) Filter(keys ...string) *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
// If there aren't enough keys, then ignore this
|
||||
if len(item.Keys) < len(keys) {
|
||||
continue
|
||||
}
|
||||
|
||||
match := true
|
||||
for i, key := range item.Keys[:len(keys)] {
|
||||
key := key.Token.Value().(string)
|
||||
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
// Strip off the prefix from the children
|
||||
newItem := *item
|
||||
newItem.Keys = newItem.Keys[len(keys):]
|
||||
result.Add(&newItem)
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Children returns further nested objects (key length > 0) within this
|
||||
// ObjectList. This should be used with Filter to get at child items.
|
||||
func (o *ObjectList) Children() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) > 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Elem returns items in the list that are direct element assignments
|
||||
// (key length == 0). This should be used with Filter to get at elements.
|
||||
func (o *ObjectList) Elem() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) == 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
func (o *ObjectList) Pos() token.Pos {
|
||||
// returns the position of the first item
|
||||
return o.Items[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectItem represents a HCL Object Item. An item is represented with a key
|
||||
// (or keys). It can be an assignment or an object (both normal and nested)
|
||||
type ObjectItem struct {
|
||||
// keys is only one element long if the item is an assignment. If it's a
// nested object it can be longer than one. In that case "assign" is
// invalid as there are no assignments for a nested object.
|
||||
Keys []*ObjectKey
|
||||
|
||||
// assign contains the position of "=", if any
|
||||
Assign token.Pos
|
||||
|
||||
// val is the item itself. It can be an object, list, number, bool or a
|
||||
// string. If key length is larger than one, val can be only of type
|
||||
// Object.
|
||||
Val Node
|
||||
|
||||
LeadComment *CommentGroup // associated lead comment
|
||||
LineComment *CommentGroup // associated line comment
|
||||
}
|
||||
|
||||
func (o *ObjectItem) Pos() token.Pos {
|
||||
// I'm not entirely sure what causes this, but removing this causes
|
||||
// a test failure. We should investigate at some point.
|
||||
if len(o.Keys) == 0 {
|
||||
return token.Pos{}
|
||||
}
|
||||
|
||||
return o.Keys[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectKeys are either identifiers or strings.
|
||||
type ObjectKey struct {
|
||||
Token token.Token
|
||||
}
|
||||
|
||||
func (o *ObjectKey) Pos() token.Pos {
|
||||
return o.Token.Pos
|
||||
}
|
||||
|
||||
// LiteralType represents a literal of basic type. Valid types are:
|
||||
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
|
||||
type LiteralType struct {
|
||||
Token token.Token
|
||||
|
||||
// comment types, only used when in a list
|
||||
LeadComment *CommentGroup
|
||||
LineComment *CommentGroup
|
||||
}
|
||||
|
||||
func (l *LiteralType) Pos() token.Pos {
|
||||
return l.Token.Pos
|
||||
}
|
||||
|
||||
// ListType represents a HCL List type
|
||||
type ListType struct {
|
||||
Lbrack token.Pos // position of "["
|
||||
Rbrack token.Pos // position of "]"
|
||||
List []Node // the elements in lexical order
|
||||
}
|
||||
|
||||
func (l *ListType) Pos() token.Pos {
|
||||
return l.Lbrack
|
||||
}
|
||||
|
||||
func (l *ListType) Add(node Node) {
|
||||
l.List = append(l.List, node)
|
||||
}
|
||||
|
||||
// ObjectType represents a HCL Object Type
|
||||
type ObjectType struct {
|
||||
Lbrace token.Pos // position of "{"
|
||||
Rbrace token.Pos // position of "}"
|
||||
List *ObjectList // the nodes in lexical order
|
||||
}
|
||||
|
||||
func (o *ObjectType) Pos() token.Pos {
|
||||
return o.Lbrace
|
||||
}
|
||||
|
||||
// Comment node represents a single //, # style or /*- style comment
|
||||
type Comment struct {
|
||||
Start token.Pos // position of / or #
|
||||
Text string
|
||||
}
|
||||
|
||||
func (c *Comment) Pos() token.Pos {
|
||||
return c.Start
|
||||
}
|
||||
|
||||
// CommentGroup node represents a sequence of comments with no other tokens and
|
||||
// no empty lines between.
|
||||
type CommentGroup struct {
|
||||
List []*Comment // len(List) > 0
|
||||
}
|
||||
|
||||
func (c *CommentGroup) Pos() token.Pos {
|
||||
return c.List[0].Pos()
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// GoStringer
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
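The Filter, Children and Elem helpers above are the usual way to work with an ObjectList after parsing. A hedged sketch follows, built on the parser package that appears later in this diff; the sample configuration is an illustrative assumption.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
service "web" {
  port = 8080
}
region = "us-east-1"
`)
	file, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	list, ok := file.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("top-level node is not an *ast.ObjectList")
	}

	// Filter strips the "service" prefix from matching items; Children keeps
	// the ones that still have keys left (the labelled "web" block).
	for _, item := range list.Filter("service").Children().Items {
		fmt.Println("service label:", item.Keys[0].Token.Value())
	}

	// Elem keeps direct assignments (zero keys remaining after the filter),
	// here the top-level region = "..." item.
	for _, item := range list.Filter("region").Elem().Items {
		fmt.Println("region assigned at", item.Assign)
	}
}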
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package ast
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WalkFunc describes a function to be called for each node during a Walk. The
|
||||
// returned node can be used to rewrite the AST. Walking stops if the returned
|
||||
// bool is false.
|
||||
type WalkFunc func(Node) (Node, bool)
|
||||
|
||||
// Walk traverses an AST in depth-first order: It starts by calling fn(node);
|
||||
// node must not be nil. If fn returns true, Walk invokes fn recursively for
|
||||
// each of the non-nil children of node, followed by a call of fn(nil). The
|
||||
// returned node of fn can be used to rewrite the passed node to fn.
|
||||
func Walk(node Node, fn WalkFunc) Node {
|
||||
rewritten, ok := fn(node)
|
||||
if !ok {
|
||||
return rewritten
|
||||
}
|
||||
|
||||
switch n := node.(type) {
|
||||
case *File:
|
||||
n.Node = Walk(n.Node, fn)
|
||||
case *ObjectList:
|
||||
for i, item := range n.Items {
|
||||
n.Items[i] = Walk(item, fn).(*ObjectItem)
|
||||
}
|
||||
case *ObjectKey:
|
||||
// nothing to do
|
||||
case *ObjectItem:
|
||||
for i, k := range n.Keys {
|
||||
n.Keys[i] = Walk(k, fn).(*ObjectKey)
|
||||
}
|
||||
|
||||
if n.Val != nil {
|
||||
n.Val = Walk(n.Val, fn)
|
||||
}
|
||||
case *LiteralType:
|
||||
// nothing to do
|
||||
case *ListType:
|
||||
for i, l := range n.List {
|
||||
n.List[i] = Walk(l, fn)
|
||||
}
|
||||
case *ObjectType:
|
||||
n.List = Walk(n.List, fn).(*ObjectList)
|
||||
default:
|
||||
// should we panic here?
|
||||
fmt.Printf("unknown type: %T\n", n)
|
||||
}
|
||||
|
||||
fn(nil)
|
||||
return rewritten
|
||||
}
|
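Walk, as documented above, visits every node, lets the callback rewrite it, and also invokes the callback with nil after a node's children; returning false stops the descent. A small hedged sketch (sample input assumed) that counts literal nodes:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`a = 1
b = "two"`)
	file, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	literals := 0
	ast.Walk(file.Node, func(n ast.Node) (ast.Node, bool) {
		// Walk also calls the function with nil after a node's children.
		if n == nil {
			return n, false
		}
		if _, ok := n.(*ast.LiteralType); ok {
			literals++
		}
		// Return the node unchanged and true to keep descending.
		return n, true
	})
	fmt.Println("literal nodes:", literals) // 2 for the input above
}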
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// PosError is a parse error that contains a position.
|
||||
type PosError struct {
|
||||
Pos token.Pos
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PosError) Error() string {
|
||||
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
|
||||
}
|
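PosError is what the parser returns for many syntax problems, so callers can report where parsing failed. A brief hedged sketch; the broken input and the expectation that it produces a *PosError are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// An unterminated list should fail to parse.
	_, err := parser.Parse([]byte(`foo = [1,`))
	if perr, ok := err.(*parser.PosError); ok {
		fmt.Printf("parse failed at line %d, column %d: %v\n",
			perr.Pos.Line, perr.Pos.Column, perr.Err)
	} else if err != nil {
		fmt.Println("parse failed:", err)
	}
}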
532
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
532
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,532 @@
|
|||
// Package parser implements a parser for HCL (HashiCorp Configuration
|
||||
// Language)
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/scanner"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
sc *scanner.Scanner
|
||||
|
||||
// Last read token
|
||||
tok token.Token
|
||||
commaPrev token.Token
|
||||
|
||||
comments []*ast.CommentGroup
|
||||
leadComment *ast.CommentGroup // last lead comment
|
||||
lineComment *ast.CommentGroup // last line comment
|
||||
|
||||
enableTrace bool
|
||||
indent int
|
||||
n int // buffer size (max = 1)
|
||||
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
// normalize all line endings
// since the scanner and output only work with "\n" line endings, we would
// otherwise end up with dangling "\r" characters in the parsed data.
|
||||
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
|
||||
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func (p *Parser) Parse() (*ast.File, error) {
|
||||
f := &ast.File{}
|
||||
var err, scerr error
|
||||
p.sc.Error = func(pos token.Pos, msg string) {
|
||||
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
|
||||
}
|
||||
|
||||
f.Node, err = p.objectList(false)
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.Comments = p.comments
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// objectList parses a list of items within an object (generally k/v pairs).
|
||||
// The parameter "obj" tells us whether we are within an object (braces:
|
||||
// '{', '}') or just at the top level. If we're within an object, we end
|
||||
// at an RBRACE.
|
||||
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
if obj {
|
||||
tok := p.scan()
|
||||
p.unscan()
|
||||
if tok.Type == token.RBRACE {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
}
|
||||
|
||||
// we don't return a nil node, because we might want to use the already
|
||||
// collected items.
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
|
||||
node.Add(n)
|
||||
|
||||
// object lists can be optionally comma-delimited e.g. when a list of maps
|
||||
// is being expressed, so a comma is allowed here - it's simply consumed
|
||||
tok := p.scan()
|
||||
if tok.Type != token.COMMA {
|
||||
p.unscan()
|
||||
}
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
// count the endline if it's a multiline comment, i.e. starting with /*
|
||||
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
|
||||
// don't use range here - no need to decode Unicode code points
|
||||
for i := 0; i < len(p.tok.Text); i++ {
|
||||
if p.tok.Text[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
|
||||
p.tok = p.sc.Scan()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
|
||||
var list []*ast.Comment
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
|
||||
var comment *ast.Comment
|
||||
comment, endline = p.consumeComment()
|
||||
list = append(list, comment)
|
||||
}
|
||||
|
||||
// add comment group to the comments list
|
||||
comments = &ast.CommentGroup{List: list}
|
||||
p.comments = append(p.comments, comments)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// objectItem parses a single object item
|
||||
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
||||
defer un(trace(p, "ParseObjectItem"))
|
||||
|
||||
keys, err := p.objectKey()
|
||||
if len(keys) > 0 && err == errEofToken {
|
||||
// We ignore eof token here since it is an error if we didn't
|
||||
// receive a value (but we did receive a key) for the item.
|
||||
err = nil
|
||||
}
|
||||
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
|
||||
// This is a strange boolean statement, but what it means is:
|
||||
// We have keys with no value, and we're likely in an object
|
||||
// (since RBrace ends an object). For this, we set err to nil so
|
||||
// we continue and get the error below of having the wrong value
|
||||
// type.
|
||||
err = nil
|
||||
|
||||
// Reset the token type so we don't think it completed fine. See
|
||||
// objectType which uses p.tok.Type to check if we're done with
|
||||
// the object.
|
||||
p.tok.Type = token.EOF
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o := &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
}
|
||||
|
||||
if p.leadComment != nil {
|
||||
o.LeadComment = p.leadComment
|
||||
p.leadComment = nil
|
||||
}
|
||||
|
||||
switch p.tok.Type {
|
||||
case token.ASSIGN:
|
||||
o.Assign = p.tok.Pos
|
||||
o.Val, err = p.object()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case token.LBRACE:
|
||||
o.Val, err = p.objectType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
keyStr := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
keyStr = append(keyStr, k.Token.Text)
|
||||
}
|
||||
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"key '%s' expected start of object ('{') or assignment ('=')",
|
||||
strings.Join(keyStr, " ")),
|
||||
}
|
||||
}
|
||||
|
||||
// key=#comment
|
||||
// val
|
||||
if p.lineComment != nil {
|
||||
o.LineComment, p.lineComment = p.lineComment, nil
|
||||
}
|
||||
|
||||
// do a look-ahead for line comment
|
||||
p.scan()
|
||||
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
|
||||
o.LineComment = p.lineComment
|
||||
p.lineComment = nil
|
||||
}
|
||||
p.unscan()
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// objectKey parses an object key and returns a ObjectKey AST
|
||||
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||
keyCount := 0
|
||||
keys := make([]*ast.ObjectKey, 0)
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.EOF:
|
||||
// It is very important to also return the keys here as well as
|
||||
// the error. This is because we need to be able to tell if we
|
||||
// did parse keys prior to finding the EOF, or if we just found
|
||||
// a bare EOF.
|
||||
return keys, errEofToken
|
||||
case token.ASSIGN:
|
||||
// assignment or object only, but not nested objects. this is not
|
||||
// allowed: `foo bar = {}`
|
||||
if keyCount > 1 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
if keyCount == 0 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: errors.New("no object keys found!"),
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
case token.LBRACE:
|
||||
var err error
|
||||
|
||||
// If we have no keys, then it is a syntax error. i.e. {{}} is not
|
||||
// allowed.
|
||||
if len(keys) == 0 {
|
||||
err = &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
// object
|
||||
return keys, err
|
||||
case token.IDENT, token.STRING:
|
||||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{Token: p.tok})
|
||||
case token.ILLEGAL:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("illegal character"),
|
||||
}
|
||||
default:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// object parses any type of object, such as number, bool, string, object or
|
||||
// list.
|
||||
func (p *Parser) object() (ast.Node, error) {
|
||||
defer un(trace(p, "ParseType"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
|
||||
return p.literalType()
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.LBRACK:
|
||||
return p.listType()
|
||||
case token.COMMENT:
|
||||
// implement comment
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("Unknown token: %+v", tok),
|
||||
}
|
||||
}
|
||||
|
||||
// objectType parses an object type and returns a ObjectType AST
|
||||
func (p *Parser) objectType() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseObjectType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACE
|
||||
o := &ast.ObjectType{
|
||||
Lbrace: p.tok.Pos,
|
||||
}
|
||||
|
||||
l, err := p.objectList(true)
|
||||
|
||||
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
|
||||
// not an RBRACE, it's a syntax error and we just return it.
|
||||
if err != nil && p.tok.Type != token.RBRACE {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// No error, scan and expect the ending to be a brace
|
||||
if tok := p.scan(); tok.Type != token.RBRACE {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
o.List = l
|
||||
o.Rbrace = p.tok.Pos // advanced via parseObjectList
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST
|
||||
func (p *Parser) listType() (*ast.ListType, error) {
|
||||
defer un(trace(p, "ParseListType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACK
|
||||
l := &ast.ListType{
|
||||
Lbrack: p.tok.Pos,
|
||||
}
|
||||
|
||||
needComma := false
|
||||
for {
|
||||
tok := p.scan()
|
||||
if needComma {
|
||||
switch tok.Type {
|
||||
case token.COMMA, token.RBRACK:
|
||||
default:
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error parsing list, expected comma or list end, got: %s",
|
||||
tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
switch tok.Type {
|
||||
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
|
||||
node, err := p.literalType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If there is a lead comment, apply it
|
||||
if p.leadComment != nil {
|
||||
node.LeadComment = p.leadComment
|
||||
p.leadComment = nil
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.COMMA:
|
||||
// get next list item or we are at the end
|
||||
// do a look-ahead for line comment
|
||||
p.scan()
|
||||
if p.lineComment != nil && len(l.List) > 0 {
|
||||
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
|
||||
if ok {
|
||||
lit.LineComment = p.lineComment
|
||||
l.List[len(l.List)-1] = lit
|
||||
p.lineComment = nil
|
||||
}
|
||||
}
|
||||
p.unscan()
|
||||
|
||||
needComma = false
|
||||
continue
|
||||
case token.LBRACE:
|
||||
// Looks like a nested object, so parse it out
|
||||
node, err := p.objectType()
|
||||
if err != nil {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error while trying to parse object within list: %s", err),
|
||||
}
|
||||
}
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.LBRACK:
|
||||
node, err := p.listType()
|
||||
if err != nil {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error while trying to parse list within list: %s", err),
|
||||
}
|
||||
}
|
||||
l.Add(node)
|
||||
case token.RBRACK:
|
||||
// finished
|
||||
l.Rbrack = p.tok.Pos
|
||||
return l, nil
|
||||
default:
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
|
||||
// been unscanned then read that instead. In the process, it collects any
|
||||
// comment groups encountered, and remembers the last lead and line comments.
|
||||
func (p *Parser) scan() token.Token {
|
||||
// If we have a token on the buffer, then return it.
|
||||
if p.n != 0 {
|
||||
p.n = 0
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// Otherwise read the next token from the scanner and save it to the buffer
|
||||
// in case we unscan later.
|
||||
prev := p.tok
|
||||
p.tok = p.sc.Scan()
|
||||
|
||||
if p.tok.Type == token.COMMENT {
|
||||
var comment *ast.CommentGroup
|
||||
var endline int
|
||||
|
||||
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
|
||||
// p.tok.Pos.Line, prev.Pos.Line, endline)
|
||||
if p.tok.Pos.Line == prev.Pos.Line {
|
||||
// The comment is on the same line as the previous token; it
|
||||
// cannot be a lead comment but may be a line comment.
|
||||
comment, endline = p.consumeCommentGroup(0)
|
||||
if p.tok.Pos.Line != endline {
|
||||
// The next token is on a different line, thus
|
||||
// the last comment group is a line comment.
|
||||
p.lineComment = comment
|
||||
}
|
||||
}
|
||||
|
||||
// consume successor comments, if any
|
||||
endline = -1
|
||||
for p.tok.Type == token.COMMENT {
|
||||
comment, endline = p.consumeCommentGroup(1)
|
||||
}
|
||||
|
||||
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
|
||||
switch p.tok.Type {
|
||||
case token.RBRACE, token.RBRACK:
|
||||
// Do not count for these cases
|
||||
default:
|
||||
// The next token is following on the line immediately after the
|
||||
// comment group, thus the last comment group is a lead comment.
|
||||
p.leadComment = comment
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer.
|
||||
func (p *Parser) unscan() {
|
||||
p.n = 1
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
func (p *Parser) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
|
||||
|
||||
i := 2 * p.indent
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *Parser, msg string) *Parser {
|
||||
p.printTrace(msg, "(")
|
||||
p.indent++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *Parser) {
|
||||
p.indent--
|
||||
p.printTrace(")")
|
||||
}
|
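The look-ahead in objectItem and the comment grouping in scan are what attach lead and line comments to items. A hedged sketch (sample input assumed) showing where those comments end up on the parsed AST:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
# lead comment for name
name = "demo" # trailing line comment
`)
	file, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	for _, item := range file.Node.(*ast.ObjectList).Items {
		if item.LeadComment != nil {
			fmt.Println("lead:", item.LeadComment.List[0].Text)
		}
		if item.LineComment != nil {
			fmt.Println("line:", item.LineComment.List[0].Text)
		}
	}
}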
789
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
789
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
|
@ -0,0 +1,789 @@
|
|||
package printer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
const (
|
||||
blank = byte(' ')
|
||||
newline = byte('\n')
|
||||
tab = byte('\t')
|
||||
infinity = 1 << 30 // offset or line
|
||||
)
|
||||
|
||||
var (
|
||||
unindent = []byte("\uE123") // in the private use space
|
||||
)
|
||||
|
||||
type printer struct {
|
||||
cfg Config
|
||||
prev token.Pos
|
||||
|
||||
comments []*ast.CommentGroup // may be nil, contains all comments
|
||||
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
|
||||
|
||||
enableTrace bool
|
||||
indentTrace int
|
||||
}
|
||||
|
||||
type ByPosition []*ast.CommentGroup
|
||||
|
||||
func (b ByPosition) Len() int { return len(b) }
|
||||
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
|
||||
|
||||
// collectComments collects all standalone comments which are not lead or line
// comments
|
||||
func (p *printer) collectComments(node ast.Node) {
|
||||
// first collect all comments. This is already stored in
|
||||
// ast.File.(comments)
|
||||
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||
switch t := nn.(type) {
|
||||
case *ast.File:
|
||||
p.comments = t.Comments
|
||||
return nn, false
|
||||
}
|
||||
return nn, true
|
||||
})
|
||||
|
||||
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
|
||||
for _, c := range p.comments {
|
||||
standaloneComments[c.Pos()] = c
|
||||
}
|
||||
|
||||
// next remove all lead and line comments from the overall comment map.
|
||||
// This will give us comments which are standalone, comments which are not
|
||||
// assigned to any kind of node.
|
||||
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||
switch t := nn.(type) {
|
||||
case *ast.LiteralType:
|
||||
if t.LeadComment != nil {
|
||||
for _, comment := range t.LeadComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.LineComment != nil {
|
||||
for _, comment := range t.LineComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.ObjectItem:
|
||||
if t.LeadComment != nil {
|
||||
for _, comment := range t.LeadComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.LineComment != nil {
|
||||
for _, comment := range t.LineComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nn, true
|
||||
})
|
||||
|
||||
for _, c := range standaloneComments {
|
||||
p.standaloneComments = append(p.standaloneComments, c)
|
||||
}
|
||||
|
||||
sort.Sort(ByPosition(p.standaloneComments))
|
||||
}
|
||||
|
||||
// output creates printable HCL output and returns it.
|
||||
func (p *printer) output(n interface{}) []byte {
|
||||
var buf bytes.Buffer
|
||||
|
||||
switch t := n.(type) {
|
||||
case *ast.File:
|
||||
// File doesn't trace so we add the tracing here
|
||||
defer un(trace(p, "File"))
|
||||
return p.output(t.Node)
|
||||
case *ast.ObjectList:
|
||||
defer un(trace(p, "ObjectList"))
|
||||
|
||||
var index int
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is at "infinity"
|
||||
var nextItem token.Pos
|
||||
if index != len(t.Items) {
|
||||
nextItem = t.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = token.Pos{Offset: infinity, Line: infinity}
|
||||
}
|
||||
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that we should print for this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
// Go through all the comments in the group. The group
|
||||
// should be printed together, not separated by double newlines.
|
||||
printed := false
|
||||
newlinePrinted := false
|
||||
for _, comment := range c.List {
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// if we hit the end add newlines so we can print the comment
|
||||
// we don't do this if prev is invalid which means the
|
||||
// beginning of the file since the first comment should
|
||||
// be at the first line.
|
||||
if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
|
||||
buf.Write([]byte{newline, newline})
|
||||
newlinePrinted = true
|
||||
}
|
||||
|
||||
// Write the actual comment.
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
}
|
||||
}
|
||||
|
||||
// If we're not at the last item, write a new line so
|
||||
// that there is a newline separating this comment from
|
||||
// the next object.
|
||||
if printed && index != len(t.Items) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
if index == len(t.Items) {
|
||||
break
|
||||
}
|
||||
|
||||
buf.Write(p.output(t.Items[index]))
|
||||
if index != len(t.Items)-1 {
|
||||
// Always write a newline to separate us from the next item
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Need to determine if we're going to separate the next item
|
||||
// with a blank line. The logic here is simple, though there
|
||||
// are a few conditions:
|
||||
//
|
||||
// 1. The next object is more than one line away anyways,
|
||||
// so we need an empty line.
|
||||
//
|
||||
// 2. The next object is not a "single line" object, so
|
||||
// we need an empty line.
|
||||
//
|
||||
// 3. This current object is not a single line object,
|
||||
// so we need an empty line.
|
||||
current := t.Items[index]
|
||||
next := t.Items[index+1]
|
||||
if next.Pos().Line != t.Items[index].Pos().Line+1 ||
|
||||
!p.isSingleLineObject(next) ||
|
||||
!p.isSingleLineObject(current) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
index++
|
||||
}
|
||||
case *ast.ObjectKey:
|
||||
buf.WriteString(t.Token.Text)
|
||||
case *ast.ObjectItem:
|
||||
p.prev = t.Pos()
|
||||
buf.Write(p.objectItem(t))
|
||||
case *ast.LiteralType:
|
||||
buf.Write(p.literalType(t))
|
||||
case *ast.ListType:
|
||||
buf.Write(p.list(t))
|
||||
case *ast.ObjectType:
|
||||
buf.Write(p.objectType(t))
|
||||
default:
|
||||
fmt.Printf(" unknown type: %T\n", n)
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
||||
result := []byte(lit.Token.Text)
|
||||
switch lit.Token.Type {
|
||||
case token.HEREDOC:
|
||||
// Clear the trailing newline from heredocs
|
||||
if result[len(result)-1] == '\n' {
|
||||
result = result[:len(result)-1]
|
||||
}
|
||||
|
||||
// Poison lines 2+ so that we don't indent them
|
||||
result = p.heredocIndent(result)
|
||||
case token.STRING:
|
||||
// If this is a multiline string, poison lines 2+ so we don't
|
||||
// indent them.
|
||||
if bytes.IndexRune(result, '\n') >= 0 {
|
||||
result = p.heredocIndent(result)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// objectItem returns the printable HCL form of an object item. An object type
|
||||
// starts with one/multiple keys and has a value. The value might be of any
|
||||
// type.
|
||||
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
|
||||
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
|
||||
var buf bytes.Buffer
|
||||
|
||||
if o.LeadComment != nil {
|
||||
for _, comment := range o.LeadComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
// If key and val are on different lines, treat line comments like lead comments.
|
||||
if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
|
||||
for _, comment := range o.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
for i, k := range o.Keys {
|
||||
buf.WriteString(k.Token.Text)
|
||||
buf.WriteByte(blank)
|
||||
|
||||
// reach end of key
|
||||
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
|
||||
buf.WriteString("=")
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
}
|
||||
|
||||
buf.Write(p.output(o.Val))
|
||||
|
||||
if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
|
||||
buf.WriteByte(blank)
|
||||
for _, comment := range o.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// objectType returns the printable HCL form of an object type. An object type
|
||||
// begins with a brace and ends with a brace.
|
||||
func (p *printer) objectType(o *ast.ObjectType) []byte {
|
||||
defer un(trace(p, "ObjectType"))
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("{")
|
||||
|
||||
var index int
|
||||
var nextItem token.Pos
|
||||
var commented, newlinePrinted bool
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is the closing brace
|
||||
if index != len(o.List.Items) {
|
||||
nextItem = o.List.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = o.Rbrace
|
||||
}
|
||||
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that we should print for this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
printed := false
|
||||
var lastCommentPos token.Pos
|
||||
for _, comment := range c.List {
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// If there are standalone comments and the initial newline has not
|
||||
// been printed yet, do it now.
|
||||
if !newlinePrinted {
|
||||
newlinePrinted = true
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// add newline if it's between other printed nodes
|
||||
if index > 0 {
|
||||
commented = true
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// Store this position
|
||||
lastCommentPos = comment.Pos()
|
||||
|
||||
// output the comment itself
|
||||
buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
|
||||
/*
|
||||
if index != len(o.List.Items) {
|
||||
buf.WriteByte(newline) // do not print on the end
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
// Stuff to do if we had comments
|
||||
if printed {
|
||||
// Always write a newline
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// If there is another item in the object and our comment
|
||||
// didn't hug it directly, then make sure there is a blank
|
||||
// line separating them.
|
||||
if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if index == len(o.List.Items) {
|
||||
p.prev = o.Rbrace
|
||||
break
|
||||
}
|
||||
|
||||
// At this point we are sure that it's not a totally empty block: print
|
||||
// the initial newline if it hasn't been printed yet by the previous
|
||||
// block about standalone comments.
|
||||
if !newlinePrinted {
|
||||
buf.WriteByte(newline)
|
||||
newlinePrinted = true
|
||||
}
|
||||
|
||||
// check if we have adjacent one-liner items. If so, we're going to align
// the comments.
|
||||
var aligned []*ast.ObjectItem
|
||||
for _, item := range o.List.Items[index:] {
|
||||
// we don't group one line lists
|
||||
if len(o.List.Items) == 1 {
|
||||
break
|
||||
}
|
||||
|
||||
// one means a one-liner without any lead comment
// two means a one-liner with a lead comment
|
||||
// anything else might be something else
|
||||
cur := lines(string(p.objectItem(item)))
|
||||
if cur > 2 {
|
||||
break
|
||||
}
|
||||
|
||||
curPos := item.Pos()
|
||||
|
||||
nextPos := token.Pos{}
|
||||
if index != len(o.List.Items)-1 {
|
||||
nextPos = o.List.Items[index+1].Pos()
|
||||
}
|
||||
|
||||
prevPos := token.Pos{}
|
||||
if index != 0 {
|
||||
prevPos = o.List.Items[index-1].Pos()
|
||||
}
|
||||
|
||||
// fmt.Println("DEBUG ----------------")
|
||||
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
|
||||
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
|
||||
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
|
||||
|
||||
if curPos.Line+1 == nextPos.Line {
|
||||
aligned = append(aligned, item)
|
||||
index++
|
||||
continue
|
||||
}
|
||||
|
||||
if curPos.Line-1 == prevPos.Line {
|
||||
aligned = append(aligned, item)
|
||||
index++
|
||||
|
||||
// finish if we have a new line or comment next. This happens
|
||||
// if the next item is not adjacent
|
||||
if curPos.Line+1 != nextPos.Line {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// put newlines if the items are between other non aligned items.
|
||||
// newlines are also added if there is a standalone comment already, so
|
||||
// check it too
|
||||
if !commented && index != len(aligned) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
if len(aligned) >= 1 {
|
||||
p.prev = aligned[len(aligned)-1].Pos()
|
||||
|
||||
items := p.alignedItems(aligned)
|
||||
buf.Write(p.indent(items))
|
||||
} else {
|
||||
p.prev = o.List.Items[index].Pos()
|
||||
|
||||
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
|
||||
index++
|
||||
}
|
||||
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
|
||||
var buf bytes.Buffer
|
||||
|
||||
// find the longest key and value length, needed for alignment
|
||||
var longestKeyLen int // longest key length
|
||||
var longestValLen int // longest value length
|
||||
for _, item := range items {
|
||||
key := len(item.Keys[0].Token.Text)
|
||||
val := len(p.output(item.Val))
|
||||
|
||||
if key > longestKeyLen {
|
||||
longestKeyLen = key
|
||||
}
|
||||
|
||||
if val > longestValLen {
|
||||
longestValLen = val
|
||||
}
|
||||
}
|
||||
|
||||
for i, item := range items {
|
||||
if item.LeadComment != nil {
|
||||
for _, comment := range item.LeadComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
for i, k := range item.Keys {
|
||||
keyLen := len(k.Token.Text)
|
||||
buf.WriteString(k.Token.Text)
|
||||
for i := 0; i < longestKeyLen-keyLen+1; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
// reach end of key
|
||||
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
|
||||
buf.WriteString("=")
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
}
|
||||
|
||||
val := p.output(item.Val)
|
||||
valLen := len(val)
|
||||
buf.Write(val)
|
||||
|
||||
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
|
||||
for i := 0; i < longestValLen-valLen+1; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
for _, comment := range item.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
// do not print for the last item
|
||||
if i != len(items)-1 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// list returns the printable HCL form of a list type.
|
||||
func (p *printer) list(l *ast.ListType) []byte {
|
||||
if p.isSingleLineList(l) {
|
||||
return p.singleLineList(l)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("[")
|
||||
buf.WriteByte(newline)
|
||||
|
||||
var longestLine int
|
||||
for _, item := range l.List {
|
||||
// for now we assume that the list only contains literal types
|
||||
if lit, ok := item.(*ast.LiteralType); ok {
|
||||
lineLen := len(lit.Token.Text)
|
||||
if lineLen > longestLine {
|
||||
longestLine = lineLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
haveEmptyLine := false
|
||||
for i, item := range l.List {
|
||||
// If we have a lead comment, then we want to write that first
|
||||
leadComment := false
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
|
||||
leadComment = true
|
||||
|
||||
// Ensure an empty line before every element with a
|
||||
// lead comment (except the first item in a list).
|
||||
if !haveEmptyLine && i != 0 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LeadComment.List {
|
||||
buf.Write(p.indent([]byte(comment.Text)))
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
// also indent each line
|
||||
val := p.output(item)
|
||||
curLen := len(val)
|
||||
buf.Write(p.indent(val))
|
||||
|
||||
// if this item is a heredoc, then we output the comma on
|
||||
// the next line. This is the only case this happens.
|
||||
comma := []byte{','}
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||
buf.WriteByte(newline)
|
||||
comma = p.indent(comma)
|
||||
}
|
||||
|
||||
buf.Write(comma)
|
||||
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
|
||||
// if the next item doesn't have any comments, do not align
|
||||
buf.WriteByte(blank) // align one space
|
||||
for i := 0; i < longestLine-curLen; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Ensure an empty line after every element with a
|
||||
// lead comment (except the first item in a list).
|
||||
haveEmptyLine = leadComment && i != len(l.List)-1
|
||||
if haveEmptyLine {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("]")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// isSingleLineList returns true if:
// * the list was previously formatted entirely on one line
// * it consists entirely of literals
// * there are either no heredoc strings or the list has exactly one element
// * there are no line comments
|
||||
func (printer) isSingleLineList(l *ast.ListType) bool {
|
||||
for _, item := range l.List {
|
||||
if item.Pos().Line != l.Lbrack.Line {
|
||||
return false
|
||||
}
|
||||
|
||||
lit, ok := item.(*ast.LiteralType)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if lit.LineComment != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// singleLineList prints a simple single line list.
|
||||
// For a definition of "simple", see isSingleLineList above.
|
||||
func (p *printer) singleLineList(l *ast.ListType) []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
buf.WriteString("[")
|
||||
for i, item := range l.List {
|
||||
if i != 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
|
||||
// Output the item itself
|
||||
buf.Write(p.output(item))
|
||||
|
||||
// The heredoc marker needs to be at the end of line.
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("]")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// indent indents the lines of the given buffer for each non-empty line
|
||||
func (p *printer) indent(buf []byte) []byte {
|
||||
var prefix []byte
|
||||
if p.cfg.SpacesWidth != 0 {
|
||||
for i := 0; i < p.cfg.SpacesWidth; i++ {
|
||||
prefix = append(prefix, blank)
|
||||
}
|
||||
} else {
|
||||
prefix = []byte{tab}
|
||||
}
|
||||
|
||||
var res []byte
|
||||
bol := true
|
||||
for _, c := range buf {
|
||||
if bol && c != '\n' {
|
||||
res = append(res, prefix...)
|
||||
}
|
||||
|
||||
res = append(res, c)
|
||||
bol = c == '\n'
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// unindent removes all the indentation from the tombstoned lines
|
||||
func (p *printer) unindent(buf []byte) []byte {
|
||||
var res []byte
|
||||
for i := 0; i < len(buf); i++ {
|
||||
skip := len(buf)-i <= len(unindent)
|
||||
if !skip {
|
||||
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
|
||||
}
|
||||
if skip {
|
||||
res = append(res, buf[i])
|
||||
continue
|
||||
}
|
||||
|
||||
// We have a marker. We have to backtrack here and clean out
// any whitespace ahead of our tombstone up to a \n
|
||||
for j := len(res) - 1; j >= 0; j-- {
|
||||
if res[j] == '\n' {
|
||||
break
|
||||
}
|
||||
|
||||
res = res[:j]
|
||||
}
|
||||
|
||||
// Skip the entire unindent marker
|
||||
i += len(unindent) - 1
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// heredocIndent marks all the 2nd and further lines as unindentable
|
||||
func (p *printer) heredocIndent(buf []byte) []byte {
|
||||
var res []byte
|
||||
bol := false
|
||||
for _, c := range buf {
|
||||
if bol && c != '\n' {
|
||||
res = append(res, unindent...)
|
||||
}
|
||||
res = append(res, c)
|
||||
bol = c == '\n'
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// isSingleLineObject tells whether the given object item is a single
|
||||
// line object such as "obj {}".
|
||||
//
|
||||
// A single line object:
|
||||
//
|
||||
// * has no lead comments (hence multi-line)
|
||||
// * has no assignment
|
||||
// * has no values in the stanza (within {})
|
||||
//
|
||||
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
|
||||
// If there is a lead comment, can't be one line
|
||||
if val.LeadComment != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If there is assignment, we always break by line
|
||||
if val.Assign.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it isn't an object type, then its not a single line object
|
||||
ot, ok := val.Val.(*ast.ObjectType)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the object has no items, it is single line!
|
||||
return len(ot.List.Items) == 0
|
||||
}
|
||||
|
||||
func lines(txt string) int {
|
||||
endline := 1
|
||||
for i := 0; i < len(txt); i++ {
|
||||
if txt[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
return endline
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Tracing support
|
||||
|
||||
func (p *printer) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
i := 2 * p.indentTrace
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *printer, msg string) *printer {
|
||||
p.printTrace(msg, "(")
|
||||
p.indentTrace++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *printer) {
|
||||
p.indentTrace--
|
||||
p.printTrace(")")
|
||||
}
|
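nodes.go implements the node-level printing; the package's public entry points, such as Format, are defined outside this hunk. A hedged usage sketch, with an intentionally untidy input as an illustrative assumption:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	src := []byte(`service "web" {
port= 8080 # served port
name ="demo"
}`)

	// Format re-parses the source and prints it back using the node printing
	// and comment alignment logic implemented above.
	out, err := printer.Format(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}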
Some files were not shown because too many files have changed in this diff.