vendor: remove dep and use vndr

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Author: Antonio Murdaca <runcom@redhat.com>
Date: 2017-06-06 09:19:04 +02:00
commit 148e72d81e (parent 16f44674a4)
GPG key ID: B2BEAD150DE936B9 (no known key found for this signature in database)
16131 changed files with 73815 additions and 4235138 deletions

@ -1,141 +0,0 @@
package ansiterm
import (
"fmt"
"testing"
)
func TestStateTransitions(t *testing.T) {
stateTransitionHelper(t, "CsiEntry", "Ground", alphabetics)
stateTransitionHelper(t, "CsiEntry", "CsiParam", csiCollectables)
stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY})
stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D})
stateTransitionHelper(t, "Escape", "Ground", escapeToGroundBytes)
stateTransitionHelper(t, "Escape", "EscapeIntermediate", intermeds)
stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", intermeds)
stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", executors)
stateTransitionHelper(t, "EscapeIntermediate", "Ground", escapeIntermediateToGroundBytes)
stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL})
stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C})
stateTransitionHelper(t, "Ground", "Ground", executors)
}
func TestAnyToX(t *testing.T) {
anyToXHelper(t, []byte{ANSI_ESCAPE_PRIMARY}, "Escape")
anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry")
anyToXHelper(t, []byte{OSC_STRING}, "OscString")
anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry")
anyToXHelper(t, toGroundBytes, "Ground")
}
func TestCollectCsiParams(t *testing.T) {
parser, _ := createTestParser("CsiEntry")
parser.Parse(csiCollectables)
buffer := parser.context.paramBuffer
bufferCount := len(buffer)
if bufferCount != len(csiCollectables) {
t.Errorf("Buffer: %v", buffer)
t.Errorf("CsiParams: %v", csiCollectables)
t.Errorf("Buffer count failure: %d != %d", bufferCount, len(csiParams))
return
}
for i, v := range csiCollectables {
if v != buffer[i] {
t.Errorf("Buffer: %v", buffer)
t.Errorf("CsiParams: %v", csiParams)
t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i])
}
}
}
func TestParseParams(t *testing.T) {
parseParamsHelper(t, []byte{}, []string{})
parseParamsHelper(t, []byte{';'}, []string{})
parseParamsHelper(t, []byte{';', ';'}, []string{})
parseParamsHelper(t, []byte{'7'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', '8'}, []string{"78"})
parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"})
}
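
These cases pin down the splitting semantics: parameters are separated by `;` and empty segments are dropped, so leading, trailing, or doubled separators never produce an empty parameter. Below is a minimal stand-in that reproduces exactly that behavior; `splitParams` is a hypothetical name for illustration, not the vendored `parseParams` implementation, and it assumes only the standard `strings` package.

```go
// splitParams mirrors the behavior the cases above specify: split the
// input on ';' and drop empty segments. Illustrative sketch only.
func splitParams(bytes []byte) []string {
	params := []string{}
	for _, field := range strings.Split(string(bytes), ";") {
		if field != "" {
			params = append(params, field)
		}
	}
	return params
}
```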
func TestCursor(t *testing.T) {
cursorSingleParamHelper(t, 'A', "CUU")
cursorSingleParamHelper(t, 'B', "CUD")
cursorSingleParamHelper(t, 'C', "CUF")
cursorSingleParamHelper(t, 'D', "CUB")
cursorSingleParamHelper(t, 'E', "CNL")
cursorSingleParamHelper(t, 'F', "CPL")
cursorSingleParamHelper(t, 'G', "CHA")
cursorTwoParamHelper(t, 'H', "CUP")
cursorTwoParamHelper(t, 'f', "HVP")
funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"})
funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"})
}
func TestErase(t *testing.T) {
// Erase in Display
eraseHelper(t, 'J', "ED")
// Erase in Line
eraseHelper(t, 'K', "EL")
}
func TestSelectGraphicRendition(t *testing.T) {
funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"})
funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"})
funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"})
funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"})
}
func TestScroll(t *testing.T) {
scrollHelper(t, 'S', "SU")
scrollHelper(t, 'T', "SD")
}
func TestPrint(t *testing.T) {
parser, evtHandler := createTestParser("Ground")
parser.Parse(printables)
validateState(t, parser.currState, "Ground")
for i, v := range printables {
expectedCall := fmt.Sprintf("Print([%s])", string(v))
actualCall := evtHandler.FunctionCalls[i]
if actualCall != expectedCall {
t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i)
}
}
}
func TestClear(t *testing.T) {
p, _ := createTestParser("Ground")
fillContext(p.context)
p.clear()
validateEmptyContext(t, p.context)
}
func TestClearOnStateChange(t *testing.T) {
clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY})
clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY})
}
func TestC0(t *testing.T) {
expectedCall := "Execute([" + string(ANSI_LINE_FEED) + "])"
c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall})
expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])"
c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall})
}
func TestEscDispatch(t *testing.T) {
funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"})
}
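
Read together, these tests exercise the parse loop end to end: input bytes drive state transitions, and completed sequences surface as recorded handler calls. A minimal sketch of driving the parser directly, reusing `CreateTestAnsiEventHandler` and `CreateParser` from the helpers below, and assuming ESC is 0x1B (ANSI_ESCAPE_PRIMARY) as in the package constants:

```go
// Feed a complete cursor-up sequence (ESC [ 2 A) through the parser and
// inspect the recorded handler call. Sketch only; assumes the package's
// test helpers shown in this diff.
func sketchParseCUU() {
	evtHandler := CreateTestAnsiEventHandler()
	parser := CreateParser("Ground", evtHandler)
	parser.Parse([]byte{0x1B, '[', '2', 'A'}) // Ground -> Escape -> CsiEntry -> CUU dispatch
	fmt.Println(evtHandler.FunctionCalls)     // expected: [CUU([2])]
}
```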

@ -1,114 +0,0 @@
package ansiterm
import (
"fmt"
"testing"
)
func getStateNames() []string {
parser, _ := createTestParser("Ground")
stateNames := []string{}
for _, state := range parser.stateMap {
stateNames = append(stateNames, state.Name())
}
return stateNames
}
func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) {
for _, b := range bytes {
bytes := []byte{byte(b)}
parser, _ := createTestParser(start)
parser.Parse(bytes)
validateState(t, parser.currState, end)
}
}
func anyToXHelper(t *testing.T, bytes []byte, expectedState string) {
for _, s := range getStateNames() {
stateTransitionHelper(t, s, expectedState, bytes)
}
}
func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) {
parser, evtHandler := createTestParser(start)
parser.Parse(bytes)
validateState(t, parser.currState, expected)
validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}
func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) {
params, err := parseParams(bytes)
if err != nil {
t.Errorf("Parameter parse error: %v", err)
return
}
if len(params) != len(expectedParams) {
t.Errorf("Parsed parameters: %v", params)
t.Errorf("Expected parameters: %v", expectedParams)
t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams))
return
}
for i, v := range expectedParams {
if v != params[i] {
t.Errorf("Parsed parameters: %v", params)
t.Errorf("Expected parameters: %v", expectedParams)
t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i)
}
}
}
func cursorSingleParamHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
}
func cursorTwoParamHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)})
funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
}
func eraseHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)})
funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
}
func scrollHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)})
funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)})
}
func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) {
p, _ := createTestParser(start)
fillContext(p.context)
p.Parse(bytes)
validateState(t, p.currState, end)
validateEmptyContext(t, p.context)
}
func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) {
parser, evtHandler := createTestParser("Ground")
parser.Parse(bytes)
validateState(t, parser.currState, expectedState)
validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}

@ -1,66 +0,0 @@
package ansiterm
import (
"testing"
)
func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) {
evtHandler := CreateTestAnsiEventHandler()
parser := CreateParser(s, evtHandler)
return parser, evtHandler
}
func validateState(t *testing.T, actualState state, expectedStateName string) {
actualName := "Nil"
if actualState != nil {
actualName = actualState.Name()
}
if actualName != expectedStateName {
t.Errorf("Invalid state: '%s' != '%s'", actualName, expectedStateName)
}
}
func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) {
actualCount := len(actualCalls)
expectedCount := len(expectedCalls)
if actualCount != expectedCount {
t.Errorf("Actual calls: %v", actualCalls)
t.Errorf("Expected calls: %v", expectedCalls)
t.Errorf("Call count error: %d != %d", actualCount, expectedCount)
return
}
for i, v := range actualCalls {
if v != expectedCalls[i] {
t.Errorf("Actual calls: %v", actualCalls)
t.Errorf("Expected calls: %v", expectedCalls)
t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i]))
}
}
}
func fillContext(context *ansiContext) {
context.currentChar = 'A'
context.paramBuffer = []byte{'C', 'D', 'E'}
context.interBuffer = []byte{'F', 'G', 'H'}
}
func validateEmptyContext(t *testing.T, context *ansiContext) {
var expectedCurrChar byte = 0x0
if context.currentChar != expectedCurrChar {
t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar)
}
if len(context.paramBuffer) != 0 {
t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer)
}
if len(context.interBuffer) != 0 {
t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer)
}
}

@ -1,173 +0,0 @@
package ansiterm
import (
"fmt"
"strconv"
)
type TestAnsiEventHandler struct {
FunctionCalls []string
}
func CreateTestAnsiEventHandler() *TestAnsiEventHandler {
evtHandler := TestAnsiEventHandler{}
evtHandler.FunctionCalls = make([]string, 0)
return &evtHandler
}
func (h *TestAnsiEventHandler) recordCall(call string, params []string) {
s := fmt.Sprintf("%s(%v)", call, params)
h.FunctionCalls = append(h.FunctionCalls, s)
}
func (h *TestAnsiEventHandler) Print(b byte) error {
h.recordCall("Print", []string{string(b)})
return nil
}
func (h *TestAnsiEventHandler) Execute(b byte) error {
h.recordCall("Execute", []string{string(b)})
return nil
}
func (h *TestAnsiEventHandler) CUU(param int) error {
h.recordCall("CUU", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUD(param int) error {
h.recordCall("CUD", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUF(param int) error {
h.recordCall("CUF", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUB(param int) error {
h.recordCall("CUB", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CNL(param int) error {
h.recordCall("CNL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CPL(param int) error {
h.recordCall("CPL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CHA(param int) error {
h.recordCall("CHA", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) VPA(param int) error {
h.recordCall("VPA", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUP(x int, y int) error {
xS, yS := strconv.Itoa(x), strconv.Itoa(y)
h.recordCall("CUP", []string{xS, yS})
return nil
}
func (h *TestAnsiEventHandler) HVP(x int, y int) error {
xS, yS := strconv.Itoa(x), strconv.Itoa(y)
h.recordCall("HVP", []string{xS, yS})
return nil
}
func (h *TestAnsiEventHandler) DECTCEM(visible bool) error {
h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)})
return nil
}
func (h *TestAnsiEventHandler) DECOM(visible bool) error {
h.recordCall("DECOM", []string{strconv.FormatBool(visible)})
return nil
}
func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error {
h.recordCall("DECOLM", []string{strconv.FormatBool(use132)})
return nil
}
func (h *TestAnsiEventHandler) ED(param int) error {
h.recordCall("ED", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) EL(param int) error {
h.recordCall("EL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) IL(param int) error {
h.recordCall("IL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DL(param int) error {
h.recordCall("DL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) ICH(param int) error {
h.recordCall("ICH", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DCH(param int) error {
h.recordCall("DCH", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) SGR(params []int) error {
strings := []string{}
for _, v := range params {
strings = append(strings, strconv.Itoa(v))
}
h.recordCall("SGR", strings)
return nil
}
func (h *TestAnsiEventHandler) SU(param int) error {
h.recordCall("SU", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) SD(param int) error {
h.recordCall("SD", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DA(params []string) error {
h.recordCall("DA", params)
return nil
}
func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error {
topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom)
h.recordCall("DECSTBM", []string{topS, bottomS})
return nil
}
func (h *TestAnsiEventHandler) RI() error {
h.recordCall("RI", nil)
return nil
}
func (h *TestAnsiEventHandler) IND() error {
h.recordCall("IND", nil)
return nil
}
func (h *TestAnsiEventHandler) Flush() error {
return nil
}

@ -1,5 +0,0 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

@ -1,12 +0,0 @@
language: go
go:
  - 1.1
  - 1.2
  - tip
install:
  - go install ./...
  - go get github.com/BurntSushi/toml-test
script:
  - export PATH="$PATH:$HOME/gopath/bin"
  - make test

@ -1,3 +0,0 @@
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)

@ -1,19 +0,0 @@
install:
	go install ./...

test: install
	go test -v
	toml-test toml-test-decoder
	toml-test -encoder toml-test-encoder

fmt:
	gofmt -w *.go */*.go
	colcheck *.go */*.go

tags:
	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
	git push origin master
	git push github master

@ -1,61 +0,0 @@
package main
import (
"fmt"
"time"
"github.com/BurntSushi/toml"
)
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
func main() {
var config tomlConfig
if _, err := toml.DecodeFile("example.toml", &config); err != nil {
fmt.Println(err)
return
}
fmt.Printf("Title: %s\n", config.Title)
fmt.Printf("Owner: %s (%s, %s), Born: %s\n",
config.Owner.Name, config.Owner.Org, config.Owner.Bio,
config.Owner.DOB)
fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n",
config.DB.Server, config.DB.Ports, config.DB.ConnMax,
config.DB.Enabled)
for serverName, server := range config.Servers {
fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC)
}
fmt.Printf("Client data: %v\n", config.Clients.Data)
fmt.Printf("Client hosts: %v\n", config.Clients.Hosts)
}

@ -1,35 +0,0 @@
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]

@ -1,22 +0,0 @@
# Test file for TOML
# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate
# This part you'll really hate
[the]
test_string = "You'll hate me after this - #" # " Annoying, isn't it?
[the.hard]
test_array = [ "] ", " # "] # ] There you go, parse this!
test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ]
# You didn't think it'd be as easy as chucking out the last #, did you?
another_test_string = " Same thing, but with a string #"
harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too"
# Things will get harder
[the.hard."bit#"]
"what?" = "You don't think some user won't do that?"
multi_line_array = [
"]",
# ] Oh yes I did
]

@ -1,4 +0,0 @@
# [x] you
# [x.y] don't
# [x.y.z] need these
[x.y.z.w] # for this to work
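
A quick way to see the implicit parent tables being created is to decode this snippet with the package's `Decode` (used in the encoder tests later in this diff). A self-contained sketch:

```go
// Decode a table header whose intermediate tables were never declared;
// the decoder creates x, y and z implicitly. Sketch only.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	if _, err := toml.Decode("[x.y.z.w]\n", &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // map[x:map[y:map[z:map[w:map[]]]]]
}
```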

@ -1,6 +0,0 @@
# DO NOT WANT
[fruit]
type = "apple"
[fruit.type]
apple = "yes"

@ -1,35 +0,0 @@
# This is an INVALID TOML document. Boom.
# Can you spot the error without help?
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T7:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]

@ -1,5 +0,0 @@
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z

@ -1 +0,0 @@
some_key_NAME = "wat"

@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

@ -1,14 +0,0 @@
# Implements the TOML test suite interface
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for my
[toml parser written in Go](https://github.com/BurntSushi/toml).
In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
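
Concretely, each TOML value becomes a `{"type": ..., "value": ...}` pair, as the `translate` and `tag` helpers in the command source just below show. A sketch of the shape for a single key, assuming the toml-test v0.2.0 tagged format (JSON key order is not significant):

```go
// For the TOML input `Age = 25`, the decoder emits tagged JSON like
//   {"Age":{"type":"integer","value":"25"}}
// This sketch reconstructs that shape with encoding/json alone.
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	tagged := map[string]interface{}{
		"Age": map[string]interface{}{"type": "integer", "value": "25"},
	}
	if err := json.NewEncoder(os.Stdout).Encode(tagged); err != nil {
		log.Fatal(err)
	}
}
```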

@ -1,90 +0,0 @@
// Command toml-test-decoder satisfies the toml-test interface for testing
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"path"
"time"
"github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
log.Fatalf("Error decoding TOML: %s", err)
}
typedTmp := translate(tmp)
if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
log.Fatalf("Error encoding JSON: %s", err)
}
}
func translate(tomlData interface{}) interface{} {
switch orig := tomlData.(type) {
case map[string]interface{}:
typed := make(map[string]interface{}, len(orig))
for k, v := range orig {
typed[k] = translate(v)
}
return typed
case []map[string]interface{}:
typed := make([]map[string]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v).(map[string]interface{})
}
return typed
case []interface{}:
typed := make([]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v)
}
// We don't really need to tag arrays, but let's be future proof.
// (If TOML ever supports tuples, we'll need this.)
return tag("array", typed)
case time.Time:
return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
case bool:
return tag("bool", fmt.Sprintf("%v", orig))
case int64:
return tag("integer", fmt.Sprintf("%d", orig))
case float64:
return tag("float", fmt.Sprintf("%v", orig))
case string:
return tag("string", orig)
}
panic(fmt.Sprintf("Unknown type: %T", tomlData))
}
func tag(typeName string, data interface{}) map[string]interface{} {
return map[string]interface{}{
"type": typeName,
"value": data,
}
}

@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

@ -1,14 +0,0 @@
# Implements the TOML test suite interface for TOML encoders
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for the
[TOML encoder](https://github.com/BurntSushi/toml).
In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
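
The encoder runs the mapping in reverse: tagged JSON on stdin comes back out as TOML, so `{"Age":{"type":"integer","value":"25"}}` becomes `Age = 25`. A sketch of the untagging step for one integer value; the real `untag` function in the command source just below handles strings, floats, datetimes, bools, and nested arrays as well:

```go
// Undo the tagging for a single integer value. Sketch only, assuming
// the toml-test v0.2.0 tagged format.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	tagged := map[string]interface{}{"type": "integer", "value": "25"}
	n, err := strconv.Atoi(tagged["value"].(string))
	if err != nil {
		panic(err)
	}
	fmt.Printf("Age = %d\n", n) // the TOML the encoder would emit
}
```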

@ -1,131 +0,0 @@
// Command toml-test-encoder satisfies the toml-test interface for testing
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
package main
import (
"encoding/json"
"flag"
"log"
"os"
"path"
"strconv"
"time"
"github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
log.Fatalf("Error decoding JSON: %s", err)
}
tomlData := translate(tmp)
if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
log.Fatalf("Error encoding TOML: %s", err)
}
}
func translate(typedJson interface{}) interface{} {
switch v := typedJson.(type) {
case map[string]interface{}:
if len(v) == 2 && in("type", v) && in("value", v) {
return untag(v)
}
m := make(map[string]interface{}, len(v))
for k, v2 := range v {
m[k] = translate(v2)
}
return m
case []interface{}:
tabArray := make([]map[string]interface{}, len(v))
for i := range v {
if m, ok := translate(v[i]).(map[string]interface{}); ok {
tabArray[i] = m
} else {
log.Fatalf("JSON arrays may only contain objects. This " +
"corresponds to only tables being allowed in " +
"TOML table arrays.")
}
}
return tabArray
}
log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
panic("unreachable")
}
func untag(typed map[string]interface{}) interface{} {
t := typed["type"].(string)
v := typed["value"]
switch t {
case "string":
return v.(string)
case "integer":
v := v.(string)
n, err := strconv.Atoi(v)
if err != nil {
log.Fatalf("Could not parse '%s' as integer: %s", v, err)
}
return n
case "float":
v := v.(string)
f, err := strconv.ParseFloat(v, 64)
if err != nil {
log.Fatalf("Could not parse '%s' as float64: %s", v, err)
}
return f
case "datetime":
v := v.(string)
t, err := time.Parse("2006-01-02T15:04:05Z", v)
if err != nil {
log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
}
return t
case "bool":
v := v.(string)
switch v {
case "true":
return true
case "false":
return false
}
log.Fatalf("Could not parse '%s' as a boolean.", v)
case "array":
v := v.([]interface{})
array := make([]interface{}, len(v))
for i := range v {
if m, ok := v[i].(map[string]interface{}); ok {
array[i] = untag(m)
} else {
log.Fatalf("Arrays may only contain other arrays or "+
"primitive values, but found a '%T'.", m)
}
}
return array
}
log.Fatalf("Unrecognized tag type '%s'.", t)
panic("unreachable")
}
func in(key string, m map[string]interface{}) bool {
_, ok := m[key]
return ok
}

@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

@ -1,22 +0,0 @@
# TOML Validator
If Go is installed, it's simple to try it out:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
You can see the types of every key in a TOML file with:
```bash
tomlv -types some-toml-file.toml
```
At the moment, only one error message is reported at a time. Error messages
include line numbers. No output means that the files given are valid TOML, or
there is a bug in `tomlv`.
Compatible with TOML version
[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)

@ -1,61 +0,0 @@
// Command tomlv validates TOML documents and prints each key's type.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"strings"
"text/tabwriter"
"github.com/BurntSushi/toml"
)
var (
flagTypes = false
)
func init() {
log.SetFlags(0)
flag.BoolVar(&flagTypes, "types", flagTypes,
"When set, the types of every defined key will be shown.")
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() < 1 {
flag.Usage()
}
for _, f := range flag.Args() {
var tmp interface{}
md, err := toml.DecodeFile(f, &tmp)
if err != nil {
log.Fatalf("Error in '%s': %s", f, err)
}
if flagTypes {
printTypes(md)
}
}
}
func printTypes(md toml.MetaData) {
tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
for _, key := range md.Keys() {
fmt.Fprintf(tabw, "%s%s\t%s\n",
strings.Repeat(" ", len(key)-1), key, md.Type(key...))
}
tabw.Flush()
}

File diff suppressed because it is too large

@ -1,590 +0,0 @@
package toml
import (
"bytes"
"fmt"
"log"
"net"
"testing"
"time"
)
func TestEncodeRoundTrip(t *testing.T) {
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time
Ipaddress net.IP
}
var inputs = Config{
13,
[]string{"one", "two", "three"},
3.145,
[]int{11, 2, 3, 4},
time.Now(),
net.ParseIP("192.168.59.254"),
}
var firstBuffer bytes.Buffer
e := NewEncoder(&firstBuffer)
err := e.Encode(inputs)
if err != nil {
t.Fatal(err)
}
var outputs Config
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
log.Printf("Could not decode:\n-----\n%s\n-----\n",
firstBuffer.String())
t.Fatal(err)
}
// could test each value individually, but I'm lazy
var secondBuffer bytes.Buffer
e2 := NewEncoder(&secondBuffer)
err = e2.Encode(outputs)
if err != nil {
t.Fatal(err)
}
if firstBuffer.String() != secondBuffer.String() {
t.Error(
firstBuffer.String(),
"\n\n is not identical to\n\n",
secondBuffer.String())
}
}
// XXX(burntsushi)
// I think these tests probably should be removed. They are good, but they
// ought to be obsolete by toml-test.
func TestEncode(t *testing.T) {
type Embedded struct {
Int int `toml:"_int"`
}
type NonStruct int
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
dateStr := "2014-05-11T19:30:40Z"
tests := map[string]struct {
input interface{}
wantOutput string
wantError error
}{
"bool field": {
input: struct {
BoolTrue bool
BoolFalse bool
}{true, false},
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
},
"int fields": {
input: struct {
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
}{1, 2, 3, 4, 5},
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
},
"uint fields": {
input: struct {
Uint uint
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
}{1, 2, 3, 4, 5},
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
"\nUint64 = 5\n",
},
"float fields": {
input: struct {
Float32 float32
Float64 float64
}{1.5, 2.5},
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
},
"string field": {
input: struct{ String string }{"foo"},
wantOutput: "String = \"foo\"\n",
},
"string field and unexported field": {
input: struct {
String string
unexported int
}{"foo", 0},
wantOutput: "String = \"foo\"\n",
},
"datetime field in UTC": {
input: struct{ Date time.Time }{date},
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
},
"datetime field as primitive": {
// Using a map here to fail if isStructOrMap() returns true for
// time.Time.
input: map[string]interface{}{
"Date": date,
"Int": 1,
},
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
},
"array fields": {
input: struct {
IntArray0 [0]int
IntArray3 [3]int
}{[0]int{}, [3]int{1, 2, 3}},
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
},
"slice fields": {
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
nil, []int{}, []int{1, 2, 3},
},
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
},
"datetime slices": {
input: struct{ DatetimeSlice []time.Time }{
[]time.Time{date, date},
},
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
dateStr, dateStr),
},
"nested arrays and slices": {
input: struct {
SliceOfArrays [][2]int
ArrayOfSlices [2][]int
SliceOfArraysOfSlices [][2][]int
ArrayOfSlicesOfArrays [2][][2]int
SliceOfMixedArrays [][2]interface{}
ArrayOfMixedSlices [2][]interface{}
}{
[][2]int{{1, 2}, {3, 4}},
[2][]int{{1, 2}, {3, 4}},
[][2][]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[2][][2]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[][2]interface{}{
{1, 2}, {"a", "b"},
},
[2][]interface{}{
{1, 2}, {"a", "b"},
},
},
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
ArrayOfSlices = [[1, 2], [3, 4]]
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
`,
},
"empty slice": {
input: struct{ Empty []interface{} }{[]interface{}{}},
wantOutput: "Empty = []\n",
},
"(error) slice with element type mismatch (string and integer)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
wantError: errArrayMixedElementTypes,
},
"(error) slice with element type mismatch (integer and float)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
wantError: errArrayMixedElementTypes,
},
"slice with elems of differing Go types, same TOML types": {
input: struct {
MixedInts []interface{}
MixedFloats []interface{}
}{
[]interface{}{
int(1), int8(2), int16(3), int32(4), int64(5),
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
},
[]interface{}{float32(1.5), float64(2.5)},
},
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
"MixedFloats = [1.5, 2.5]\n",
},
"(error) slice w/ element type mismatch (one is nested array)": {
input: struct{ Mixed []interface{} }{
[]interface{}{1, []interface{}{2}},
},
wantError: errArrayMixedElementTypes,
},
"(error) slice with 1 nil element": {
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
wantError: errArrayNilElement,
},
"(error) slice with 1 nil element (and other non-nil elements)": {
input: struct{ NilElement []interface{} }{
[]interface{}{1, nil},
},
wantError: errArrayNilElement,
},
"simple map": {
input: map[string]int{"a": 1, "b": 2},
wantOutput: "a = 1\nb = 2\n",
},
"map with interface{} value type": {
input: map[string]interface{}{"a": 1, "b": "c"},
wantOutput: "a = 1\nb = \"c\"\n",
},
"map with interface{} value type, some of which are structs": {
input: map[string]interface{}{
"a": struct{ Int int }{2},
"b": 1,
},
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
},
"nested map": {
input: map[string]map[string]int{
"a": {"b": 1},
"c": {"d": 2},
},
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
},
"nested struct": {
input: struct{ Struct struct{ Int int } }{
struct{ Int int }{1},
},
wantOutput: "[Struct]\n Int = 1\n",
},
"nested struct and non-struct field": {
input: struct {
Struct struct{ Int int }
Bool bool
}{struct{ Int int }{1}, true},
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
},
"2 nested structs": {
input: struct{ Struct1, Struct2 struct{ Int int } }{
struct{ Int int }{1}, struct{ Int int }{2},
},
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
},
"deeply nested structs": {
input: struct {
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
}{
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
struct{ Struct3 *struct{ Int int } }{nil},
},
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
"\n\n[Struct2]\n",
},
"nested struct with nil struct elem": {
input: struct {
Struct struct{ Inner *struct{ Int int } }
}{
struct{ Inner *struct{ Int int } }{nil},
},
wantOutput: "[Struct]\n",
},
"nested struct with no fields": {
input: struct {
Struct struct{ Inner struct{} }
}{
struct{ Inner struct{} }{struct{}{}},
},
wantOutput: "[Struct]\n [Struct.Inner]\n",
},
"struct with tags": {
input: struct {
Struct struct {
Int int `toml:"_int"`
} `toml:"_struct"`
Bool bool `toml:"_bool"`
}{
struct {
Int int `toml:"_int"`
}{1}, true,
},
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
},
"embedded struct": {
input: struct{ Embedded }{Embedded{1}},
wantOutput: "_int = 1\n",
},
"embedded *struct": {
input: struct{ *Embedded }{&Embedded{1}},
wantOutput: "_int = 1\n",
},
"nested embedded struct": {
input: struct {
Struct struct{ Embedded } `toml:"_struct"`
}{struct{ Embedded }{Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"nested embedded *struct": {
input: struct {
Struct struct{ *Embedded } `toml:"_struct"`
}{struct{ *Embedded }{&Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"embedded non-struct": {
input: struct{ NonStruct }{5},
wantOutput: "NonStruct = 5\n",
},
"array of tables": {
input: struct {
Structs []*struct{ Int int } `toml:"struct"`
}{
[]*struct{ Int int }{{1}, {3}},
},
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
},
"array of tables order": {
input: map[string]interface{}{
"map": map[string]interface{}{
"zero": 5,
"arr": []map[string]int{
{
"friend": 5,
},
},
},
},
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
},
"(error) top-level slice": {
input: []struct{ Int int }{{1}, {2}, {3}},
wantError: errNoKey,
},
"(error) slice of slice": {
input: struct {
Slices [][]struct{ Int int }
}{
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
},
wantError: errArrayNoTable,
},
"(error) map no string key": {
input: map[int]string{1: ""},
wantError: errNonString,
},
"(error) empty key name": {
input: map[string]int{"": 1},
wantError: errAnything,
},
"(error) empty map name": {
input: map[string]interface{}{
"": map[string]int{"v": 1},
},
wantError: errAnything,
},
}
for label, test := range tests {
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
}
}
func TestEncodeNestedTableArrays(t *testing.T) {
type song struct {
Name string `toml:"name"`
}
type album struct {
Name string `toml:"name"`
Songs []song `toml:"songs"`
}
type springsteen struct {
Albums []album `toml:"albums"`
}
value := springsteen{
[]album{
{"Born to Run",
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
{"Born in the USA",
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
},
}
expected := `[[albums]]
name = "Born to Run"
[[albums.songs]]
name = "Jungleland"
[[albums.songs]]
name = "Meeting Across the River"
[[albums]]
name = "Born in the USA"
[[albums.songs]]
name = "Glory Days"
[[albums.songs]]
name = "Dancing in the Dark"
`
encodeExpected(t, "nested table arrays", value, expected, nil)
}
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
type Alpha struct {
V int
}
type Beta struct {
V int
}
type Conf struct {
V int
A Alpha
B []Beta
}
val := Conf{
V: 1,
A: Alpha{2},
B: []Beta{{3}},
}
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
}
func TestEncodeWithOmitEmpty(t *testing.T) {
type simple struct {
Bool bool `toml:"bool,omitempty"`
String string `toml:"string,omitempty"`
Array [0]byte `toml:"array,omitempty"`
Slice []int `toml:"slice,omitempty"`
Map map[string]string `toml:"map,omitempty"`
}
var v simple
encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil)
v = simple{
Bool: true,
String: " ",
Slice: []int{2, 3, 4},
Map: map[string]string{"foo": "bar"},
}
expected := `bool = true
string = " "
slice = [2, 3, 4]
[map]
foo = "bar"
`
encodeExpected(t, "fields with omitempty are not omitted when non-empty",
v, expected, nil)
}
func TestEncodeWithOmitZero(t *testing.T) {
type simple struct {
Number int `toml:"number,omitzero"`
Real float64 `toml:"real,omitzero"`
Unsigned uint `toml:"unsigned,omitzero"`
}
value := simple{0, 0.0, uint(0)}
expected := ""
encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
value.Number = 10
value.Real = 20
value.Unsigned = 5
expected = `number = 10
real = 20.0
unsigned = 5
`
encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
}
func TestEncodeOmitemptyWithEmptyName(t *testing.T) {
type simple struct {
S []int `toml:",omitempty"`
}
v := simple{[]int{1, 2, 3}}
expected := "S = [1, 2, 3]\n"
encodeExpected(t, "simple with omitempty, no name, non-empty field",
v, expected, nil)
}
func TestEncodeAnonymousStructPointerField(t *testing.T) {
type Sub struct{}
type simple struct {
*Sub
}
value := simple{}
expected := ""
encodeExpected(t, "nil anonymous struct pointer field", value, expected, nil)
value = simple{Sub: &Sub{}}
expected = ""
encodeExpected(t, "non-nil anonymous struct pointer field", value, expected, nil)
}
func TestEncodeIgnoredFields(t *testing.T) {
type simple struct {
Number int `toml:"-"`
}
value := simple{}
expected := ""
encodeExpected(t, "ignored field", value, expected, nil)
}
func encodeExpected(
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
err := enc.Encode(val)
if err != wantErr {
if wantErr != nil {
if wantErr == errAnything && err != nil {
return
}
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
} else {
t.Errorf("%s: Encode failed: %s", label, err)
}
}
if err != nil {
return
}
if got := buf.String(); wantStr != got {
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
label, wantStr, got)
}
}
func ExampleEncoder_Encode() {
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
var config = map[string]interface{}{
"date": date,
"counts": []int{1, 1, 2, 3, 5, 8},
"hash": map[string]string{
"key1": "val1",
"key2": "val2",
},
}
buf := new(bytes.Buffer)
if err := NewEncoder(buf).Encode(config); err != nil {
log.Fatal(err)
}
fmt.Println(buf.String())
// Output:
// counts = [1, 1, 2, 3, 5, 8]
// date = 2010-03-14T18:00:00Z
//
// [hash]
// key1 = "val1"
// key2 = "val2"
}

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

@ -1 +0,0 @@
*.exe

@ -1,80 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar_test
import (
"archive/tar"
"bytes"
"fmt"
"io"
"log"
"os"
)
func Example() {
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new tar archive.
tw := tar.NewWriter(buf)
// Add some files to the archive.
var files = []struct {
Name, Body string
}{
{"readme.txt", "This archive contains some text files."},
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
hdr := &tar.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatalln(err)
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
log.Fatalln(err)
}
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
log.Fatalln(err)
}
// Open the tar archive for reading.
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Fatalln(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, tr); err != nil {
log.Fatalln(err)
}
fmt.Println()
}
// Output:
// Contents of readme.txt:
// This archive contains some text files.
// Contents of gopher.txt:
// Gopher names:
// George
// Geoffrey
// Gonzo
// Contents of todo.txt:
// Get animal handling license.
}

File diff suppressed because it is too large

@ -1,325 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
)
func TestFileInfoHeader(t *testing.T) {
fi, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "small.txt"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(5); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
// FileInfoHeader should error when passing nil FileInfo
if _, err := FileInfoHeader(nil, ""); err == nil {
t.Fatalf("Expected error when passing nil to FileInfoHeader")
}
}
func TestFileInfoHeaderDir(t *testing.T) {
fi, err := os.Stat("testdata")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "testdata/"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
// Ignoring c_ISGID for golang.org/issue/4867
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(0); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
}
func TestFileInfoHeaderSymlink(t *testing.T) {
h, err := FileInfoHeader(symlink{}, "some-target")
if err != nil {
t.Fatal(err)
}
if g, e := h.Name, "some-symlink"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Linkname, "some-target"; g != e {
t.Errorf("Linkname = %q; want %q", g, e)
}
}
type symlink struct{}
func (symlink) Name() string { return "some-symlink" }
func (symlink) Size() int64 { return 0 }
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool { return false }
func (symlink) Sys() interface{} { return nil }
func TestRoundTrip(t *testing.T) {
data := []byte("some file contents")
var b bytes.Buffer
tw := NewWriter(&b)
hdr := &Header{
Name: "file.txt",
Uid: 1 << 21, // too big for 8 octal digits
Size: int64(len(data)),
ModTime: time.Now(),
}
// tar only supports second precision.
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("tw.WriteHeader: %v", err)
}
if _, err := tw.Write(data); err != nil {
t.Fatalf("tw.Write: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("tw.Close: %v", err)
}
// Read it back.
tr := NewReader(&b)
rHdr, err := tr.Next()
if err != nil {
t.Fatalf("tr.Next: %v", err)
}
if !reflect.DeepEqual(rHdr, hdr) {
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
}
rData, err := ioutil.ReadAll(tr)
if err != nil {
t.Fatalf("Read: %v", err)
}
if !bytes.Equal(rData, data) {
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
}
}
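
The second-precision note above matters whenever headers are compared after a round trip: sub-second precision written into a header is lost, so the test strips it before writing. An equivalent, slightly more idiomatic form of the same normalization (a sketch; assumes the standard archive/tar and time packages):

```go
// normalizeModTime drops sub-second precision before writing a header,
// mirroring the nanosecond subtraction in TestRoundTrip above.
func normalizeModTime(hdr *tar.Header) {
	hdr.ModTime = hdr.ModTime.Truncate(time.Second)
}
```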
type headerRoundTripTest struct {
h *Header
fm os.FileMode
}
func TestHeaderRoundTrip(t *testing.T) {
golden := []headerRoundTripTest{
// regular file.
{
h: &Header{
Name: "test.txt",
Mode: 0644 | c_ISREG,
Size: 12,
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeReg,
},
fm: 0644,
},
// symbolic link.
{
h: &Header{
Name: "link.txt",
Mode: 0777 | c_ISLNK,
Size: 0,
ModTime: time.Unix(1360600852, 0),
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
},
// character device node.
{
h: &Header{
Name: "dev/null",
Mode: 0666 | c_ISCHR,
Size: 0,
ModTime: time.Unix(1360578951, 0),
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
},
// block device node.
{
h: &Header{
Name: "dev/sda",
Mode: 0660 | c_ISBLK,
Size: 0,
ModTime: time.Unix(1360578954, 0),
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
},
// directory.
{
h: &Header{
Name: "dir/",
Mode: 0755 | c_ISDIR,
Size: 0,
ModTime: time.Unix(1360601116, 0),
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
},
// fifo node.
{
h: &Header{
Name: "dev/initctl",
Mode: 0600 | c_ISFIFO,
Size: 0,
ModTime: time.Unix(1360578949, 0),
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
},
// setuid.
{
h: &Header{
Name: "bin/su",
Mode: 0755 | c_ISREG | c_ISUID,
Size: 23232,
ModTime: time.Unix(1355405093, 0),
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
},
// setguid.
{
h: &Header{
Name: "group.txt",
Mode: 0750 | c_ISREG | c_ISGID,
Size: 0,
ModTime: time.Unix(1360602346, 0),
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
},
// sticky.
{
h: &Header{
Name: "sticky.txt",
Mode: 0600 | c_ISREG | c_ISVTX,
Size: 7,
ModTime: time.Unix(1360602540, 0),
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
},
// hard link.
{
h: &Header{
Name: "hard.txt",
Mode: 0644 | c_ISREG,
Size: 0,
Linkname: "file.txt",
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeLink,
},
fm: 0644,
},
// More information.
{
h: &Header{
Name: "info.txt",
Mode: 0600 | c_ISREG,
Size: 0,
Uid: 1000,
Gid: 1000,
ModTime: time.Unix(1360602540, 0),
Uname: "slartibartfast",
Gname: "users",
Typeflag: TypeReg,
},
fm: 0600,
},
}
for i, g := range golden {
fi := g.h.FileInfo()
h2, err := FileInfoHeader(fi, "")
if err != nil {
t.Error(err)
continue
}
if strings.Contains(fi.Name(), "/") {
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
}
name := path.Base(g.h.Name)
if fi.IsDir() {
name += "/"
}
if got, want := h2.Name, name; got != want {
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
}
if got, want := h2.Size, g.h.Size; got != want {
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
}
if got, want := h2.Uid, g.h.Uid; got != want {
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
}
if got, want := h2.Gid, g.h.Gid; got != want {
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
}
if got, want := h2.Uname, g.h.Uname; got != want {
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
}
if got, want := h2.Gname, g.h.Gname; got != want {
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
}
if got, want := h2.Linkname, g.h.Linkname; got != want {
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
}
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
t.Logf("%#v %#v", g.h, fi.Sys())
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
}
if got, want := h2.Mode, g.h.Mode; got != want {
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
}
if got, want := fi.Mode(), g.fm; got != want {
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
}
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
}
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
}
if got, want := h2.ModTime, g.h.ModTime; got != want {
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
}
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
t.Errorf("i=%d: Sys didn't return original *Header", i)
}
}
}
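
Outside the test harness, the same FileInfoHeader round trip can be exercised against any real file using the standard library's archive/tar, which this vendored copy tracks. A self-contained sketch (substitute any existing path for testdata/small.txt):

```go
// Build a tar header from an os.FileInfo and print the fields the tests
// above assert carry over. Sketch using the standard library archive/tar.
package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
)

func main() {
	fi, err := os.Stat("testdata/small.txt") // any existing file works
	if err != nil {
		log.Fatal(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Name=%q Mode=%#o Size=%d\n", hdr.Name, hdr.Mode, hdr.Size)
}
```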

@ -1 +0,0 @@
Kilts

@ -1 +0,0 @@
Google.com

Binary file not shown.

@ -1,739 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"sort"
"strings"
"testing"
"testing/iotest"
"time"
)
type writerTestEntry struct {
header *Header
contents string
}
type writerTest struct {
file string // filename of expected output
entries []*writerTestEntry
}
var writerTests = []*writerTest{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
{
file: "testdata/writer.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
},
{
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
},
{
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
{
file: "testdata/writer-big.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
{
file: "testdata/writer-big-long.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
{
file: "testdata/ustar.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
},
},
},
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
{
file: "testdata/hardlink.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
},
{
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
},
},
},
}
// Render a byte slice as space-separated two-character hexadecimal values for easy visual inspection.
func bytestr(offset int, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("%04x ", offset)
for _, ch := range b {
switch {
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
s += fmt.Sprintf(" %c", ch)
default:
s += fmt.Sprintf(" %02x", ch)
}
}
return s
}
// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
na, nb := rowLen, rowLen
if na > len(a) {
na = len(a)
}
if nb > len(b) {
nb = len(b)
}
sa := bytestr(offset, a[0:na])
sb := bytestr(offset, b[0:nb])
if sa != sb {
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
}
a = a[na:]
b = b[nb:]
}
return s
}
func TestWriter(t *testing.T) {
testLoop:
for i, test := range writerTests {
expected, err := ioutil.ReadFile(test.file)
if err != nil {
t.Errorf("test %d: Unexpected error: %v", i, err)
continue
}
buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
big := false
for j, entry := range test.entries {
big = big || entry.header.Size > 1<<10
if err := tw.WriteHeader(entry.header); err != nil {
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
continue testLoop
}
if _, err := io.WriteString(tw, entry.contents); err != nil {
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
continue testLoop
}
}
// Only interested in Close failures for the small tests.
if err := tw.Close(); err != nil && !big {
t.Errorf("test %d: Failed closing archive: %v", i, err)
continue testLoop
}
actual := buf.Bytes()
if !bytes.Equal(expected, actual) {
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
i, bytediff(expected, actual))
}
if testing.Short() { // The second test is expensive.
break
}
}
}
func TestPax(t *testing.T) {
// Create an archive with a large name
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
// Force a PAX long name to be written
longName := strings.Repeat("ab", 100)
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Name = longName
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long file name")
}
}
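// For orientation (an illustration, not asserted by this test beyond the
// substring check above): the writer is expected to emit the long name as a
// PAX extended header, i.e. a synthetic entry whose name contains
// "PaxHeaders.0" and whose 512-byte-padded data holds records such as
//
//	210 path=abab...ab\n
//
// followed by the real file header and contents.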
func TestPaxSymlink(t *testing.T) {
// Create an archive with a large linkname
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get the long linkname back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Linkname != longLinkname {
t.Fatal("Couldn't recover long link name")
}
}
func TestPaxNonAscii(t *testing.T) {
// Create an archive with non-ASCII names. These should trigger a PAX header
// because PAX headers have a defined UTF-8 encoding.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// some sample data
chineseFilename := "文件名"
chineseGroupname := "組"
chineseUsername := "用戶名"
hdr.Name = chineseFilename
hdr.Gname = chineseGroupname
hdr.Uname = chineseUsername
contents := strings.Repeat(" ", int(hdr.Size))
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get the non-ASCII names back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != chineseFilename {
t.Fatal("Couldn't recover unicode name")
}
if hdr.Gname != chineseGroupname {
t.Fatal("Couldn't recover unicode group")
}
if hdr.Uname != chineseUsername {
t.Fatal("Couldn't recover unicode user")
}
}
func TestPaxXattrs(t *testing.T) {
xattrs := map[string]string{
"user.key": "value",
}
// Create an archive with an xattr
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := "Kilts"
hdr.Xattrs = xattrs
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get the xattrs back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
hdr.Xattrs, xattrs)
}
}
func TestPaxHeadersSorted(t *testing.T) {
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Xattrs = map[string]string{
"foo": "foo",
"bar": "bar",
"baz": "baz",
"qux": "qux",
}
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// The xattr records should appear in sorted key order.
indices := []int{
bytes.Index(buf.Bytes(), []byte("bar=bar")),
bytes.Index(buf.Bytes(), []byte("baz=baz")),
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
func TestUSTARLongName(t *testing.T) {
// Create an archive with a path that failed to split with the USTAR extension in previous versions.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long name to be written. The name was taken from a practical example
// that failed to split, with every character replaced by a digit to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
hdr.Name = longName
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long name")
}
}
func TestValidTypeflagWithPAXHeader(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
fileName := strings.Repeat("ab", 100)
hdr := &Header{
Name: fileName,
Size: 4,
Typeflag: 0,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
if _, err := tw.Write([]byte("fooo")); err != nil {
t.Fatalf("Failed to write the file's data: %s", err)
}
tw.Close()
tr := NewReader(&buffer)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("Failed to read header: %s", err)
}
if header.Typeflag != 0 {
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
}
}
}
func TestWriteAfterClose(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
hdr := &Header{
Name: "small.txt",
Size: 5,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
tw.Close()
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
}
}
func TestSplitUSTARPath(t *testing.T) {
var sr = strings.Repeat
var vectors = []struct {
input string // Input path
prefix string // Expected output prefix
suffix string // Expected output suffix
ok bool // Split success?
}{
{"", "", "", false},
{"abc", "", "", false},
{"用戶名", "", "", false},
{sr("a", fileNameSize), "", "", false},
{sr("a", fileNameSize) + "/", "", "", false},
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
{sr("a", fileNamePrefixSize) + "/", "", "", false},
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
{sr("a", fileNameSize+1), "", "", false},
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
}
for _, v := range vectors {
prefix, suffix, ok := splitUSTARPath(v.input)
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
}
}
}
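// A minimal sketch of a splitter consistent with the vectors above (a
// hedged illustration, not necessarily the vendored splitUSTARPath; the
// ASCII check is inlined rather than assuming an isASCII helper):
func splitUSTARPathSketch(name string) (prefix, suffix string, ok bool) {
const nameSize, prefixSize = 100, 155 // USTAR name and prefix field widths
for i := 0; i < len(name); i++ {
if name[i] >= 0x80 {
return "", "", false // USTAR fields cannot hold non-ASCII bytes
}
}
length := len(name)
if length <= nameSize {
return "", "", false // already fits in the name field alone
} else if length > prefixSize+1 {
length = prefixSize + 1 // room for prefix, '/', and one suffix byte
} else if name[length-1] == '/' {
length-- // never split on a trailing slash
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // length of the suffix
if i <= 0 || nlen == 0 || nlen > nameSize || i > prefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}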
func TestFormatPAXRecord(t *testing.T) {
var medName = strings.Repeat("CD", 50)
var longName = strings.Repeat("AB", 100)
var vectors = []struct {
inputKey string
inputVal string
output string
}{
{"k", "v", "6 k=v\n"},
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
}
for _, v := range vectors {
output := formatPAXRecord(v.inputKey, v.inputVal)
if output != v.output {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inputKey, v.inputVal, output, v.output)
}
}
}
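// A minimal sketch of the encoding exercised above: each record is
// "%d %s=%s\n", where the leading decimal length counts the whole record,
// including its own digits, so the size may need one recomputation (a
// hedged illustration; the vendored formatPAXRecord may differ in details):
func formatPAXRecordSketch(k, v string) string {
const padding = 3 // the ' ', '=', and '\n' bytes
size := len(k) + len(v) + padding
size += len(fmt.Sprint(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
if len(record) != size {
// Appending the length digits changed the total length; recompute once.
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}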
func TestFitsInBase256(t *testing.T) {
var vectors = []struct {
input int64
width int
ok bool
}{
{+1, 8, true},
{0, 8, true},
{-1, 8, true},
{1 << 56, 8, false},
{(1 << 56) - 1, 8, true},
{-1 << 56, 8, true},
{(-1 << 56) - 1, 8, false},
{121654, 8, true},
{-9849849, 8, true},
{math.MaxInt64, 9, true},
{0, 9, true},
{math.MinInt64, 9, true},
{math.MaxInt64, 12, true},
{0, 12, true},
{math.MinInt64, 12, true},
}
for _, v := range vectors {
ok := fitsInBase256(v.width, v.input)
if ok != v.ok {
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
}
}
}
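// A minimal sketch of the predicate under test: an n-byte field stores a
// base-256 (binary) value in two's complement with the top bit of the
// leading byte reserved as a flag, leaving 8*(n-1) usable bits; any int64
// fits once the field is 9 bytes or wider (a hedged illustration consistent
// with the vectors above):
func fitsInBase256Sketch(n int, v int64) bool {
binBits := uint(n-1) * 8
return n >= 9 || (v >= -1<<binBits && v < 1<<binBits)
}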
func TestFormatNumeric(t *testing.T) {
var vectors = []struct {
input int64
output string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
output := make([]byte, len(v.output))
f.formatNumeric(output, v.input)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
}
}
if string(output) != v.output {
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
}
}
}
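// A minimal sketch of the binary path checked above: the value is written
// big-endian in two's complement and the 0x80 flag bit is set on the leading
// byte (the octal fallback and the failure path are omitted; a hedged
// illustration, not the vendored formatNumeric):
func formatBase256Sketch(b []byte, x int64) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x) // emit the low byte, filling from the right
x >>= 8
}
b[0] |= 0x80 // flag marking base-256 rather than octal encoding
}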
func TestFormatPAXTime(t *testing.T) {
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
verify := func(ts time.Time, s string) {
p := formatPAXTime(ts)
if p != s {
t.Errorf("for %v, expected %s, got %s", ts, s, p)
}
}
verify(t1, "946724400")
verify(t2, "946724400.000000100")
verify(t3, "-315579600")
verify(t4, "0")
}
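// A minimal sketch of the formatting verified above: whole seconds print
// plainly and a non-zero nanosecond part is appended as nine digits. Note
// that pre-epoch times with fractional seconds would need extra care, since
// Unix() floors while Nanosecond() stays non-negative; neither the vectors
// above nor this hedged sketch exercise that case:
func formatPAXTimeSketch(ts time.Time) string {
secs, nsecs := ts.Unix(), ts.Nanosecond()
if nsecs == 0 {
return fmt.Sprintf("%d", secs)
}
return fmt.Sprintf("%d.%09d", secs, nsecs)
}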
@ -1,255 +0,0 @@
package winio
import (
"io"
"io/ioutil"
"os"
"syscall"
"testing"
)
var testFileName string
func TestMain(m *testing.M) {
f, err := ioutil.TempFile("", "tmp")
if err != nil {
panic(err)
}
testFileName = f.Name()
f.Close()
code := m.Run()
os.Remove(testFileName) // a deferred Remove would be skipped by os.Exit
os.Exit(code)
}
func makeTestFile(makeADS bool) error {
os.Remove(testFileName)
f, err := os.Create(testFileName)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write([]byte("testing 1 2 3\n"))
if err != nil {
return err
}
if makeADS {
a, err := os.Create(testFileName + ":ads.txt")
if err != nil {
return err
}
defer a.Close()
_, err = a.Write([]byte("alternate data stream\n"))
if err != nil {
return err
}
}
return nil
}
func TestBackupRead(t *testing.T) {
err := makeTestFile(true)
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if len(b) == 0 {
t.Fatal("no data")
}
}
func TestBackupStreamRead(t *testing.T) {
err := makeTestFile(true)
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
br := NewBackupStreamReader(r)
gotData := false
gotAltData := false
for {
hdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
switch hdr.Id {
case BackupData:
if gotData {
t.Fatal("duplicate data")
}
if hdr.Name != "" {
t.Fatalf("unexpected name %s", hdr.Name)
}
b, err := ioutil.ReadAll(br)
if err != nil {
t.Fatal(err)
}
if string(b) != "testing 1 2 3\n" {
t.Fatalf("incorrect data %v", b)
}
gotData = true
case BackupAlternateData:
if gotAltData {
t.Fatal("duplicate alt data")
}
if hdr.Name != ":ads.txt:$DATA" {
t.Fatalf("incorrect name %s", hdr.Name)
}
b, err := ioutil.ReadAll(br)
if err != nil {
t.Fatal(err)
}
if string(b) != "alternate data stream\n" {
t.Fatalf("incorrect data %v", b)
}
gotAltData = true
default:
t.Fatalf("unknown stream ID %d", hdr.Id)
}
}
if !gotData || !gotAltData {
t.Fatal("missing stream")
}
}
func TestBackupStreamWrite(t *testing.T) {
f, err := os.Create(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
w := NewBackupFileWriter(f, false)
defer w.Close()
data := "testing 1 2 3\n"
altData := "alternate stream\n"
br := NewBackupStreamWriter(w)
err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))})
if err != nil {
t.Fatal(err)
}
n, err := br.Write([]byte(data))
if err != nil {
t.Fatal(err)
}
if n != len(data) {
t.Fatal("short write")
}
err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"})
if err != nil {
t.Fatal(err)
}
n, err = br.Write([]byte(altData))
if err != nil {
t.Fatal(err)
}
if n != len(altData) {
t.Fatal("short write")
}
f.Close()
b, err := ioutil.ReadFile(testFileName)
if err != nil {
t.Fatal(err)
}
if string(b) != data {
t.Fatalf("wrong data %v", b)
}
b, err = ioutil.ReadFile(testFileName + ":ads.txt")
if err != nil {
t.Fatal(err)
}
if string(b) != altData {
t.Fatalf("wrong data %v", b)
}
}
func makeSparseFile() error {
os.Remove(testFileName)
f, err := os.Create(testFileName)
if err != nil {
return err
}
defer f.Close()
const (
FSCTL_SET_SPARSE = 0x000900c4
FSCTL_SET_ZERO_DATA = 0x000980c8
)
err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
if err != nil {
return err
}
_, err = f.Write([]byte("testing 1 2 3\n"))
if err != nil {
return err
}
_, err = f.Seek(1000000, 0)
if err != nil {
return err
}
_, err = f.Write([]byte("more data later\n"))
if err != nil {
return err
}
return nil
}
func TestBackupSparseFile(t *testing.T) {
err := makeSparseFile()
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
br := NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
t.Log(hdr)
}
}
@ -1,84 +0,0 @@
package backuptar
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
)
func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
for _, k := range keys {
if _, ok := m[k]; !ok {
t.Error(k, "not present in tar header")
}
}
}
func TestRoundTrip(t *testing.T) {
f, err := ioutil.TempFile("", "tst")
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
t.Fatal(err)
}
if _, err = f.Seek(0, 0); err != nil {
t.Fatal(err)
}
fi, err := f.Stat()
if err != nil {
t.Fatal(err)
}
bi, err := winio.GetFileBasicInfo(f)
if err != nil {
t.Fatal(err)
}
br := winio.NewBackupFileReader(f, true)
defer br.Close()
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
if err != nil {
t.Fatal(err)
}
tr := tar.NewReader(&buf)
hdr, err := tr.Next()
if err != nil {
t.Fatal(err)
}
name, size, bi2, err := FileInfoFromHeader(hdr)
if err != nil {
t.Fatal(err)
}
if name != filepath.ToSlash(f.Name()) {
t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
}
if size != fi.Size() {
t.Errorf("got size %d, expected %d", size, fi.Size())
}
if !reflect.DeepEqual(*bi, *bi2) {
t.Errorf("got %#v, expected %#v", *bi, *bi2)
}
ensurePresent(t, hdr.Winheaders, "fileattr", "sd")
}
@ -1,262 +0,0 @@
package winio
import (
"bufio"
"io"
"net"
"os"
"syscall"
"testing"
"time"
)
var testPipeName = `\\.\pipe\winiotestpipe`
func TestDialUnknownFailsImmediately(t *testing.T) {
_, err := DialPipe(testPipeName, nil)
if err.(*os.PathError).Err != syscall.ENOENT {
t.Fatalf("expected ENOENT got %v", err)
}
}
func TestDialListenerTimesOut(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
d := 10 * time.Millisecond
_, err = DialPipe(testPipeName, &d)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
func TestDialAccessDeniedWithRestrictedSD(t *testing.T) {
c := PipeConfig{
SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)",
}
l, err := ListenPipe(testPipeName, &c)
if err != nil {
t.Fatal(err)
}
defer l.Close()
_, err = DialPipe(testPipeName, nil)
if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED {
t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err)
}
}
func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) {
l, err := ListenPipe(testPipeName, cfg)
if err != nil {
return
}
defer l.Close()
type response struct {
c net.Conn
err error
}
ch := make(chan response)
go func() {
c, err := l.Accept()
ch <- response{c, err}
}()
c, err := DialPipe(testPipeName, nil)
if err != nil {
return
}
r := <-ch
if err = r.err; err != nil {
c.Close()
return
}
client = c
server = r.c
return
}
func TestReadTimeout(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
c.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
buf := make([]byte, 10)
_, err = c.Read(buf)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
func server(l net.Listener, ch chan int) {
c, err := l.Accept()
if err != nil {
panic(err)
}
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
s, err := rw.ReadString('\n')
if err != nil {
panic(err)
}
_, err = rw.WriteString("got " + s)
if err != nil {
panic(err)
}
err = rw.Flush()
if err != nil {
panic(err)
}
c.Close()
ch <- 1
}
func TestFullListenDialReadWrite(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
ch := make(chan int)
go server(l, ch)
c, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
_, err = rw.WriteString("hello world\n")
if err != nil {
t.Fatal(err)
}
err = rw.Flush()
if err != nil {
t.Fatal(err)
}
s, err := rw.ReadString('\n')
if err != nil {
t.Fatal(err)
}
ms := "got hello world\n"
if s != ms {
t.Errorf("expected '%s', got '%s'", ms, s)
}
<-ch
}
func TestCloseAbortsListen(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
ch := make(chan error)
go func() {
_, err := l.Accept()
ch <- err
}()
time.Sleep(30 * time.Millisecond)
l.Close()
err = <-ch
if err != ErrPipeListenerClosed {
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
}
}
func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) {
b := make([]byte, 10)
w.Close()
n, err := r.Read(b)
if n > 0 {
t.Errorf("unexpected byte count %d", n)
}
if err != io.EOF {
t.Errorf("expected EOF: %v", err)
}
}
func TestCloseClientEOFServer(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
ensureEOFOnClose(t, c, s)
}
func TestCloseServerEOFClient(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
ensureEOFOnClose(t, s, c)
}
func TestCloseWriteEOF(t *testing.T) {
cfg := &PipeConfig{
MessageMode: true,
}
c, s, err := getConnection(cfg)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
type closeWriter interface {
CloseWrite() error
}
err = c.(closeWriter).CloseWrite()
if err != nil {
t.Fatal(err)
}
b := make([]byte, 10)
_, err = s.Read(b)
if err != io.EOF {
t.Fatal(err)
}
}
func TestAcceptAfterCloseFails(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
l.Close()
_, err = l.Accept()
if err != ErrPipeListenerClosed {
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
}
}
func TestDialTimesOutByDefault(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
_, err = DialPipe(testPipeName, nil)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
@ -1,17 +0,0 @@
package winio
import "testing"
func TestRunWithUnavailablePrivilege(t *testing.T) {
err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil })
if _, ok := err.(*PrivilegeError); err == nil || !ok {
t.Fatal("expected PrivilegeError")
}
}
func TestRunWithPrivileges(t *testing.T) {
err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil })
if err != nil {
t.Fatal(err)
}
}
@ -1,26 +0,0 @@
package winio
import "testing"
func TestLookupInvalidSid(t *testing.T) {
_, err := LookupSidByName(".\\weoifjdsklfj")
aerr, ok := err.(*AccountLookupError)
if !ok || aerr.Err != cERROR_NONE_MAPPED {
t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
}
}
func TestLookupValidSid(t *testing.T) {
sid, err := LookupSidByName("Everyone")
if err != nil || sid != "S-1-1-0" {
t.Fatal("expected S-1-1-0, got %s, %s", sid, err)
}
}
func TestLookupEmptyNameFails(t *testing.T) {
_, err := LookupSidByName(".\\weoifjdsklfj")
aerr, ok := err.(*AccountLookupError)
if !ok || aerr.Err != cERROR_NONE_MAPPED {
t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
}
}
@ -1,138 +0,0 @@
package wim
import (
"encoding/binary"
"io"
"io/ioutil"
"github.com/Microsoft/go-winio/wim/lzx"
)
const chunkSize = 32768 // Compressed resource chunk size
type compressedReader struct {
r *io.SectionReader
d io.ReadCloser
chunks []int64
curChunk int
originalSize int64
}
func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
nchunks := (originalSize + chunkSize - 1) / chunkSize
var base int64
chunks := make([]int64, nchunks)
if originalSize <= 0xffffffff {
// 32-bit chunk offsets
base = (nchunks - 1) * 4
chunks32 := make([]uint32, nchunks-1)
err := binary.Read(r, binary.LittleEndian, chunks32)
if err != nil {
return nil, err
}
for i, n := range chunks32 {
chunks[i+1] = int64(n)
}
} else {
// 64-bit chunk offsets
base = (nchunks - 1) * 8
err := binary.Read(r, binary.LittleEndian, chunks[1:])
if err != nil {
return nil, err
}
}
for i, c := range chunks {
chunks[i] = c + base
}
cr := &compressedReader{
r: r,
chunks: chunks,
originalSize: originalSize,
}
err := cr.reset(int(offset / chunkSize))
if err != nil {
return nil, err
}
suboff := offset % chunkSize
if suboff != 0 {
_, err := io.CopyN(ioutil.Discard, cr.d, suboff)
if err != nil {
return nil, err
}
}
return cr, nil
}
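// Worked example (illustrative numbers, not taken from a real WIM): with
// originalSize = 100000 there are 4 chunks, so the resource begins with 3
// uint32 entries holding the offsets of chunks 1..3 relative to the end of
// that table. base = 3*4 = 12 is added to each, turning them into offsets
// from the start of the resource, and chunk 0 starts at base itself
// (chunks[0] = 0 + 12).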
func (r *compressedReader) chunkOffset(n int) int64 {
if n == len(r.chunks) {
return r.r.Size()
}
return r.chunks[n]
}
func (r *compressedReader) chunkSize(n int) int {
return int(r.chunkOffset(n+1) - r.chunkOffset(n))
}
func (r *compressedReader) uncompressedSize(n int) int {
if n < len(r.chunks)-1 {
return chunkSize
}
size := int(r.originalSize % chunkSize)
if size == 0 {
size = chunkSize
}
return size
}
func (r *compressedReader) reset(n int) error {
if n >= len(r.chunks) {
return io.EOF
}
if r.d != nil {
r.d.Close()
}
r.curChunk = n
size := r.chunkSize(n)
uncompressedSize := r.uncompressedSize(n)
section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
if size != uncompressedSize {
d, err := lzx.NewReader(section, uncompressedSize)
if err != nil {
return err
}
r.d = d
} else {
r.d = ioutil.NopCloser(section)
}
return nil
}
func (r *compressedReader) Read(b []byte) (int, error) {
for {
n, err := r.d.Read(b)
if err != io.EOF {
return n, err
}
err = r.reset(r.curChunk + 1)
if err != nil {
return n, err
}
}
}
func (r *compressedReader) Close() error {
var err error
if r.d != nil {
err = r.d.Close()
r.d = nil
}
return err
}
@ -1,614 +0,0 @@
// Package lzx implements a decompressor for the WIM variant of the
// LZX compression algorithm.
//
// The LZX algorithm is an earlier variant of LZX DELTA, which is documented
// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx.
package lzx
import (
"bytes"
"encoding/binary"
"errors"
"io"
)
const (
maincodecount = 496
maincodesplit = 256
lencodecount = 249
lenshift = 9
codemask = 0x1ff
tablebits = 9
tablesize = 1 << tablebits
maxBlockSize = 32768
windowSize = 32768
maxTreePathLen = 16
e8filesize = 12000000
maxe8offset = 0x3fffffff
verbatimBlock = 1
alignedOffsetBlock = 2
uncompressedBlock = 3
)
var footerBits = [...]byte{
0, 0, 0, 0, 1, 1, 2, 2,
3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14,
}
var basePosition = [...]uint16{
0, 1, 2, 3, 4, 6, 8, 12,
16, 24, 32, 48, 64, 96, 128, 192,
256, 384, 512, 768, 1024, 1536, 2048, 3072,
4096, 6144, 8192, 12288, 16384, 24576, 32768,
}
var (
errCorrupt = errors.New("LZX data corrupt")
)
// Reader is an interface used by the decompressor to access
// the input stream. If the provided io.Reader does not implement
// Reader, then a bufio.Reader is used.
type Reader interface {
io.Reader
io.ByteReader
}
type decompressor struct {
r io.Reader
err error
unaligned bool
nbits byte
c uint32
lru [3]uint16
uncompressed int
windowReader *bytes.Reader
mainlens [maincodecount]byte
lenlens [lencodecount]byte
window [windowSize]byte
b []byte
bv int
bo int
}
//go:noinline
func (f *decompressor) fail(err error) {
if f.err == nil {
f.err = err
}
f.bo = 0
f.bv = 0
}
func (f *decompressor) ensureAtLeast(n int) error {
if f.bv-f.bo >= n {
return nil
}
if f.err != nil {
return f.err
}
if f.bv != f.bo {
copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv])
}
n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else {
f.fail(err)
}
return err
}
f.bv = f.bv - f.bo + n
f.bo = 0
return nil
}
// feed retrieves another 16-bit word from the stream and consumes
// it into f.c. It returns false if there are no more bytes available.
// Otherwise, on error, it sets f.err.
func (f *decompressor) feed() bool {
err := f.ensureAtLeast(2)
if err != nil {
if err == io.ErrUnexpectedEOF {
return false
}
}
f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits)
f.nbits += 16
f.bo += 2
return true
}
// getBits retrieves the next n bits from the byte stream. n
// must be <= 16. It sets f.err on error.
func (f *decompressor) getBits(n byte) uint16 {
if f.nbits < n {
if !f.feed() {
f.fail(io.ErrUnexpectedEOF)
}
}
c := uint16(f.c >> (32 - n))
f.c <<= n
f.nbits -= n
return c
}
type huffman struct {
extra [][]uint16
maxbits byte
table [tablesize]uint16
}
// buildTable builds a huffman decoding table from a slice of code lengths,
// one per code, in order. Each code length must be <= maxTreePathLen.
// See https://en.wikipedia.org/wiki/Canonical_Huffman_code.
func buildTable(codelens []byte) *huffman {
// Determine the number of codes of each length, and the
// maximum length.
var count [maxTreePathLen + 1]uint
var max byte
for _, cl := range codelens {
count[cl]++
if max < cl {
max = cl
}
}
if max == 0 {
return &huffman{}
}
// Determine the first code of each length.
var first [maxTreePathLen + 1]uint
code := uint(0)
for i := byte(1); i <= max; i++ {
code <<= 1
first[i] = code
code += count[i]
}
if code != 1<<max {
return nil
}
// Build a table for code lookup. For code sizes < max,
// put all possible suffixes for the code into the table, too.
// For max > tablebits, split long codes into additional tables
// of suffixes of max-tablebits length.
h := &huffman{maxbits: max}
if max > tablebits {
core := first[tablebits+1] / 2 // Number of codes that fit without extra tables
nextra := 1<<tablebits - core // Number of extra entries
h.extra = make([][]uint16, nextra)
for code := core; code < 1<<tablebits; code++ {
h.table[code] = uint16(code - core)
h.extra[code-core] = make([]uint16, 1<<(max-tablebits))
}
}
for i, cl := range codelens {
if cl != 0 {
code := first[cl]
first[cl]++
v := uint16(cl)<<lenshift | uint16(i)
if cl <= tablebits {
extendedCode := code << (tablebits - cl)
for j := uint(0); j < 1<<(tablebits-cl); j++ {
h.table[extendedCode+j] = v
}
} else {
prefix := code >> (cl - tablebits)
suffix := code & (1<<(cl-tablebits) - 1)
extendedCode := suffix << (max - cl)
for j := uint(0); j < 1<<(max-cl); j++ {
h.extra[h.table[prefix]][extendedCode+j] = v
}
}
}
}
return h
}
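// Worked example (hand-computed from the construction above): codelens
// {2, 1, 3, 3} gives first[1] = 0, first[2] = 2, first[3] = 6, so the
// canonical codes are symbol 1 -> 0, symbol 0 -> 10, symbol 2 -> 110,
// symbol 3 -> 111, and the completeness check 6+2 == 1<<3 passes.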
// getCode retrieves the next code using the provided
// huffman tree. It sets f.err on error.
func (f *decompressor) getCode(h *huffman) uint16 {
if h.maxbits > 0 {
if f.nbits < maxTreePathLen {
f.feed()
}
// For codes with length < tablebits, it doesn't matter
// what the remainder of the bits used for table lookup
// are, since entries with all possible suffixes were
// added to the table.
c := h.table[f.c>>(32-tablebits)]
if c >= 1<<lenshift {
// The code is already in c.
} else {
c = h.extra[c][f.c<<tablebits>>(32-(h.maxbits-tablebits))]
}
n := byte(c >> lenshift)
if f.nbits >= n {
// Only consume the length of the code, not the maximum
// code length.
f.c <<= n
f.nbits -= n
return c & codemask
}
f.fail(io.ErrUnexpectedEOF)
return 0
}
// This is an empty tree. It should not be used.
f.fail(errCorrupt)
return 0
}
// mod17 computes the value mod 17.
func mod17(b byte) byte {
for b >= 17 {
b -= 17
}
return b
}
// readTree updates the huffman tree path lengths in lens by
// reading and decoding lengths from the byte stream. lens
// should be prepopulated with the previous block's tree's path
// lengths. For the first block, lens should be zero.
func (f *decompressor) readTree(lens []byte) error {
// Get the pre-tree for the main tree.
var pretreeLen [20]byte
for i := range pretreeLen {
pretreeLen[i] = byte(f.getBits(4))
}
if f.err != nil {
return f.err
}
h := buildTable(pretreeLen[:])
// The lengths are encoded as a series of huffman codes
// encoded by the pre-tree.
for i := 0; i < len(lens); {
c := byte(f.getCode(h))
if f.err != nil {
return f.err
}
switch {
case c <= 16: // length is delta from previous length
lens[i] = mod17(lens[i] + 17 - c)
i++
case c == 17: // next n + 4 lengths are zero
zeroes := int(f.getBits(4)) + 4
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 18: // next n + 20 lengths are zero
zeroes := int(f.getBits(5)) + 20
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 19: // next n + 4 lengths all have the same value
same := int(f.getBits(1)) + 4
if i+same > len(lens) {
return errCorrupt
}
c = byte(f.getCode(h))
if c > 16 {
return errCorrupt
}
l := mod17(lens[i] + 17 - c)
for j := 0; j < same; j++ {
lens[i+j] = l
}
i += same
default:
return errCorrupt
}
}
if f.err != nil {
return f.err
}
return nil
}
func (f *decompressor) readBlockHeader() (byte, uint16, error) {
// If the previous block was an unaligned uncompressed block, restore
// 2-byte alignment.
if f.unaligned {
err := f.ensureAtLeast(1)
if err != nil {
return 0, 0, err
}
f.bo++
f.unaligned = false
}
blockType := f.getBits(3)
full := f.getBits(1)
var blockSize uint16
if full != 0 {
blockSize = maxBlockSize
} else {
blockSize = f.getBits(16)
if blockSize > maxBlockSize {
return 0, 0, errCorrupt
}
}
if f.err != nil {
return 0, 0, f.err
}
switch blockType {
case verbatimBlock, alignedOffsetBlock:
// The caller will read the huffman trees.
case uncompressedBlock:
if f.nbits > 16 {
panic("impossible: more than one 16-bit word remains")
}
// Drop the remaining bits in the current 16-bit word
// If there are no bits left, discard a full 16-bit word.
n := f.nbits
if n == 0 {
n = 16
}
f.getBits(n)
// Read the LRU values for the next block.
err := f.ensureAtLeast(12)
if err != nil {
return 0, 0, err
}
f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4]))
f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8]))
f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12]))
f.bo += 12
default:
return 0, 0, errCorrupt
}
return byte(blockType), blockSize, nil
}
// readTrees reads the two or three huffman trees for the current block.
// readAligned specifies whether to read the aligned offset tree.
func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) {
// Aligned offset blocks start with a small aligned offset tree.
if readAligned {
var alignedLen [8]byte
for i := range alignedLen {
alignedLen[i] = byte(f.getBits(3))
}
aligned = buildTable(alignedLen[:])
if aligned == nil {
err = errors.New("corrupt")
return
}
}
// The main tree is encoded in two parts.
err = f.readTree(f.mainlens[:maincodesplit])
if err != nil {
return
}
err = f.readTree(f.mainlens[maincodesplit:])
if err != nil {
return
}
main = buildTable(f.mainlens[:])
if main == nil {
err = errors.New("corrupt")
return
}
// The length tree is encoded in a single part.
err = f.readTree(f.lenlens[:])
if err != nil {
return
}
length = buildTable(f.lenlens[:])
if length == nil {
err = errors.New("corrupt")
return
}
err = f.err
return
}
// readCompressedBlock decodes a compressed block, writing into the window
// starting at start and ending at end, and using the provided huffman trees.
func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) {
i := start
for i < end {
main := f.getCode(hmain)
if f.err != nil {
break
}
if main < 256 {
// Literal byte.
f.window[i] = byte(main)
i++
continue
}
// This is a match backward in the window. Determine
// the offset and length.
matchlen := (main - 256) % 8
slot := (main - 256) / 8
// The length is either the low bits of the code,
// or if this is 7, is encoded with the length tree.
if matchlen == 7 {
matchlen += f.getCode(hlength)
}
matchlen += 2
var matchoffset uint16
if slot < 3 {
// The offset is one of the LRU values.
matchoffset = f.lru[slot]
f.lru[slot] = f.lru[0]
f.lru[0] = matchoffset
} else {
// The offset is encoded as a combination of the
// slot and more bits from the bit stream.
offsetbits := footerBits[slot]
var verbatimbits, alignedbits uint16
if offsetbits > 0 {
if haligned != nil && offsetbits >= 3 {
// This is an aligned offset block. Combine
// the bits written verbatim with the aligned
// offset tree code.
verbatimbits = f.getBits(offsetbits-3) * 8
alignedbits = f.getCode(haligned)
} else {
// There are no aligned offset bits to read,
// only verbatim bits.
verbatimbits = f.getBits(offsetbits)
alignedbits = 0
}
}
matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2
// Update the LRU cache.
f.lru[2] = f.lru[1]
f.lru[1] = f.lru[0]
f.lru[0] = matchoffset
}
if matchoffset <= i && matchlen <= end-i {
copyend := i + matchlen
for ; i < copyend; i++ {
f.window[i] = f.window[i-matchoffset]
}
} else {
f.fail(errCorrupt)
break
}
}
return int(i - start), f.err
}
// readBlock decodes the current block and returns the number of uncompressed bytes.
func (f *decompressor) readBlock(start uint16) (int, error) {
blockType, size, err := f.readBlockHeader()
if err != nil {
return 0, err
}
if blockType == uncompressedBlock {
if size%2 == 1 {
// Remember to realign the byte stream at the next block.
f.unaligned = true
}
copied := 0
if f.bo < f.bv {
copied = int(size)
s := int(start)
if copied > f.bv-f.bo {
copied = f.bv - f.bo
}
copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied])
f.bo += copied
}
n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size])
return copied + n, err
}
hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock)
if err != nil {
return 0, err
}
return f.readCompressedBlock(start, start+size, hmain, hlength, haligned)
}
// decodeE8 reverses the 0xe8 x86 instruction encoding that was performed
// on the uncompressed data before it was compressed.
func decodeE8(b []byte, off int64) {
if off > maxe8offset || len(b) < 10 {
return
}
for i := 0; i < len(b)-10; i++ {
if b[i] == 0xe8 {
currentPtr := int32(off) + int32(i)
abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5]))
if abs >= -currentPtr && abs < e8filesize {
var rel int32
if abs >= 0 {
rel = abs - currentPtr
} else {
rel = abs + e8filesize
}
binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel))
}
i += 4
}
}
}
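// Worked example (illustrative offsets, not from a real image): an 0xe8
// CALL at file offset 0x1000 whose stored operand is the absolute target
// 0x2000 satisfies abs >= -currentPtr and abs < e8filesize, so it is
// rewritten back to the relative displacement 0x2000 - 0x1000 = 0x1000;
// negative operands take the other branch and have e8filesize added.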
func (f *decompressor) Read(b []byte) (int, error) {
// Read and uncompress everything.
if f.windowReader == nil {
n := 0
for n < f.uncompressed {
k, err := f.readBlock(uint16(n))
if err != nil {
return 0, err
}
n += k
}
decodeE8(f.window[:f.uncompressed], 0)
f.windowReader = bytes.NewReader(f.window[:f.uncompressed])
}
// Just read directly from the window.
return f.windowReader.Read(b)
}
func (f *decompressor) Close() error {
return nil
}
// NewReader returns a new io.ReadCloser that decompresses a
// WIM LZX stream until uncompressedSize bytes have been returned.
func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) {
if uncompressedSize > windowSize {
return nil, errors.New("uncompressed size is limited to 32KB")
}
f := &decompressor{
lru: [3]uint16{1, 1, 1},
uncompressed: uncompressedSize,
b: make([]byte, 4096),
r: r,
}
return f, nil
}
@ -1,51 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/Microsoft/go-winio/wim"
)
func main() {
flag.Parse()
f, err := os.Open(flag.Arg(0))
if err != nil {
panic(err)
}
w, err := wim.NewReader(f)
if err != nil {
panic(err)
}
fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows)
dir, err := w.Image[0].Open()
if err != nil {
panic(err)
}
err = recur(dir)
if err != nil {
panic(err)
}
}
func recur(d *wim.File) error {
files, err := d.Readdir()
if err != nil {
return fmt.Errorf("%s: %s", d.Name, err)
}
for _, f := range files {
if f.IsDir() {
err = recur(f)
if err != nil {
return fmt.Errorf("%s: %s", f.Name, err)
}
}
}
return nil
}
@ -1,866 +0,0 @@
// Package wim implements a WIM file parser.
//
// WIM files are used to distribute Windows file system and container images.
// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx.
package wim
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"sync"
"time"
"unicode/utf16"
)
// File attribute constants from Windows.
const (
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
FILE_ATTRIBUTE_DEVICE = 0x00000040
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_ATTRIBUTE_TEMPORARY = 0x00000100
FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
FILE_ATTRIBUTE_COMPRESSED = 0x00000800
FILE_ATTRIBUTE_OFFLINE = 0x00001000
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
FILE_ATTRIBUTE_VIRTUAL = 0x00010000
FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
FILE_ATTRIBUTE_EA = 0x00040000
)
// Windows processor architectures.
const (
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SHX = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_IA64 = 6
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
PROCESSOR_ARCHITECTURE_MSIL = 8
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
PROCESSOR_ARCHITECTURE_NEUTRAL = 11
PROCESSOR_ARCHITECTURE_ARM64 = 12
)
var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0}
type guid struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}
func (g guid) String() string {
return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7])
}
type resourceDescriptor struct {
FlagsAndCompressedSize uint64
Offset int64
OriginalSize int64
}
type resFlag byte
const (
resFlagFree resFlag = 1 << iota
resFlagMetadata
resFlagCompressed
resFlagSpanned
)
const validate = false
const supportedResFlags = resFlagMetadata | resFlagCompressed
func (r *resourceDescriptor) Flags() resFlag {
return resFlag(r.FlagsAndCompressedSize >> 56)
}
func (r *resourceDescriptor) CompressedSize() int64 {
return int64(r.FlagsAndCompressedSize & 0xffffffffffffff)
}
func (r *resourceDescriptor) String() string {
s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset)
if r.Flags()&4 != 0 {
s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize)
}
return s
}
// SHA1Hash contains the SHA1 hash of a file or stream.
type SHA1Hash [20]byte
type streamDescriptor struct {
resourceDescriptor
PartNumber uint16
RefCount uint32
Hash SHA1Hash
}
type hdrFlag uint32
const (
hdrFlagReserved hdrFlag = 1 << iota
hdrFlagCompressed
hdrFlagReadOnly
hdrFlagSpanned
hdrFlagResourceOnly
hdrFlagMetadataOnly
hdrFlagWriteInProgress
hdrFlagRpFix
)
const (
hdrFlagCompressReserved hdrFlag = 1 << (iota + 16)
hdrFlagCompressXpress
hdrFlagCompressLzx
)
const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx
type wimHeader struct {
ImageTag [8]byte
Size uint32
Version uint32
Flags hdrFlag
CompressionSize uint32
WIMGuid guid
PartNumber uint16
TotalParts uint16
ImageCount uint32
OffsetTable resourceDescriptor
XMLData resourceDescriptor
BootMetadata resourceDescriptor
BootIndex uint32
Padding uint32
Integrity resourceDescriptor
Unused [60]byte
}
type securityblockDisk struct {
TotalLength uint32
NumEntries uint32
}
const securityblockDiskSize = 8
type direntry struct {
Attributes uint32
SecurityID uint32
SubdirOffset int64
Unused1, Unused2 int64
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Padding uint32
ReparseHardLink int64
StreamCount uint16
ShortNameLength uint16
FileNameLength uint16
}
var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix
type streamentry struct {
Unused int64
Hash SHA1Hash
NameLength int16
}
var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix
// Filetime represents a Windows time.
type Filetime struct {
LowDateTime uint32
HighDateTime uint32
}
// Time returns the time as time.Time.
func (ft *Filetime) Time() time.Time {
// 100-nanosecond intervals since January 1, 1601
nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
nsec -= 116444736000000000
// convert into nanoseconds
nsec *= 100
return time.Unix(0, nsec)
}
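// Sanity check for the constant above (arithmetic only): 1601-01-01 to
// 1970-01-01 spans 369 years containing 89 leap days, i.e.
// (369*365 + 89) * 86400 = 11644473600 seconds, and at 10^7 ticks per
// second that is the 116444736000000000 hundred-nanosecond intervals
// subtracted here.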
// UnmarshalXML unmarshals the time from a WIM XML blob.
func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type xmlTime struct {
Low string `xml:"LOWPART"`
High string `xml:"HIGHPART"`
}
var t xmlTime
err := d.DecodeElement(&t, &start)
if err != nil {
return err
}
low, err := strconv.ParseUint(t.Low, 0, 32)
if err != nil {
return err
}
high, err := strconv.ParseUint(t.High, 0, 32)
if err != nil {
return err
}
ft.LowDateTime = uint32(low)
ft.HighDateTime = uint32(high)
return nil
}
type info struct {
Image []ImageInfo `xml:"IMAGE"`
}
// ImageInfo contains information about the image.
type ImageInfo struct {
Name string `xml:"NAME"`
Index int `xml:"INDEX,attr"`
CreationTime Filetime `xml:"CREATIONTIME"`
ModTime Filetime `xml:"LASTMODIFICATIONTIME"`
Windows *WindowsInfo `xml:"WINDOWS"`
}
// WindowsInfo contains information about the Windows installation in the image.
type WindowsInfo struct {
Arch byte `xml:"ARCH"`
ProductName string `xml:"PRODUCTNAME"`
EditionID string `xml:"EDITIONID"`
InstallationType string `xml:"INSTALLATIONTYPE"`
ProductType string `xml:"PRODUCTTYPE"`
Languages []string `xml:"LANGUAGES>LANGUAGE"`
DefaultLanguage string `xml:"LANGUAGES>DEFAULT"`
Version Version `xml:"VERSION"`
SystemRoot string `xml:"SYSTEMROOT"`
}
// Version represents a Windows build version.
type Version struct {
Major int `xml:"MAJOR"`
Minor int `xml:"MINOR"`
Build int `xml:"BUILD"`
SPBuild int `xml:"SPBUILD"`
SPLevel int `xml:"SPLEVEL"`
}
// ParseError is returned when the WIM cannot be parsed.
type ParseError struct {
Oper string
Path string
Err error
}
func (e *ParseError) Error() string {
if e.Path == "" {
return "WIM parse error at " + e.Oper + ": " + e.Err.Error()
}
return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error())
}
// Reader provides functions to read a WIM file.
type Reader struct {
hdr wimHeader
r io.ReaderAt
fileData map[SHA1Hash]resourceDescriptor
XMLInfo string // The XML information about the WIM.
Image []*Image // The WIM's images.
}
// Image represents an image within a WIM file.
type Image struct {
wim *Reader
offset resourceDescriptor
sds [][]byte
rootOffset int64
r io.ReadCloser
curOffset int64
m sync.Mutex
ImageInfo
}
// StreamHeader contains alternate data stream metadata.
type StreamHeader struct {
Name string
Hash SHA1Hash
Size int64
}
// Stream represents an alternate data stream or reparse point data stream.
type Stream struct {
StreamHeader
wim *Reader
offset resourceDescriptor
}
// FileHeader contains file metadata.
type FileHeader struct {
Name string
ShortName string
Attributes uint32
SecurityDescriptor []byte
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Size int64
LinkID int64
ReparseTag uint32
ReparseReserved uint32
}
// File represents a file or directory in a WIM image.
type File struct {
FileHeader
Streams []*Stream
offset resourceDescriptor
img *Image
subdirOffset int64
}
// NewReader returns a Reader that can be used to read WIM file data.
func NewReader(f io.ReaderAt) (*Reader, error) {
r := &Reader{r: f}
section := io.NewSectionReader(f, 0, 0xffff)
err := binary.Read(section, binary.LittleEndian, &r.hdr)
if err != nil {
return nil, err
}
if r.hdr.ImageTag != wimImageTag {
return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")}
}
if r.hdr.Flags&^supportedHdrFlags != 0 {
return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags)
}
if r.hdr.CompressionSize != 0x8000 {
return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize)
}
if r.hdr.TotalParts != 1 {
return nil, errors.New("multi-part WIM not supported")
}
fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable)
if err != nil {
return nil, err
}
xmlinfo, err := r.readXML()
if err != nil {
return nil, err
}
var info info
err = xml.Unmarshal([]byte(xmlinfo), &info)
if err != nil {
return nil, &ParseError{Oper: "XML info", Err: err}
}
for i, img := range images {
for _, imgInfo := range info.Image {
if imgInfo.Index == i+1 {
img.ImageInfo = imgInfo
break
}
}
}
r.fileData = fileData
r.Image = images
r.XMLInfo = xmlinfo
return r, nil
}
// Close releases resources associated with the Reader.
func (r *Reader) Close() error {
for _, img := range r.Image {
img.reset()
}
return nil
}
func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) {
return r.resourceReaderWithOffset(hdr, 0)
}
func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) {
var sr io.ReadCloser
section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize())
if hdr.Flags()&resFlagCompressed == 0 {
section.Seek(offset, 0)
sr = ioutil.NopCloser(section)
} else {
cr, err := newCompressedReader(section, hdr.OriginalSize, offset)
if err != nil {
return nil, err
}
sr = cr
}
return sr, nil
}
func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) {
rsrc, err := r.resourceReader(hdr)
if err != nil {
return nil, err
}
defer rsrc.Close()
return ioutil.ReadAll(rsrc)
}
func (r *Reader) readXML() (string, error) {
if r.hdr.XMLData.CompressedSize() == 0 {
return "", nil
}
rsrc, err := r.resourceReader(&r.hdr.XMLData)
if err != nil {
return "", err
}
defer rsrc.Close()
XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2)
err = binary.Read(rsrc, binary.LittleEndian, XMLData)
if err != nil {
return "", &ParseError{Oper: "XML data", Err: err}
}
// The BOM will always indicate little-endian UTF-16.
if XMLData[0] != 0xfeff {
return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")}
}
return string(utf16.Decode(XMLData[1:])), nil
}
func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) {
fileData := make(map[SHA1Hash]resourceDescriptor)
var images []*Image
offsetTable, err := r.readResource(res)
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
br := bytes.NewReader(offsetTable)
for i := 0; ; i++ {
var res streamDescriptor
err := binary.Read(br, binary.LittleEndian, &res)
if err == io.EOF {
break
}
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
if res.Flags()&^supportedResFlags != 0 {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")}
}
// Validation for ad-hoc testing
if validate {
sec, err := r.resourceReader(&res.resourceDescriptor)
if err != nil {
panic(fmt.Sprint(i, err))
}
hash := sha1.New()
_, err = io.Copy(hash, sec)
sec.Close()
if err != nil {
panic(fmt.Sprint(i, err))
}
var cmphash SHA1Hash
copy(cmphash[:], hash.Sum(nil))
if cmphash != res.Hash {
panic(fmt.Sprint(i, "hash mismatch"))
}
}
if res.Flags()&resFlagMetadata != 0 {
image := &Image{
wim: r,
offset: res.resourceDescriptor,
}
images = append(images, image)
} else {
fileData[res.Hash] = res.resourceDescriptor
}
}
if len(images) != int(r.hdr.ImageCount) {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")}
}
return fileData, images, nil
}
func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) {
var secBlock securityblockDisk
err = binary.Read(rsrc, binary.LittleEndian, &secBlock)
if err != nil {
err = &ParseError{Oper: "security table", Err: err}
return
}
n += securityblockDiskSize
secSizes := make([]int64, secBlock.NumEntries)
err = binary.Read(rsrc, binary.LittleEndian, &secSizes)
if err != nil {
err = &ParseError{Oper: "security table sizes", Err: err}
return
}
n += int64(secBlock.NumEntries * 8)
sds = make([][]byte, secBlock.NumEntries)
for i, size := range secSizes {
sd := make([]byte, size&0xffffffff)
_, err = io.ReadFull(rsrc, sd)
if err != nil {
err = &ParseError{Oper: "security descriptor", Err: err}
return
}
n += int64(len(sd))
sds[i] = sd
}
secsize := int64((secBlock.TotalLength + 7) &^ 7)
if n > secsize {
err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")}
return
}
_, err = io.CopyN(ioutil.Discard, rsrc, secsize-n)
if err != nil {
return
}
n = secsize
return
}
// Open parses the image and returns the root directory.
func (img *Image) Open() (*File, error) {
if img.sds == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset)
if err != nil {
return nil, err
}
sds, n, err := img.wim.readSecurityDescriptors(rsrc)
if err != nil {
rsrc.Close()
return nil, err
}
img.sds = sds
img.r = rsrc
img.rootOffset = n
img.curOffset = n
}
f, err := img.readdir(img.rootOffset)
if err != nil {
return nil, err
}
if len(f) != 1 {
return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")}
}
return f[0], err
}
func (img *Image) reset() {
if img.r != nil {
img.r.Close()
img.r = nil
}
img.curOffset = -1
}
func (img *Image) readdir(offset int64) ([]*File, error) {
img.m.Lock()
defer img.m.Unlock()
if offset < img.curOffset || offset > img.curOffset+chunkSize {
// Reset to seek backward or to seek forward very far.
img.reset()
}
if img.r == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset)
if err != nil {
return nil, err
}
img.r = rsrc
img.curOffset = offset
}
if offset > img.curOffset {
_, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset)
if err != nil {
img.reset()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
}
var entries []*File
for {
e, n, err := img.readNextEntry(img.r)
img.curOffset += n
if err == io.EOF {
break
}
if err != nil {
img.reset()
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
return nil, 0, &ParseError{Oper: "directory length check", Err: err}
}
if length == 0 {
return nil, 8, io.EOF
}
left := length
if left < direntrySize {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")}
}
var dentry direntry
err = binary.Read(r, binary.LittleEndian, &dentry)
if err != nil {
return nil, 0, &ParseError{Oper: "directory entry", Err: err}
}
left -= direntrySize
namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength)
if left < namesLen {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")}
}
names := make([]uint16, namesLen/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= namesLen
var name, shortName string
if dentry.FileNameLength > 0 {
name = string(utf16.Decode(names[:dentry.FileNameLength/2]))
}
if dentry.ShortNameLength > 0 {
shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:]))
}
var offset resourceDescriptor
zerohash := SHA1Hash{}
if dentry.Hash != zerohash {
var ok bool
offset, ok = img.wim.fileData[dentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)}
}
}
f := &File{
FileHeader: FileHeader{
Attributes: dentry.Attributes,
CreationTime: dentry.CreationTime,
LastAccessTime: dentry.LastAccessTime,
LastWriteTime: dentry.LastWriteTime,
Hash: dentry.Hash,
Size: offset.OriginalSize,
Name: name,
ShortName: shortName,
},
offset: offset,
img: img,
subdirOffset: dentry.SubdirOffset,
}
isDir := false
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 {
f.LinkID = dentry.ReparseHardLink
if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 {
isDir = true
}
} else {
f.ReparseTag = uint32(dentry.ReparseHardLink)
f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32)
}
if isDir && f.subdirOffset == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")}
} else if !isDir && f.subdirOffset != 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")}
}
if dentry.SecurityID != 0xffffffff {
f.SecurityDescriptor = img.sds[dentry.SecurityID]
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
if dentry.StreamCount > 0 {
var streams []*Stream
for i := uint16(0); i < dentry.StreamCount; i++ {
s, n, err := img.readNextStream(r)
length += n
if err != nil {
return nil, 0, err
}
// The first unnamed stream should be treated as the file stream.
if i == 0 && s.Name == "" {
f.Hash = s.Hash
f.Size = s.Size
f.offset = s.offset
} else if s.Name != "" {
streams = append(streams, s)
}
}
f.Streams = streams
}
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")}
}
return f, length, nil
}
func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, &ParseError{Oper: "stream length check", Err: err}
}
left := length
if left < streamentrySize {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")}
}
var sentry streamentry
err = binary.Read(r, binary.LittleEndian, &sentry)
if err != nil {
return nil, 0, &ParseError{Oper: "stream entry", Err: err}
}
left -= streamentrySize
if left < int64(sentry.NameLength) {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")}
}
names := make([]uint16, sentry.NameLength/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= int64(sentry.NameLength)
name := string(utf16.Decode(names))
var offset resourceDescriptor
if sentry.Hash != (SHA1Hash{}) {
var ok bool
offset, ok = img.wim.fileData[sentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)}
}
}
s := &Stream{
StreamHeader: StreamHeader{
Hash: sentry.Hash,
Size: offset.OriginalSize,
Name: name,
},
wim: img.wim,
offset: offset,
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
return s, length, nil
}
// Open returns an io.ReadCloser that can be used to read the stream's contents.
func (s *Stream) Open() (io.ReadCloser, error) {
return s.wim.resourceReader(&s.offset)
}
// Open returns an io.ReadCloser that can be used to read the file's contents.
func (f *File) Open() (io.ReadCloser, error) {
return f.img.wim.resourceReader(&f.offset)
}
// Readdir reads the directory entries.
func (f *File) Readdir() ([]*File, error) {
if !f.IsDir() {
return nil, errors.New("not a directory")
}
return f.img.readdir(f.subdirOffset)
}
// IsDir returns whether the given file is a directory. It returns false when it
// is a directory reparse point.
func (f *FileHeader) IsDir() bool {
return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY
}
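A minimal usage sketch for the reader above, not part of the original file: it assumes this package's `NewReader` constructor, the reader's `Image` slice, and `(*Image).Open` returning the root directory; `install.wim` is a hypothetical input path.

```go
package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio/wim"
)

// walk prints the tree rooted at f using the Readdir/IsDir API shown above.
func walk(f *wim.File, indent string) error {
	fmt.Println(indent + f.Name)
	if !f.IsDir() {
		return nil
	}
	children, err := f.Readdir()
	if err != nil {
		return err
	}
	for _, c := range children {
		if err := walk(c, indent+"  "); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	f, err := os.Open("install.wim") // hypothetical input path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r, err := wim.NewReader(f) // assumed constructor; *os.File satisfies io.ReaderAt
	if err != nil {
		panic(err)
	}
	root, err := r.Image[0].Open() // assumed accessor for the image's root directory
	if err != nil {
		panic(err)
	}
	if err := walk(root, ""); err != nil {
		panic(err)
	}
}
```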

View file

@ -1,5 +0,0 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

View file

@ -1,7 +0,0 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

View file

@ -1,57 +0,0 @@
package purell
import (
"testing"
)
var (
safeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/..//?"
usuallySafeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/"
unsafeUrl = "HttPS://..www.iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allDWORDUrl = "HttPS://1113982867:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allOctalUrl = "HttPS://0102.0146.07.0223:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allHexUrl = "HttPS://0x42660793:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allCombinedUrl = "HttPS://..0x42660793.:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
)
func BenchmarkSafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(safeUrl, FlagsSafe)
}
}
func BenchmarkUsuallySafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(usuallySafeUrl, FlagsUsuallySafeGreedy)
}
}
func BenchmarkUnsafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(unsafeUrl, FlagsUnsafeGreedy)
}
}
func BenchmarkAllDWORD(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allDWORDUrl, FlagsAllGreedy)
}
}
func BenchmarkAllOctal(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allOctalUrl, FlagsAllGreedy)
}
}
func BenchmarkAllHex(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allHexUrl, FlagsAllGreedy)
}
}
func BenchmarkAllCombined(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allCombinedUrl, FlagsAllGreedy)
}
}

View file

@ -1,9 +0,0 @@
PASS
BenchmarkSafe 500000 6131 ns/op
BenchmarkUsuallySafe 200000 7864 ns/op
BenchmarkUnsafe 100000 28560 ns/op
BenchmarkAllDWORD 50000 38722 ns/op
BenchmarkAllOctal 50000 40941 ns/op
BenchmarkAllHex 50000 44063 ns/op
BenchmarkAllCombined 50000 33613 ns/op
ok github.com/PuerkitoBio/purell 17.404s

View file

@ -1,35 +0,0 @@
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}

View file

@ -1,768 +0,0 @@
package purell
import (
"fmt"
"net/url"
"testing"
)
type testCase struct {
nm string
src string
flgs NormalizationFlags
res string
parsed bool
}
var (
cases = [...]*testCase{
&testCase{
"LowerScheme",
"HTTP://www.SRC.ca",
FlagLowercaseScheme,
"http://www.SRC.ca",
false,
},
&testCase{
"LowerScheme2",
"http://www.SRC.ca",
FlagLowercaseScheme,
"http://www.SRC.ca",
false,
},
&testCase{
"LowerHost",
"HTTP://www.SRC.ca/",
FlagLowercaseHost,
"http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"UpperEscapes",
`http://www.whatever.com/Some%aa%20Special%8Ecases/`,
FlagUppercaseEscapes,
"http://www.whatever.com/Some%AA%20Special%8Ecases/",
false,
},
&testCase{
"UnnecessaryEscapes",
`http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`,
FlagDecodeUnnecessaryEscapes,
"http://www.toto.com/AB.D/23R-/_~",
false,
},
&testCase{
"RemoveDefaultPort",
"HTTP://www.SRC.ca:80/",
FlagRemoveDefaultPort,
"http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDefaultPort2",
"HTTP://www.SRC.ca:80",
FlagRemoveDefaultPort,
"http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDefaultPort3",
"HTTP://www.SRC.ca:8080",
FlagRemoveDefaultPort,
"http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"Safe",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
FlagsSafe,
"http://www.src.ca/to%1Ato%8B%EE/OKnowABC~",
false,
},
&testCase{
"BothLower",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
FlagLowercaseHost | FlagLowercaseScheme,
"http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~",
false,
},
&testCase{
"RemoveTrailingSlash",
"HTTP://www.SRC.ca:80/",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveTrailingSlash2",
"HTTP://www.SRC.ca:80/toto/titi/",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveTrailingSlash3",
"HTTP://www.SRC.ca:80/toto/titi/fin/?a=1",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash",
"HTTP://www.SRC.ca:80",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash2",
"HTTP://www.SRC.ca:80/toto/titi.html",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash3",
"HTTP://www.SRC.ca:80/toto/titi/fin?a=1",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDotSegments",
"HTTP://root/a/b/./../../c/",
FlagRemoveDotSegments,
"http://root/c/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDotSegments2",
"HTTP://root/../a/b/./../c/../d",
FlagRemoveDotSegments,
"http://root/a/d", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"UsuallySafe",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test",
FlagsUsuallySafeGreedy,
"http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test",
false,
},
&testCase{
"RemoveDirectoryIndex",
"HTTP://root/a/b/c/default.aspx",
FlagRemoveDirectoryIndex,
"http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDirectoryIndex2",
"HTTP://root/a/b/c/default#a=b",
FlagRemoveDirectoryIndex,
"http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveFragment",
"HTTP://root/a/b/c/default#toto=tata",
FlagRemoveFragment,
"http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"ForceHTTP",
"https://root/a/b/c/default#toto=tata",
FlagForceHTTP,
"http://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveDuplicateSlashes",
"https://root/a//b///c////default#toto=tata",
FlagRemoveDuplicateSlashes,
"https://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveDuplicateSlashes2",
"https://root//a//b///c////default#toto=tata",
FlagRemoveDuplicateSlashes,
"https://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveWWW",
"https://www.root/a/b/c/",
FlagRemoveWWW,
"https://root/a/b/c/",
false,
},
&testCase{
"RemoveWWW2",
"https://WwW.Root/a/b/c/",
FlagRemoveWWW,
"https://Root/a/b/c/",
false,
},
&testCase{
"AddWWW",
"https://Root/a/b/c/",
FlagAddWWW,
"https://www.Root/a/b/c/",
false,
},
&testCase{
"SortQuery",
"http://root/toto/?b=4&a=1&c=3&b=2&a=5",
FlagSortQuery,
"http://root/toto/?a=1&a=5&b=2&b=4&c=3",
false,
},
&testCase{
"RemoveEmptyQuerySeparator",
"http://root/toto/?",
FlagRemoveEmptyQuerySeparator,
"http://root/toto/",
false,
},
&testCase{
"Unsafe",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUnsafeGreedy,
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
false,
},
&testCase{
"Safe2",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsSafe,
"https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
false,
},
&testCase{
"UsuallySafe2",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUsuallySafeGreedy,
"https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid",
false,
},
&testCase{
"AddTrailingSlashBug",
"http://src.ca/",
FlagsAllNonGreedy,
"http://www.src.ca/",
false,
},
&testCase{
"SourceModified",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUnsafeGreedy,
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
true,
},
&testCase{
"IPv6-1",
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
false,
},
&testCase{
"IPv6-2",
"http://[::ffff:192.168.1.1]/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"IPv6-3",
"http://[::ffff:192.168.1.1]:80/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"IPv6-4",
"htTps://[::fFff:192.168.1.1]:443/test",
FlagsSafe | FlagRemoveDotSegments,
"https://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"FTP",
"ftp://user:pass@ftp.foo.net/foo/bar",
FlagsSafe | FlagRemoveDotSegments,
"ftp://user:pass@ftp.foo.net/foo/bar",
false,
},
&testCase{
"Standard-1",
"http://www.foo.com:80/foo",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/foo",
false,
},
&testCase{
"Standard-2",
"http://www.foo.com:8000/foo",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com:8000/foo",
false,
},
&testCase{
"Standard-3",
"http://www.foo.com/%7ebar",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/~bar",
false,
},
&testCase{
"Standard-4",
"http://www.foo.com/%7Ebar",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/~bar",
false,
},
&testCase{
"Standard-5",
"http://USER:pass@www.Example.COM/foo/bar",
FlagsSafe | FlagRemoveDotSegments,
"http://USER:pass@www.example.com/foo/bar",
false,
},
&testCase{
"Standard-6",
"http://test.example/?a=%26&b=1",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/?a=%26&b=1",
false,
},
&testCase{
"Standard-7",
"http://test.example/%25/?p=%20val%20%25",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/%25/?p=%20val%20%25",
false,
},
&testCase{
"Standard-8",
"http://test.example/path/with a%20space+/",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/path/with%20a%20space+/",
false,
},
&testCase{
"Standard-9",
"http://test.example/?",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/",
false,
},
&testCase{
"Standard-10",
"http://a.COM/path/?b&a",
FlagsSafe | FlagRemoveDotSegments,
"http://a.com/path/?b&a",
false,
},
&testCase{
"StandardCasesAddTrailingSlash",
"http://test.example?",
FlagsSafe | FlagAddTrailingSlash,
"http://test.example/",
false,
},
&testCase{
"OctalIP-1",
"http://0123.011.0.4/",
FlagsSafe | FlagDecodeOctalHost,
"http://0123.011.0.4/",
false,
},
&testCase{
"OctalIP-2",
"http://0102.0146.07.0223/",
FlagsSafe | FlagDecodeOctalHost,
"http://66.102.7.147/",
false,
},
&testCase{
"OctalIP-3",
"http://0102.0146.07.0223.:23/",
FlagsSafe | FlagDecodeOctalHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"OctalIP-4",
"http://USER:pass@0102.0146.07.0223../",
FlagsSafe | FlagDecodeOctalHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"DWORDIP-1",
"http://123.1113982867/",
FlagsSafe | FlagDecodeDWORDHost,
"http://123.1113982867/",
false,
},
&testCase{
"DWORDIP-2",
"http://1113982867/",
FlagsSafe | FlagDecodeDWORDHost,
"http://66.102.7.147/",
false,
},
&testCase{
"DWORDIP-3",
"http://1113982867.:23/",
FlagsSafe | FlagDecodeDWORDHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"DWORDIP-4",
"http://USER:pass@1113982867../",
FlagsSafe | FlagDecodeDWORDHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"HexIP-1",
"http://0x123.1113982867/",
FlagsSafe | FlagDecodeHexHost,
"http://0x123.1113982867/",
false,
},
&testCase{
"HexIP-2",
"http://0x42660793/",
FlagsSafe | FlagDecodeHexHost,
"http://66.102.7.147/",
false,
},
&testCase{
"HexIP-3",
"http://0x42660793.:23/",
FlagsSafe | FlagDecodeHexHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"HexIP-4",
"http://USER:pass@0x42660793../",
FlagsSafe | FlagDecodeHexHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"UnnecessaryHostDots-1",
"http://.www.foo.com../foo/bar.html",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com/foo/bar.html",
false,
},
&testCase{
"UnnecessaryHostDots-2",
"http://www.foo.com./foo/bar.html",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com/foo/bar.html",
false,
},
&testCase{
"UnnecessaryHostDots-3",
"http://www.foo.com.:81/foo",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com:81/foo",
false,
},
&testCase{
"UnnecessaryHostDots-4",
"http://www.example.com./",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.example.com/",
false,
},
&testCase{
"EmptyPort-1",
"http://www.thedraymin.co.uk:/main/?p=308",
FlagsSafe | FlagRemoveEmptyPortSeparator,
"http://www.thedraymin.co.uk/main/?p=308",
false,
},
&testCase{
"EmptyPort-2",
"http://www.src.ca:",
FlagsSafe | FlagRemoveEmptyPortSeparator,
"http://www.src.ca",
false,
},
&testCase{
"Slashes-1",
"http://test.example/foo/bar/.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Slashes-2",
"http://test.example/foo/bar/./",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Slashes-3",
"http://test.example/foo/bar/..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-4",
"http://test.example/foo/bar/../",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-5",
"http://test.example/foo/bar/../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/baz",
false,
},
&testCase{
"Slashes-6",
"http://test.example/foo/bar/../..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/",
false,
},
&testCase{
"Slashes-7",
"http://test.example/foo/bar/../../",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/",
false,
},
&testCase{
"Slashes-8",
"http://test.example/foo/bar/../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-9",
"http://test.example/foo/bar/../../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-10",
"http://test.example/foo/bar/../../../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-11",
"http://test.example/./foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-12",
"http://test.example/../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-13",
"http://test.example/foo.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo.",
false,
},
&testCase{
"Slashes-14",
"http://test.example/.foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/.foo",
false,
},
&testCase{
"Slashes-15",
"http://test.example/foo..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo..",
false,
},
&testCase{
"Slashes-16",
"http://test.example/..foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/..foo",
false,
},
&testCase{
"Slashes-17",
"http://test.example/./../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-18",
"http://test.example/./foo/.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-19",
"http://test.example/foo/./bar",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar",
false,
},
&testCase{
"Slashes-20",
"http://test.example/foo/../bar",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/bar",
false,
},
&testCase{
"Slashes-21",
"http://test.example/foo//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-22",
"http://test.example/foo///bar//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Relative",
"foo/bar",
FlagsAllGreedy,
"foo/bar",
false,
},
&testCase{
"Relative-1",
"./../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo",
false,
},
&testCase{
"Relative-2",
"./foo/bar/../baz/../bang/..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo/",
false,
},
&testCase{
"Relative-3",
"foo///bar//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo/bar/",
false,
},
&testCase{
"Relative-4",
"www.youtube.com",
FlagsUsuallySafeGreedy,
"www.youtube.com",
false,
},
/*&testCase{
"UrlNorm-5",
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
FlagsSafe | FlagRemoveDotSegments,
"http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
false,
},
&testCase{
"UrlNorm-1",
"http://test.example/?a=%e3%82%82%26",
FlagsAllGreedy,
"http://test.example/?a=\xe3\x82\x82%26",
false,
},*/
}
)
func TestRunner(t *testing.T) {
for _, tc := range cases {
runCase(tc, t)
}
}
func runCase(tc *testCase, t *testing.T) {
t.Logf("running %s...", tc.nm)
if tc.parsed {
u, e := url.Parse(tc.src)
if e != nil {
t.Errorf("%s - FAIL : %s", tc.nm, e)
return
} else {
NormalizeURL(u, tc.flgs)
if s := u.String(); s != tc.res {
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
}
}
} else {
if s, e := NormalizeURLString(tc.src, tc.flgs); e != nil {
t.Errorf("%s - FAIL : %s", tc.nm, e)
} else if s != tc.res {
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
}
}
}
func TestDecodeUnnecessaryEscapesAll(t *testing.T) {
var url = "http://host/"
for i := 0; i < 256; i++ {
url += fmt.Sprintf("%%%02x", i)
}
if s, e := NormalizeURLString(url, FlagDecodeUnnecessaryEscapes); e != nil {
t.Fatalf("Got error %s", e.Error())
} else {
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22%23$%25&'()*+,-./0123456789:;%3C=%3E%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
if s != want {
t.Errorf("DecodeUnnecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
}
}
}
func TestEncodeNecessaryEscapesAll(t *testing.T) {
var url = "http://host/"
for i := 0; i < 256; i++ {
if i != 0x25 {
url += string(rune(i)) // convert the code point, not the integer's decimal form
}
}
if s, e := NormalizeURLString(url, FlagEncodeNecessaryEscapes); e != nil {
t.Fatalf("Got error %s", e.Error())
} else {
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22#$&'()*+,-./0123456789:;%3C=%3E?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%C2%A0%C2%A1%C2%A2%C2%A3%C2%A4%C2%A5%C2%A6%C2%A7%C2%A8%C2%A9%C2%AA%C2%AB%C2%AC%C2%AD%C2%AE%C2%AF%C2%B0%C2%B1%C2%B2%C2%B3%C2%B4%C2%B5%C2%B6%C2%B7%C2%B8%C2%B9%C2%BA%C2%BB%C2%BC%C2%BD%C2%BE%C2%BF%C3%80%C3%81%C3%82%C3%83%C3%84%C3%85%C3%86%C3%87%C3%88%C3%89%C3%8A%C3%8B%C3%8C%C3%8D%C3%8E%C3%8F%C3%90%C3%91%C3%92%C3%93%C3%94%C3%95%C3%96%C3%97%C3%98%C3%99%C3%9A%C3%9B%C3%9C%C3%9D%C3%9E%C3%9F%C3%A0%C3%A1%C3%A2%C3%A3%C3%A4%C3%A5%C3%A6%C3%A7%C3%A8%C3%A9%C3%AA%C3%AB%C3%AC%C3%AD%C3%AE%C3%AF%C3%B0%C3%B1%C3%B2%C3%B3%C3%B4%C3%B5%C3%B6%C3%B7%C3%B8%C3%B9%C3%BA%C3%BB%C3%BC%C3%BD%C3%BE%C3%BF"
if s != want {
t.Errorf("EncodeNecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
}
}
}

View file

@ -1,53 +0,0 @@
package purell
import (
"testing"
)
// Test cases merged from PR #1
// Originally from https://github.com/jehiah/urlnorm/blob/master/test_urlnorm.py
func assertMap(t *testing.T, cases map[string]string, f NormalizationFlags) {
for bad, good := range cases {
s, e := NormalizeURLString(bad, f)
if e != nil {
t.Errorf("%s normalizing %v to %v", e.Error(), bad, good)
} else {
if s != good {
t.Errorf("source: %v expected: %v got: %v", bad, good, s)
}
}
}
}
// This tests normalization to a unicode representation:
// percent escapes for unreserved values are unescaped to their unicode value.
// It also tests normalization to IDNA domains,
// IP word handling, IPv6 address handling, and trailing domain periods.
// In general, this matches Google Chrome's unescaping for things in the address bar.
// Spaces are converted to '+' (perhaps controversial).
// http://code.google.com/p/google-url/ is probably another good reference for this approach.
func TestUrlnorm(t *testing.T) {
testcases := map[string]string{
"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=%e3%82%82%26",
//"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=\xe3\x82\x82%26", //should return a unicode character
"http://s.xn--q-bga.DE/": "http://s.xn--q-bga.de/", //should be in idna format
"http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode
"http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding
"http://\u00e9.com": "http://xn--9ca.com",
"http://e\u0301.com": "http://xn--9ca.com",
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
//"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
"http://test.example/\xe3\x82\xad": "http://test.example/%E3%82%AD",
//"http://test.example/\xe3\x82\xad": "http://test.example/\xe3\x82\xad",
"http://test.example/?p=%23val#test-%23-val%25": "http://test.example/?p=%23val#test-%23-val%25", //check that %23 (#) is not escaped where it shouldn't be
"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n",
//"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xef\xbf\xbdliz\xc3\xa6ti\xc3\xb8n",
}
assertMap(t, testcases, FlagsSafe|FlagRemoveDotSegments)
}

View file

@ -1,11 +0,0 @@
language: go
go:
- 1.4
- tip
install:
- go build .
script:
- go test -v

View file

@ -1,641 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package urlesc
import (
"net/url"
"testing"
)
type URLTest struct {
in string
out *url.URL
roundtrip string // expected result of reserializing the URL; empty means same as "in".
}
var urltests = []URLTest{
// no path
{
"http://www.google.com",
&url.URL{
Scheme: "http",
Host: "www.google.com",
},
"",
},
// path
{
"http://www.google.com/",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
},
"",
},
// path with hex escaping
{
"http://www.google.com/file%20one%26two",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/file one&two",
},
"http://www.google.com/file%20one&two",
},
// user
{
"ftp://webmaster@www.google.com/",
&url.URL{
Scheme: "ftp",
User: url.User("webmaster"),
Host: "www.google.com",
Path: "/",
},
"",
},
// escape sequence in username
{
"ftp://john%20doe@www.google.com/",
&url.URL{
Scheme: "ftp",
User: url.User("john doe"),
Host: "www.google.com",
Path: "/",
},
"ftp://john%20doe@www.google.com/",
},
// query
{
"http://www.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
},
"",
},
// query with hex escaping: NOT parsed
{
"http://www.google.com/?q=go%20language",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go%20language",
},
"",
},
// %20 outside query
{
"http://www.google.com/a%20b?q=c+d",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/a b",
RawQuery: "q=c+d",
},
"",
},
// path without leading /, so no parsing
{
"http:www.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Opaque: "www.google.com/",
RawQuery: "q=go+language",
},
"http:www.google.com/?q=go+language",
},
// path without leading /, so no parsing
{
"http:%2f%2fwww.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Opaque: "%2f%2fwww.google.com/",
RawQuery: "q=go+language",
},
"http:%2f%2fwww.google.com/?q=go+language",
},
// non-authority with path
{
"mailto:/webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Path: "/webmaster@golang.org",
},
"mailto:///webmaster@golang.org", // unfortunate compromise
},
// non-authority
{
"mailto:webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Opaque: "webmaster@golang.org",
},
"",
},
// unescaped :// in query should not create a scheme
{
"/foo?query=http://bad",
&url.URL{
Path: "/foo",
RawQuery: "query=http://bad",
},
"",
},
// leading // without scheme should create an authority
{
"//foo",
&url.URL{
Host: "foo",
},
"",
},
// leading // without scheme, with userinfo, path, and query
{
"//user@foo/path?a=b",
&url.URL{
User: url.User("user"),
Host: "foo",
Path: "/path",
RawQuery: "a=b",
},
"",
},
// Three leading slashes isn't an authority, but doesn't return an error.
// (We can't return an error, as this code is also used via
// ServeHTTP -> ReadRequest -> Parse, which is arguably a
// different URL parsing context, but currently shares the
// same codepath)
{
"///threeslashes",
&url.URL{
Path: "///threeslashes",
},
"",
},
{
"http://user:password@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("user", "password"),
Host: "google.com",
},
"http://user:password@google.com",
},
// unescaped @ in username should not confuse host
{
"http://j@ne:password@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("j@ne", "password"),
Host: "google.com",
},
"http://j%40ne:password@google.com",
},
// unescaped @ in password should not confuse host
{
"http://jane:p@ssword@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("jane", "p@ssword"),
Host: "google.com",
},
"http://jane:p%40ssword@google.com",
},
{
"http://j@ne:password@google.com/p@th?q=@go",
&url.URL{
Scheme: "http",
User: url.UserPassword("j@ne", "password"),
Host: "google.com",
Path: "/p@th",
RawQuery: "q=@go",
},
"http://j%40ne:password@google.com/p@th?q=@go",
},
{
"http://www.google.com/?q=go+language#foo",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
Fragment: "foo",
},
"",
},
{
"http://www.google.com/?q=go+language#foo%26bar",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
Fragment: "foo&bar",
},
"http://www.google.com/?q=go+language#foo&bar",
},
{
"file:///home/adg/rabbits",
&url.URL{
Scheme: "file",
Host: "",
Path: "/home/adg/rabbits",
},
"file:///home/adg/rabbits",
},
// "Windows" paths are no exception to the rule.
// See golang.org/issue/6027, especially comment #9.
{
"file:///C:/FooBar/Baz.txt",
&url.URL{
Scheme: "file",
Host: "",
Path: "/C:/FooBar/Baz.txt",
},
"file:///C:/FooBar/Baz.txt",
},
// case-insensitive scheme
{
"MaIlTo:webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Opaque: "webmaster@golang.org",
},
"mailto:webmaster@golang.org",
},
// Relative path
{
"a/b/c",
&url.URL{
Path: "a/b/c",
},
"a/b/c",
},
// escaped '?' in username and password
{
"http://%3Fam:pa%3Fsword@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("?am", "pa?sword"),
Host: "google.com",
},
"",
},
// escaped '?' and '#' in path
{
"http://example.com/%3F%23",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "?#",
},
"",
},
// unescaped [ ] ! ' ( ) * in path
{
"http://example.com/[]!'()*",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "[]!'()*",
},
"http://example.com/[]!'()*",
},
// escaped : / ? # [ ] @ in username and password
{
"http://%3A%2F%3F:%23%5B%5D%40@example.com",
&url.URL{
Scheme: "http",
User: url.UserPassword(":/?", "#[]@"),
Host: "example.com",
},
"",
},
// unescaped ! $ & ' ( ) * + , ; = in username and password
{
"http://!$&'():*+,;=@example.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("!$&'()", "*+,;="),
Host: "example.com",
},
"",
},
// unescaped = : / . ? = in query component
{
"http://example.com/?q=http://google.com/?q=",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "/",
RawQuery: "q=http://google.com/?q=",
},
"",
},
// unescaped : / ? [ ] @ ! $ & ' ( ) * + , ; = in fragment
{
"http://example.com/#:/?%23[]@!$&'()*+,;=",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "/",
Fragment: ":/?#[]@!$&'()*+,;=",
},
"",
},
}
func DoTestString(t *testing.T, parse func(string) (*url.URL, error), name string, tests []URLTest) {
for _, tt := range tests {
u, err := parse(tt.in)
if err != nil {
t.Errorf("%s(%q) returned error %s", name, tt.in, err)
continue
}
expected := tt.in
if len(tt.roundtrip) > 0 {
expected = tt.roundtrip
}
s := Escape(u)
if s != expected {
t.Errorf("Escape(%s(%q)) == %q (expected %q)", name, tt.in, s, expected)
}
}
}
func TestURLString(t *testing.T) {
DoTestString(t, url.Parse, "Parse", urltests)
// no leading slash on path should prepend
// slash on String() call
noslash := URLTest{
"http://www.google.com/search",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "search",
},
"",
}
s := Escape(noslash.out)
if s != noslash.in {
t.Errorf("Expected %s; go %s", noslash.in, s)
}
}
type EscapeTest struct {
in string
out string
err error
}
var escapeTests = []EscapeTest{
{
"",
"",
nil,
},
{
"abc",
"abc",
nil,
},
{
"one two",
"one+two",
nil,
},
{
"10%",
"10%25",
nil,
},
{
" ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;",
"+?%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A/%40%24%27%28%29%2A%2C%3B",
nil,
},
}
func TestEscape(t *testing.T) {
for _, tt := range escapeTests {
actual := QueryEscape(tt.in)
if tt.out != actual {
t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out)
}
// for bonus points, verify that escape:unescape is an identity.
roundtrip, err := url.QueryUnescape(actual)
if roundtrip != tt.in || err != nil {
t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]")
}
}
}
var resolveReferenceTests = []struct {
base, rel, expected string
}{
// Absolute URL references
{"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"},
{"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"},
{"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"},
// Path-absolute references
{"http://foo.com/bar", "/baz", "http://foo.com/baz"},
{"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"},
{"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"},
// Scheme-relative
{"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"},
// Path-relative references:
// ... current directory
{"http://foo.com", ".", "http://foo.com/"},
{"http://foo.com/bar", ".", "http://foo.com/"},
{"http://foo.com/bar/", ".", "http://foo.com/bar/"},
// ... going down
{"http://foo.com", "bar", "http://foo.com/bar"},
{"http://foo.com/", "bar", "http://foo.com/bar"},
{"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"},
// ... going up
{"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"},
{"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"},
{"http://foo.com/bar", "..", "http://foo.com/"},
{"http://foo.com/bar/baz", "./..", "http://foo.com/"},
// ".." in the middle (issue 3560)
{"http://foo.com/bar/baz", "quux/dotdot/../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/.././tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/./../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/././../../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/./.././../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/dotdot/./../../.././././tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/../dotdot/../dot/./tail/..", "http://foo.com/bar/quux/dot/"},
// Remove any dot-segments prior to forming the target URI.
// http://tools.ietf.org/html/rfc3986#section-5.2.4
{"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/baz"},
// Triple dot isn't special
{"http://foo.com/bar", "...", "http://foo.com/..."},
// Fragment
{"http://foo.com/bar", ".#frag", "http://foo.com/#frag"},
// RFC 3986: Normal Examples
// http://tools.ietf.org/html/rfc3986#section-5.4.1
{"http://a/b/c/d;p?q", "g:h", "g:h"},
{"http://a/b/c/d;p?q", "g", "http://a/b/c/g"},
{"http://a/b/c/d;p?q", "./g", "http://a/b/c/g"},
{"http://a/b/c/d;p?q", "g/", "http://a/b/c/g/"},
{"http://a/b/c/d;p?q", "/g", "http://a/g"},
{"http://a/b/c/d;p?q", "//g", "http://g"},
{"http://a/b/c/d;p?q", "?y", "http://a/b/c/d;p?y"},
{"http://a/b/c/d;p?q", "g?y", "http://a/b/c/g?y"},
{"http://a/b/c/d;p?q", "#s", "http://a/b/c/d;p?q#s"},
{"http://a/b/c/d;p?q", "g#s", "http://a/b/c/g#s"},
{"http://a/b/c/d;p?q", "g?y#s", "http://a/b/c/g?y#s"},
{"http://a/b/c/d;p?q", ";x", "http://a/b/c/;x"},
{"http://a/b/c/d;p?q", "g;x", "http://a/b/c/g;x"},
{"http://a/b/c/d;p?q", "g;x?y#s", "http://a/b/c/g;x?y#s"},
{"http://a/b/c/d;p?q", "", "http://a/b/c/d;p?q"},
{"http://a/b/c/d;p?q", ".", "http://a/b/c/"},
{"http://a/b/c/d;p?q", "./", "http://a/b/c/"},
{"http://a/b/c/d;p?q", "..", "http://a/b/"},
{"http://a/b/c/d;p?q", "../", "http://a/b/"},
{"http://a/b/c/d;p?q", "../g", "http://a/b/g"},
{"http://a/b/c/d;p?q", "../..", "http://a/"},
{"http://a/b/c/d;p?q", "../../", "http://a/"},
{"http://a/b/c/d;p?q", "../../g", "http://a/g"},
// RFC 3986: Abnormal Examples
// http://tools.ietf.org/html/rfc3986#section-5.4.2
{"http://a/b/c/d;p?q", "../../../g", "http://a/g"},
{"http://a/b/c/d;p?q", "../../../../g", "http://a/g"},
{"http://a/b/c/d;p?q", "/./g", "http://a/g"},
{"http://a/b/c/d;p?q", "/../g", "http://a/g"},
{"http://a/b/c/d;p?q", "g.", "http://a/b/c/g."},
{"http://a/b/c/d;p?q", ".g", "http://a/b/c/.g"},
{"http://a/b/c/d;p?q", "g..", "http://a/b/c/g.."},
{"http://a/b/c/d;p?q", "..g", "http://a/b/c/..g"},
{"http://a/b/c/d;p?q", "./../g", "http://a/b/g"},
{"http://a/b/c/d;p?q", "./g/.", "http://a/b/c/g/"},
{"http://a/b/c/d;p?q", "g/./h", "http://a/b/c/g/h"},
{"http://a/b/c/d;p?q", "g/../h", "http://a/b/c/h"},
{"http://a/b/c/d;p?q", "g;x=1/./y", "http://a/b/c/g;x=1/y"},
{"http://a/b/c/d;p?q", "g;x=1/../y", "http://a/b/c/y"},
{"http://a/b/c/d;p?q", "g?y/./x", "http://a/b/c/g?y/./x"},
{"http://a/b/c/d;p?q", "g?y/../x", "http://a/b/c/g?y/../x"},
{"http://a/b/c/d;p?q", "g#s/./x", "http://a/b/c/g#s/./x"},
{"http://a/b/c/d;p?q", "g#s/../x", "http://a/b/c/g#s/../x"},
// Extras.
{"https://a/b/c/d;p?q", "//g?q", "https://g?q"},
{"https://a/b/c/d;p?q", "//g#s", "https://g#s"},
{"https://a/b/c/d;p?q", "//g/d/e/f?y#s", "https://g/d/e/f?y#s"},
{"https://a/b/c/d;p#s", "?y", "https://a/b/c/d;p?y"},
{"https://a/b/c/d;p?q#s", "?y", "https://a/b/c/d;p?y"},
}
func TestResolveReference(t *testing.T) {
mustParse := func(url_ string) *url.URL {
u, err := url.Parse(url_)
if err != nil {
t.Fatalf("Expected URL to parse: %q, got error: %v", url_, err)
}
return u
}
opaque := &url.URL{Scheme: "scheme", Opaque: "opaque"}
for _, test := range resolveReferenceTests {
base := mustParse(test.base)
rel := mustParse(test.rel)
url := base.ResolveReference(rel)
if Escape(url) != test.expected {
t.Errorf("URL(%q).ResolveReference(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
}
// Ensure that new instances are returned.
if base == url {
t.Errorf("Expected URL.ResolveReference to return new URL instance.")
}
// Test the convenience wrapper too.
url, err := base.Parse(test.rel)
if err != nil {
t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err)
} else if Escape(url) != test.expected {
t.Errorf("URL(%q).Parse(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
} else if base == url {
// Ensure that new instances are returned for the wrapper too.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
// Ensure Opaque resets the URL.
url = base.ResolveReference(opaque)
if *url != *opaque {
t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", url, opaque)
}
// Test the convenience wrapper with an opaque URL too.
url, err = base.Parse("scheme:opaque")
if err != nil {
t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err)
} else if *url != *opaque {
t.Errorf("Parse failed to resolve opaque URL: want %#v, got %#v", url, opaque)
} else if base == url {
// Ensure that new instances are returned, again.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
}
}
type shouldEscapeTest struct {
in byte
mode encoding
escape bool
}
var shouldEscapeTests = []shouldEscapeTest{
// Unreserved characters (§2.3)
{'a', encodePath, false},
{'a', encodeUserPassword, false},
{'a', encodeQueryComponent, false},
{'a', encodeFragment, false},
{'z', encodePath, false},
{'A', encodePath, false},
{'Z', encodePath, false},
{'0', encodePath, false},
{'9', encodePath, false},
{'-', encodePath, false},
{'-', encodeUserPassword, false},
{'-', encodeQueryComponent, false},
{'-', encodeFragment, false},
{'.', encodePath, false},
{'_', encodePath, false},
{'~', encodePath, false},
// User information (§3.2.1)
{':', encodeUserPassword, true},
{'/', encodeUserPassword, true},
{'?', encodeUserPassword, true},
{'@', encodeUserPassword, true},
{'$', encodeUserPassword, false},
{'&', encodeUserPassword, false},
{'+', encodeUserPassword, false},
{',', encodeUserPassword, false},
{';', encodeUserPassword, false},
{'=', encodeUserPassword, false},
}
func TestShouldEscape(t *testing.T) {
for _, tt := range shouldEscapeTests {
if shouldEscape(tt.in, tt.mode) != tt.escape {
t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape)
}
}
}

View file

@ -1 +0,0 @@
logrus

View file

@ -1,8 +0,0 @@
language: go
go:
- 1.6
- 1.7
- tip
install:
- go get -t ./...
script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...

View file

@ -1,66 +0,0 @@
# 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7
* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements
# 0.8.6
* hooks/raven: allow passing an initialized client
# 0.8.5
* logrus/core: revert #208
# 0.8.4
* formatter/text: fix data race (#218)
# 0.8.3
* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely
# 0.8.2
* logrus: fix more Fatal family functions
# 0.8.1
* logrus: fix not exiting on `Fatalf` and `Fatalln`
# 0.8.0
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors
# 0.7.3
* formatter/\*: allow configuration of timestamp layout
# 0.7.2
* formatter/text: Add configuration option for time format (#158)

View file

@ -87,7 +87,8 @@ func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr, could also be a file.
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
// Only log the warning severity or above.
@ -138,7 +139,15 @@ var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.WithFields(logrus.Fields{
"animal": "walrus",
@ -171,6 +180,20 @@ In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") // will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
@ -206,42 +229,47 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
#### Level logging
@ -317,8 +345,11 @@ The built-in logging formatters are:
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
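A minimal configuration sketch for the built-in formatters above, using the README's `log` alias for logrus (`ForceColors` and `DisableColors` are the real `TextFormatter` fields; the values are illustrative):

```go
log.SetFormatter(&log.TextFormatter{
	ForceColors:   false, // do not force colors when there is no TTY
	DisableColors: true,  // plain output even on a TTY
})

// Or switch to structured output:
log.SetFormatter(&log.JSONFormatter{})
```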
Third party logging formatters:
@ -367,6 +398,18 @@ srv := http.Server{
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an

View file

@ -1,74 +0,0 @@
package logrus
import (
"io/ioutil"
"os/exec"
"testing"
"time"
)
func TestRegister(t *testing.T) {
current := len(handlers)
RegisterExitHandler(func() {})
if len(handlers) != current+1 {
t.Fatalf("can't add handler")
}
}
func TestHandler(t *testing.T) {
gofile := "/tmp/testprog.go"
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
t.Fatalf("can't create go file")
}
outfile := "/tmp/testprog.out"
arg := time.Now().UTC().String()
err := exec.Command("go", "run", gofile, outfile, arg).Run()
if err == nil {
t.Fatalf("completed normally, should have failed")
}
data, err := ioutil.ReadFile(outfile)
if err != nil {
t.Fatalf("can't read output file %s", outfile)
}
if string(data) != arg {
t.Fatalf("bad data")
}
}
var testprog = []byte(`
// Test program for atexit, gets output file and data as arguments and writes
// data to output file in atexit handler.
package main
import (
"github.com/Sirupsen/logrus"
"flag"
"fmt"
"io/ioutil"
)
var outfile = ""
var data = ""
func handler() {
ioutil.WriteFile(outfile, []byte(data), 0666)
}
func badHandler() {
n := 0
fmt.Println(1/n)
}
func main() {
flag.Parse()
outfile = flag.Arg(0)
data = flag.Arg(1)
logrus.RegisterExitHandler(handler)
logrus.RegisterExitHandler(badHandler)
logrus.Fatal("Bye bye")
}
`)

View file

@ -1,77 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEntryWithError(t *testing.T) {
assert := assert.New(t)
defer func() {
ErrorKey = "error"
}()
err := fmt.Errorf("kaboom at layer %d", 4711)
assert.Equal(err, WithError(err).Data["error"])
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
assert.Equal(err, entry.WithError(err).Data["error"])
ErrorKey = "err"
assert.Equal(err, entry.WithError(err).Data["err"])
}
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}
View file
@@ -1,50 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
log.Level = logrus.DebugLevel
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"number": 8,
}).Debug("Started observing beach")
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"temperature": -4,
}).Debug("Temperature changes")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}
View file
@@ -1,30 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
}
func main() {
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}
View file
@@ -1,98 +0,0 @@
package logrus
import (
"fmt"
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
var errorFields = Fields{
"foo": fmt.Errorf("bar"),
"baz": fmt.Errorf("qux"),
}
func BenchmarkErrorTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}
View file
@@ -1,122 +0,0 @@
package logrus
import (
"testing"
"github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
View file
@@ -1,39 +0,0 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```
If you want to connect to the local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following.
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```
View file
@@ -1,54 +0,0 @@
// +build !windows,!nacl,!plan9
package logrus_syslog
import (
"fmt"
"github.com/Sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return logrus.AllLevels
}
View file
@@ -1,26 +0,0 @@
package logrus_syslog
import (
"github.com/Sirupsen/logrus"
"log/syslog"
"testing"
)
func TestLocalhostAddAndPrint(t *testing.T) {
log := logrus.New()
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
t.Errorf("Unable to connect to local syslog.")
}
log.Hooks.Add(hook)
for _, level := range hook.Levels() {
if len(log.Hooks[level]) != 1 {
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
}
}
log.Info("Congratulations!")
}
View file
@@ -1,67 +0,0 @@
package test
import (
"io/ioutil"
"github.com/Sirupsen/logrus"
)
// test.Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
Entries []*logrus.Entry
}
// Installs a test hook for the global logger.
func NewGlobal() *Hook {
hook := new(Hook)
logrus.AddHook(hook)
return hook
}
// Installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {
hook := new(Hook)
logger.Hooks.Add(hook)
return hook
}
// Creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {
logger := logrus.New()
logger.Out = ioutil.Discard
return logger, NewLocal(logger)
}
func (t *Hook) Fire(e *logrus.Entry) error {
t.Entries = append(t.Entries, e)
return nil
}
func (t *Hook) Levels() []logrus.Level {
return logrus.AllLevels
}
// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() (l *logrus.Entry) {
if i := len(t.Entries) - 1; i < 0 {
return nil
} else {
return t.Entries[i]
}
}
// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
t.Entries = make([]*logrus.Entry, 0)
}
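A hedged usage sketch for this test hook; the package name, test name, and message are illustrative, while `NewNullLogger`, `Entries`, `LastEntry`, and `Reset` come from the file above:

```go
package mypkg

import (
	"testing"

	"github.com/Sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
)

func TestLogsAreCaptured(t *testing.T) {
	// NewNullLogger discards all output and records entries in the hook.
	logger, hook := test.NewNullLogger()

	logger.Error("something failed")

	assert.Equal(t, 1, len(hook.Entries))
	assert.Equal(t, "something failed", hook.LastEntry().Message)

	// Reset clears recorded entries between assertions.
	hook.Reset()
	assert.Nil(t, hook.LastEntry())
}
```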
View file
@@ -1,39 +0,0 @@
package test
import (
"testing"
"github.com/Sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
func TestAllHooks(t *testing.T) {
assert := assert.New(t)
logger, hook := NewNullLogger()
assert.Nil(hook.LastEntry())
assert.Equal(0, len(hook.Entries))
logger.Error("Hello error")
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
assert.Equal(1, len(hook.Entries))
logger.Warn("Hello warning")
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
assert.Equal("Hello warning", hook.LastEntry().Message)
assert.Equal(2, len(hook.Entries))
hook.Reset()
assert.Nil(hook.LastEntry())
assert.Equal(0, len(hook.Entries))
hook = NewGlobal()
logrus.Error("Hello error")
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
assert.Equal(1, len(hook.Entries))
}
View file
@@ -1,199 +0,0 @@
package logrus
import (
"encoding/json"
"errors"
"strings"
"testing"
)
func TestErrorNotLost(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["error"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["omg"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestFieldClashWithTime(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("time", "right now!"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.time"] != "right now!" {
t.Fatal("fields.time not set to original time field")
}
if entry["time"] != "0001-01-01T00:00:00Z" {
t.Fatal("time field not set to current time, was: ", entry["time"])
}
}
func TestFieldClashWithMsg(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("msg", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.msg"] != "something" {
t.Fatal("fields.msg not set to original msg field")
}
}
func TestFieldClashWithLevel(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.level"] != "something" {
t.Fatal("fields.level not set to original level field")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
if b[len(b)-1] != '\n' {
t.Fatal("Expected JSON log entry to end with a newline")
}
}
func TestJSONMessageKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyMsg: "message",
},
}
b, err := formatter.Format(&Entry{Message: "oh hai"})
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
t.Fatal("Expected JSON to format message key")
}
}
func TestJSONLevelKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyLevel: "somelevel",
},
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "somelevel") {
t.Fatal("Expected JSON to format level key")
}
}
func TestJSONTimeKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyTime: "timeywimey",
},
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "timeywimey") {
t.Fatal("Expected JSON to format time key")
}
}
func TestJSONDisableTimestamp(t *testing.T) {
formatter := &JSONFormatter{
DisableTimestamp: true,
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if strings.Contains(s, FieldKeyTime) {
t.Error("Did not prevent timestamp", s)
}
}
func TestJSONEnableTimestamp(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, FieldKeyTime) {
t.Error("Timestamp not present", s)
}
}
View file
@@ -1,61 +0,0 @@
package logrus
import (
"os"
"testing"
)
// smallFields is a small size data set for benchmarking
var loggerFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
func BenchmarkDummyLogger(b *testing.B) {
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
if err != nil {
b.Fatalf("%v", err)
}
defer nullf.Close()
doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkDummyLoggerNoLock(b *testing.B) {
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
b.Fatalf("%v", err)
}
defer nullf.Close()
doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
}
func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
logger := Logger{
Out: out,
Level: InfoLevel,
Formatter: formatter,
}
entry := logger.WithFields(fields)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
entry.Info("aaa")
}
})
}
func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
logger := Logger{
Out: out,
Level: InfoLevel,
Formatter: formatter,
}
logger.SetNoLock()
entry := logger.WithFields(fields)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
entry.Info("aaa")
}
})
}
View file
@@ -1,361 +0,0 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("PANIC")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("FATAL")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("ERROR")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("WARN")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("WARNING")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("INFO")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("DEBUG")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
func TestGetSetLevelRace(t *testing.T) {
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
if i%2 == 0 {
SetLevel(InfoLevel)
} else {
GetLevel()
}
}(i)
}
wg.Wait()
}
func TestLoggingRace(t *testing.T) {
logger := New()
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
logger.Info("info")
wg.Done()
}()
}
wg.Wait()
}
// Compile test
func TestLogrusInterface(t *testing.T) {
var buffer bytes.Buffer
fn := func(l FieldLogger) {
b := l.WithField("key", "value")
b.Debug("Test")
}
// test logger
logger := New()
logger.Out = &buffer
fn(logger)
// test Entry
e := logger.WithField("another", "value")
fn(e)
}
View file
@@ -2,7 +2,9 @@
package logrus
import "io"
// IsTerminal returns true if the given writer's file descriptor is a terminal.
func IsTerminal() bool {
func IsTerminal(f io.Writer) bool {
return true
}
View file
@@ -9,14 +9,20 @@
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
// IsTerminal returns true if the given writer's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
func IsTerminal(f io.Writer) bool {
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
switch v := f.(type) {
case *os.File:
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
default:
return false
}
}
View file
@@ -3,13 +3,19 @@
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}
View file
@@ -8,6 +8,8 @@
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
@@ -19,9 +21,13 @@ var (
)
// IsTerminal returns true if the given writer's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}
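All of the platform variants above now share the signature `IsTerminal(f io.Writer)`. A brief sketch of the call as a caller might make it; the surrounding program is invented, and only the function and the logger's `Out` field come from the diff:

```go
// Assumes logrus is imported; a new logger's Out defaults to os.Stderr.
logger := logrus.New()
if logrus.IsTerminal(logger.Out) {
	logger.Info("output is a terminal; colors are safe")
}
```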
View file
@@ -3,9 +3,9 @@ package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"sync"
"time"
)
@@ -20,12 +20,10 @@ const (
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
type TextFormatter struct {
@@ -50,6 +48,27 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// QuoteCharacter can be set to override the default quoting character "
// with something else, for example ' or `.
QuoteCharacter string
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if len(f.QuoteCharacter) == 0 {
f.QuoteCharacter = "\""
}
if entry.Logger != nil {
f.isTerminal = IsTerminal(entry.Logger.Out)
}
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
@@ -70,8 +89,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
@@ -125,7 +145,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
}
}
func needsQuoting(text string) bool {
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
@@ -148,17 +171,17 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !needsQuoting(value) {
if !f.needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
if !f.needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", errmsg)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
fmt.Fprint(b, value)
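The two fields added to `TextFormatter` in this diff can be exercised like this. This is a sketch; the field semantics follow the comments in the diff, and the field name and message are illustrative:

```go
logger := logrus.New()
logger.Formatter = &logrus.TextFormatter{
	DisableColors: true,
	// Print empty values as key='' instead of a bare key=.
	QuoteEmptyFields: true,
	// Quote with ' rather than the default ".
	QuoteCharacter: "'",
}
logger.WithField("empty", "").Info("quoting demo")
```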
View file
@@ -1,71 +0,0 @@
package logrus
import (
"bytes"
"errors"
"testing"
"time"
"strings"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}
func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5 : timeEnd-1]
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
timeStr = timeStr[1 : len(timeStr)-1]
}
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}
func TestDisableTimestampWithColoredOutput(t *testing.T) {
tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
b, _ := tf.Format(WithField("test", "test"))
if strings.Contains(string(b), "[0000]") {
t.Error("timestamp not expected when DisableTimestamp is true")
}
}
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.

View file

@@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
printFunc = entry.Debug
case InfoLevel:
printFunc = logger.Info
printFunc = entry.Info
case WarnLevel:
printFunc = logger.Warn
printFunc = entry.Warn
case ErrorLevel:
printFunc = logger.Error
printFunc = entry.Error
case FatalLevel:
printFunc = logger.Fatal
printFunc = entry.Fatal
case PanicLevel:
printFunc = logger.Panic
printFunc = entry.Panic
default:
printFunc = logger.Print
printFunc = entry.Print
}
go logger.writerScanner(reader, printFunc)
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
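Since `WriterLevel` now hangs off `*Entry` as well as `*Logger`, a writer can carry pre-attached fields. A small hedged sketch; the field name and message are invented, while `WithField` and `WriterLevel` come from the diff:

```go
// Assumes "io" and "strings" are imported alongside logrus.
logger := logrus.New()
w := logger.WithField("component", "scanner").WriterLevel(logrus.WarnLevel)
defer w.Close()

// Every line copied in becomes a warn-level entry with the field attached.
io.Copy(w, strings.NewReader("disk almost full\n"))
```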
View file
@@ -1 +0,0 @@
3.4.0: QmZTgGMg34JKEvF1hjr7wwYESvFhg9Khv2WFibDAi5dhno
Some files were not shown because too many files have changed in this diff.