Merge pull request #7329 from erikh/move_broadcastwriter

Cleanup: utils/broadcastwriter -> pkg/broadcastwriter
Michael Crosby 2014-08-07 14:51:42 -07:00
commit 82b1b477ac
3 changed files with 282 additions and 0 deletions


@@ -0,0 +1,93 @@
package broadcastwriter

import (
	"bytes"
	"encoding/json"
	"io"
	"log"
	"sync"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

// BroadcastWriter accumulates multiple io.WriteCloser instances, grouped by stream.
type BroadcastWriter struct {
	sync.Mutex
	buf     *bytes.Buffer
	streams map[string](map[io.WriteCloser]struct{})
}

// AddWriter adds a new io.WriteCloser for stream.
// If stream is "", then all writes proceed as is. Otherwise every line from
// the input will be packed into a serialized jsonlog.JSONLog.
func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
	w.Lock()
	if _, ok := w.streams[stream]; !ok {
		w.streams[stream] = make(map[io.WriteCloser]struct{})
	}
	w.streams[stream][writer] = struct{}{}
	w.Unlock()
}

// Write writes bytes to all writers. Failed writers will be evicted during
// this call.
func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
	created := time.Now().UTC()
	w.Lock()
	// Writers registered under stream "" receive the bytes as-is.
	if writers, ok := w.streams[""]; ok {
		for sw := range writers {
			if n, err := sw.Write(p); err != nil || n != len(p) {
				// On error, evict the writer
				delete(writers, sw)
			}
		}
	}
	// Buffer the input and emit one serialized JSONLog per complete line
	// to every stream-specific writer.
	w.buf.Write(p)
	for {
		line, err := w.buf.ReadString('\n')
		if err != nil {
			// No trailing newline yet: keep the partial line for the next Write.
			w.buf.Write([]byte(line))
			break
		}
		for stream, writers := range w.streams {
			if stream == "" {
				continue
			}
			b, err := json.Marshal(jsonlog.JSONLog{Log: line, Stream: stream, Created: created})
			if err != nil {
				log.Printf("Error making JSON log line: %s", err)
				continue
			}
			b = append(b, '\n')
			for sw := range writers {
				if _, err := sw.Write(b); err != nil {
					delete(writers, sw)
				}
			}
		}
	}
	w.Unlock()
	return len(p), nil
}

// Clean closes and removes all writers. The last non-eol-terminated part of
// the data is kept in the buffer.
func (w *BroadcastWriter) Clean() error {
	w.Lock()
	for _, writers := range w.streams {
		for w := range writers {
			w.Close()
		}
	}
	w.streams = make(map[string](map[io.WriteCloser]struct{}))
	w.Unlock()
	return nil
}

// New returns a BroadcastWriter with no attached writers.
func New() *BroadcastWriter {
	return &BroadcastWriter{
		streams: make(map[string](map[io.WriteCloser]struct{})),
		buf:     bytes.NewBuffer(nil),
	}
}
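
For context, a minimal usage sketch of the moved package (not part of this diff): writers registered with stream "" receive the raw bytes, while writers registered under a stream name such as "stdout" receive one serialized jsonlog.JSONLog per newline-terminated line. The nopCloser helper and the buffer names below are illustrative assumptions, not part of the package.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/broadcastwriter"
)

// nopCloser is an illustrative helper: AddWriter expects an io.WriteCloser,
// so we wrap a plain io.Writer with a no-op Close.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	bw := broadcastwriter.New()

	raw := &bytes.Buffer{}  // stream "": gets bytes exactly as written
	logs := &bytes.Buffer{} // stream "stdout": gets one JSON log line per '\n'

	bw.AddWriter(nopCloser{raw}, "")
	bw.AddWriter(nopCloser{logs}, "stdout")

	bw.Write([]byte("hello\nwor")) // "wor" stays buffered until its newline arrives
	bw.Write([]byte("ld\n"))

	fmt.Print(raw.String())  // hello\nworld\n
	fmt.Print(logs.String()) // two jsonlog.JSONLog lines with stream "stdout"
	bw.Clean()
}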


@@ -0,0 +1,144 @@
package broadcastwriter

import (
	"bytes"
	"errors"
	"testing"
)

type dummyWriter struct {
	buffer      bytes.Buffer
	failOnWrite bool
}

func (dw *dummyWriter) Write(p []byte) (n int, err error) {
	if dw.failOnWrite {
		return 0, errors.New("Fake fail")
	}
	return dw.buffer.Write(p)
}

func (dw *dummyWriter) String() string {
	return dw.buffer.String()
}

func (dw *dummyWriter) Close() error {
	return nil
}

func TestBroadcastWriter(t *testing.T) {
	writer := New()

	// Test 1: Both bufferA and bufferB should contain "foo"
	bufferA := &dummyWriter{}
	writer.AddWriter(bufferA, "")
	bufferB := &dummyWriter{}
	writer.AddWriter(bufferB, "")
	writer.Write([]byte("foo"))
	if bufferA.String() != "foo" {
		t.Errorf("Buffer contains %v", bufferA.String())
	}
	if bufferB.String() != "foo" {
		t.Errorf("Buffer contains %v", bufferB.String())
	}

	// Test 2: bufferA and bufferB should contain "foobar",
	// while bufferC should only contain "bar"
	bufferC := &dummyWriter{}
	writer.AddWriter(bufferC, "")
	writer.Write([]byte("bar"))
	if bufferA.String() != "foobar" {
		t.Errorf("Buffer contains %v", bufferA.String())
	}
	if bufferB.String() != "foobar" {
		t.Errorf("Buffer contains %v", bufferB.String())
	}
	if bufferC.String() != "bar" {
		t.Errorf("Buffer contains %v", bufferC.String())
	}

	// Test 3: Test eviction on failure
	bufferA.failOnWrite = true
	writer.Write([]byte("fail"))
	if bufferA.String() != "foobar" {
		t.Errorf("Buffer contains %v", bufferA.String())
	}
	if bufferC.String() != "barfail" {
		t.Errorf("Buffer contains %v", bufferC.String())
	}

	// Even though we reset the flag, no more writes should go in there
	bufferA.failOnWrite = false
	writer.Write([]byte("test"))
	if bufferA.String() != "foobar" {
		t.Errorf("Buffer contains %v", bufferA.String())
	}
	if bufferC.String() != "barfailtest" {
		t.Errorf("Buffer contains %v", bufferC.String())
	}

	writer.Clean()
}

type devNullCloser int

func (d devNullCloser) Close() error {
	return nil
}

func (d devNullCloser) Write(buf []byte) (int, error) {
	return len(buf), nil
}

// This test checks for races. It is only useful when run with the race detector.
func TestRaceBroadcastWriter(t *testing.T) {
	writer := New()
	c := make(chan bool)
	go func() {
		writer.AddWriter(devNullCloser(0), "")
		c <- true
	}()
	writer.Write([]byte("hello"))
	<-c
}

func BenchmarkBroadcastWriter(b *testing.B) {
	writer := New()
	setUpWriter := func() {
		for i := 0; i < 100; i++ {
			writer.AddWriter(devNullCloser(0), "stdout")
			writer.AddWriter(devNullCloser(0), "stderr")
			writer.AddWriter(devNullCloser(0), "")
		}
	}
	testLine := "Line that thinks that it is log line from docker"
	var buf bytes.Buffer
	for i := 0; i < 100; i++ {
		buf.Write([]byte(testLine + "\n"))
	}
	// line without eol
	buf.Write([]byte(testLine))
	testText := buf.Bytes()
	b.SetBytes(int64(5 * len(testText)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		setUpWriter()
		b.StartTimer()
		for j := 0; j < 5; j++ {
			if _, err := writer.Write(testText); err != nil {
				b.Fatal(err)
			}
		}
		b.StopTimer()
		writer.Clean()
		b.StartTimer()
	}
}
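
As its comment says, TestRaceBroadcastWriter is only meaningful with the race detector enabled (go test -race); without it, the test passes trivially. The benchmark registers "stdout", "stderr", and raw ("") writers together, so it exercises both the raw copy path and the per-line JSON serialization path of Write.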

jsonlog/jsonlog.go

@@ -0,0 +1,45 @@
package jsonlog

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"time"
)

// JSONLog is a single log line together with its stream name and timestamp.
type JSONLog struct {
	Log     string    `json:"log,omitempty"`
	Stream  string    `json:"stream,omitempty"`
	Created time.Time `json:"time"`
}

// Format returns the raw log line for an empty format, the entry marshaled as
// JSON for the "json" format, and otherwise the line prefixed with the
// timestamp rendered in the given time layout.
func (jl *JSONLog) Format(format string) (string, error) {
	if format == "" {
		return jl.Log, nil
	}
	if format == "json" {
		m, err := json.Marshal(jl)
		return string(m), err
	}
	return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil
}

// WriteLog decodes a stream of JSONLog entries from src and writes each one,
// formatted with Format, to dst until src is exhausted.
func WriteLog(src io.Reader, dst io.WriteCloser, format string) error {
	dec := json.NewDecoder(src)
	for {
		l := &JSONLog{}
		if err := dec.Decode(l); err == io.EOF {
			return nil
		} else if err != nil {
			log.Printf("Error streaming logs: %s", err)
			return err
		}
		line, err := l.Format(format)
		if err != nil {
			return err
		}
		fmt.Fprintf(dst, "%s", line)
	}
}
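
A small sketch of how these two entry points behave (again illustrative, not part of the diff); the sample JSON lines below are assumptions shaped like the output BroadcastWriter produces above.

package main

import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	// Two serialized JSONLog entries of the kind BroadcastWriter emits.
	src := strings.NewReader(
		`{"log":"hello\n","stream":"stdout","time":"2014-08-07T21:51:42Z"}` + "\n" +
			`{"log":"world\n","stream":"stderr","time":"2014-08-07T21:51:43Z"}` + "\n")

	// format "" writes the raw log text, "json" re-marshals each entry,
	// and any other string is used as a time layout to prefix each line.
	if err := jsonlog.WriteLog(src, os.Stdout, ""); err != nil {
		log.Fatal(err)
	}
	// Output:
	// hello
	// world
}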