Merge pull request #14442 from cpuguy83/refactor_logdrvier_reader

Refactor log driver reader
This commit is contained in:
David Calavera 2015-07-22 11:54:35 -07:00
commit ebb4e279e8
7 changed files with 396 additions and 304 deletions

View file

@ -1,33 +1,20 @@
package broadcastwriter package broadcastwriter
import ( import (
"bytes"
"io" "io"
"sync" "sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/timeutils"
) )
// BroadcastWriter accumulate multiple io.WriteCloser by stream. // BroadcastWriter accumulate multiple io.WriteCloser by stream.
type BroadcastWriter struct { type BroadcastWriter struct {
sync.Mutex sync.Mutex
buf *bytes.Buffer writers map[io.WriteCloser]struct{}
jsLogBuf *bytes.Buffer
streams map[string](map[io.WriteCloser]struct{})
} }
// AddWriter adds new io.WriteCloser for stream. // AddWriter adds new io.WriteCloser.
// If stream is "", then all writes proceed as is. Otherwise every line from func (w *BroadcastWriter) AddWriter(writer io.WriteCloser) {
// input will be packed to serialized jsonlog.JSONLog.
func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
w.Lock() w.Lock()
if _, ok := w.streams[stream]; !ok { w.writers[writer] = struct{}{}
w.streams[stream] = make(map[io.WriteCloser]struct{})
}
w.streams[stream][writer] = struct{}{}
w.Unlock() w.Unlock()
} }
@ -35,67 +22,12 @@ func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
// this call. // this call.
func (w *BroadcastWriter) Write(p []byte) (n int, err error) { func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
w.Lock() w.Lock()
if writers, ok := w.streams[""]; ok { for sw := range w.writers {
for sw := range writers { if n, err := sw.Write(p); err != nil || n != len(p) {
if n, err := sw.Write(p); err != nil || n != len(p) { // On error, evict the writer
// On error, evict the writer delete(w.writers, sw)
delete(writers, sw)
}
}
if len(w.streams) == 1 {
if w.buf.Len() >= 4096 {
w.buf.Reset()
} else {
w.buf.Write(p)
}
w.Unlock()
return len(p), nil
} }
} }
if w.jsLogBuf == nil {
w.jsLogBuf = new(bytes.Buffer)
w.jsLogBuf.Grow(1024)
}
var timestamp string
created := time.Now().UTC()
w.buf.Write(p)
for {
if n := w.buf.Len(); n == 0 {
break
}
i := bytes.IndexByte(w.buf.Bytes(), '\n')
if i < 0 {
break
}
lineBytes := w.buf.Next(i + 1)
if timestamp == "" {
timestamp, err = timeutils.FastMarshalJSON(created)
if err != nil {
continue
}
}
for stream, writers := range w.streams {
if stream == "" {
continue
}
jsonLog := jsonlog.JSONLogBytes{Log: lineBytes, Stream: stream, Created: timestamp}
err = jsonLog.MarshalJSONBuf(w.jsLogBuf)
if err != nil {
logrus.Errorf("Error making JSON log line: %s", err)
continue
}
w.jsLogBuf.WriteByte('\n')
b := w.jsLogBuf.Bytes()
for sw := range writers {
if _, err := sw.Write(b); err != nil {
delete(writers, sw)
}
}
}
w.jsLogBuf.Reset()
}
w.jsLogBuf.Reset()
w.Unlock() w.Unlock()
return len(p), nil return len(p), nil
} }
@ -104,19 +36,16 @@ func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
// will be saved. // will be saved.
func (w *BroadcastWriter) Clean() error { func (w *BroadcastWriter) Clean() error {
w.Lock() w.Lock()
for _, writers := range w.streams { for w := range w.writers {
for w := range writers { w.Close()
w.Close()
}
} }
w.streams = make(map[string](map[io.WriteCloser]struct{})) w.writers = make(map[io.WriteCloser]struct{})
w.Unlock() w.Unlock()
return nil return nil
} }
func New() *BroadcastWriter { func New() *BroadcastWriter {
return &BroadcastWriter{ return &BroadcastWriter{
streams: make(map[string](map[io.WriteCloser]struct{})), writers: make(map[io.WriteCloser]struct{}),
buf: bytes.NewBuffer(nil),
} }
} }

View file

@ -32,9 +32,9 @@ func TestBroadcastWriter(t *testing.T) {
// Test 1: Both bufferA and bufferB should contain "foo" // Test 1: Both bufferA and bufferB should contain "foo"
bufferA := &dummyWriter{} bufferA := &dummyWriter{}
writer.AddWriter(bufferA, "") writer.AddWriter(bufferA)
bufferB := &dummyWriter{} bufferB := &dummyWriter{}
writer.AddWriter(bufferB, "") writer.AddWriter(bufferB)
writer.Write([]byte("foo")) writer.Write([]byte("foo"))
if bufferA.String() != "foo" { if bufferA.String() != "foo" {
@ -48,7 +48,7 @@ func TestBroadcastWriter(t *testing.T) {
// Test2: bufferA and bufferB should contain "foobar", // Test2: bufferA and bufferB should contain "foobar",
// while bufferC should only contain "bar" // while bufferC should only contain "bar"
bufferC := &dummyWriter{} bufferC := &dummyWriter{}
writer.AddWriter(bufferC, "") writer.AddWriter(bufferC)
writer.Write([]byte("bar")) writer.Write([]byte("bar"))
if bufferA.String() != "foobar" { if bufferA.String() != "foobar" {
@ -100,7 +100,7 @@ func TestRaceBroadcastWriter(t *testing.T) {
writer := New() writer := New()
c := make(chan bool) c := make(chan bool)
go func() { go func() {
writer.AddWriter(devNullCloser(0), "") writer.AddWriter(devNullCloser(0))
c <- true c <- true
}() }()
writer.Write([]byte("hello")) writer.Write([]byte("hello"))
@ -111,9 +111,9 @@ func BenchmarkBroadcastWriter(b *testing.B) {
writer := New() writer := New()
setUpWriter := func() { setUpWriter := func() {
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
writer.AddWriter(devNullCloser(0), "stdout") writer.AddWriter(devNullCloser(0))
writer.AddWriter(devNullCloser(0), "stderr") writer.AddWriter(devNullCloser(0))
writer.AddWriter(devNullCloser(0), "") writer.AddWriter(devNullCloser(0))
} }
} }
testLine := "Line that thinks that it is log line from docker" testLine := "Line that thinks that it is log line from docker"
@ -142,33 +142,3 @@ func BenchmarkBroadcastWriter(b *testing.B) {
b.StartTimer() b.StartTimer()
} }
} }
func BenchmarkBroadcastWriterWithoutStdoutStderr(b *testing.B) {
writer := New()
setUpWriter := func() {
for i := 0; i < 100; i++ {
writer.AddWriter(devNullCloser(0), "")
}
}
testLine := "Line that thinks that it is log line from docker"
var buf bytes.Buffer
for i := 0; i < 100; i++ {
buf.Write([]byte(testLine + "\n"))
}
// line without eol
buf.Write([]byte(testLine))
testText := buf.Bytes()
b.SetBytes(int64(5 * len(testText)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
setUpWriter()
for j := 0; j < 5; j++ {
if _, err := writer.Write(testText); err != nil {
b.Fatal(err)
}
}
writer.Clean()
}
}

226
ioutils/multireader.go Normal file
View file

@ -0,0 +1,226 @@
package ioutils
import (
"bytes"
"fmt"
"io"
"os"
)
type pos struct {
idx int
offset int64
}
type multiReadSeeker struct {
readers []io.ReadSeeker
pos *pos
posIdx map[io.ReadSeeker]int
}
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
var tmpOffset int64
switch whence {
case os.SEEK_SET:
for i, rdr := range r.readers {
// get size of the current reader
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
if offset > tmpOffset+s {
if i == len(r.readers)-1 {
rdrOffset := s + (offset - tmpOffset)
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
return -1, err
}
r.pos = &pos{i, rdrOffset}
return offset, nil
}
tmpOffset += s
continue
}
rdrOffset := offset - tmpOffset
idx := i
rdr.Seek(rdrOffset, os.SEEK_SET)
// make sure all following readers are at 0
for _, rdr := range r.readers[i+1:] {
rdr.Seek(0, os.SEEK_SET)
}
if rdrOffset == s && i != len(r.readers)-1 {
idx += 1
rdrOffset = 0
}
r.pos = &pos{idx, rdrOffset}
return offset, nil
}
case os.SEEK_END:
for _, rdr := range r.readers {
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
tmpOffset += s
}
r.Seek(tmpOffset+offset, os.SEEK_SET)
return tmpOffset + offset, nil
case os.SEEK_CUR:
if r.pos == nil {
return r.Seek(offset, os.SEEK_SET)
}
// Just return the current offset
if offset == 0 {
return r.getCurOffset()
}
curOffset, err := r.getCurOffset()
if err != nil {
return -1, err
}
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrOffset}
return curOffset + offset, nil
default:
return -1, fmt.Errorf("Invalid whence: %d", whence)
}
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
var rdr io.ReadSeeker
var rdrOffset int64
for i, rdr := range r.readers {
offsetTo, err := r.getOffsetToReader(rdr)
if err != nil {
return nil, -1, err
}
if offsetTo > offset {
rdr = r.readers[i-1]
rdrOffset = offsetTo - offset
break
}
if rdr == r.readers[len(r.readers)-1] {
rdrOffset = offsetTo + offset
break
}
}
return rdr, rdrOffset, nil
}
func (r *multiReadSeeker) getCurOffset() (int64, error) {
var totalSize int64
for _, rdr := range r.readers[:r.pos.idx+1] {
if r.posIdx[rdr] == r.pos.idx {
totalSize += r.pos.offset
break
}
size, err := getReadSeekerSize(rdr)
if err != nil {
return -1, fmt.Errorf("error getting seeker size: %v", err)
}
totalSize += size
}
return totalSize, nil
}
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
var offset int64
for _, r := range r.readers {
if r == rdr {
break
}
size, err := getReadSeekerSize(rdr)
if err != nil {
return -1, err
}
offset += size
}
return offset, nil
}
func (r *multiReadSeeker) Read(b []byte) (int, error) {
if r.pos == nil {
r.pos = &pos{0, 0}
}
bCap := int64(cap(b))
buf := bytes.NewBuffer(nil)
var rdr io.ReadSeeker
for _, rdr = range r.readers[r.pos.idx:] {
readBytes, err := io.CopyN(buf, rdr, bCap)
if err != nil && err != io.EOF {
return -1, err
}
bCap -= readBytes
if bCap == 0 {
break
}
}
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrPos}
return buf.Read(b)
}
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
// save the current position
pos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
// get the size
size, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
// reset the position
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
return -1, err
}
return size, nil
}
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
if len(readers) == 1 {
return readers[0]
}
idx := make(map[io.ReadSeeker]int)
for i, rdr := range readers {
idx[rdr] = i
}
return &multiReadSeeker{
readers: readers,
posIdx: idx,
}
}

149
ioutils/multireader_test.go Normal file
View file

@ -0,0 +1,149 @@
package ioutils
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"testing"
)
// TestMultiReadSeekerReadAll verifies that the combined reader yields the
// concatenation of its parts, that SEEK_END reports the combined size, and
// that rewinding to the start allows the full contents to be read again.
func TestMultiReadSeekerReadAll(t *testing.T) {
	base := "hello world"
	r1 := strings.NewReader(base + " 1")
	r2 := strings.NewReader(base + " 2")
	r3 := strings.NewReader(base + " 3")
	mr := MultiReadSeeker(r1, r2, r3)

	// Capture the size before reading: strings.Reader.Len reports unread bytes.
	wantSize := int64(r1.Len() + r2.Len() + r3.Len())
	want := "hello world 1hello world 2hello world 3"

	got, err := ioutil.ReadAll(mr)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != want {
		t.Fatalf("ReadAll failed, got: %q, expected %q", string(got), want)
	}

	size, err := mr.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatal(err)
	}
	if size != wantSize {
		t.Fatalf("reader size does not match, got %d, expected %d", size, wantSize)
	}

	// Reset the position and read again
	pos, err := mr.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatal(err)
	}
	if pos != 0 {
		t.Fatalf("expected position to be set to 0, got %d", pos)
	}

	got, err = ioutil.ReadAll(mr)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != want {
		t.Fatalf("ReadAll failed, got: %q, expected %q", string(got), want)
	}
}
// TestMultiReadSeekerReadEach reads the concatenated stream one underlying
// chunk at a time and checks, after each chunk, that SEEK_CUR reports the
// total bytes consumed and that SEEK_SET to that same value is a no-op.
func TestMultiReadSeekerReadEach(t *testing.T) {
	str := "hello world"
	s1 := strings.NewReader(str + " 1")
	s2 := strings.NewReader(str + " 2")
	s3 := strings.NewReader(str + " 3")
	mr := MultiReadSeeker(s1, s2, s3)

	var totalBytes int64
	for i, s := range []*strings.Reader{s1, s2, s3} {
		// Capture the length before reading: strings.Reader.Len reports
		// the number of *unread* bytes, which shrinks as mr consumes s.
		sLen := int64(s.Len())
		buf := make([]byte, s.Len())
		expected := []byte(fmt.Sprintf("%s %d", str, i+1))

		if _, err := mr.Read(buf); err != nil && err != io.EOF {
			t.Fatal(err)
		}

		if !bytes.Equal(buf, expected) {
			t.Fatalf("expected %q to be %q", string(buf), string(expected))
		}

		pos, err := mr.Seek(0, os.SEEK_CUR)
		if err != nil {
			t.Fatalf("iteration: %d, error: %v", i+1, err)
		}

		// check that the total bytes read is the current position of the seeker
		totalBytes += sLen
		if pos != totalBytes {
			t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
		}

		// This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well
		newPos, err := mr.Seek(pos, os.SEEK_SET)
		if err != nil {
			t.Fatal(err)
		}
		if newPos != pos {
			t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
		}
	}
}
// TestMultiReadSeekerReadSpanningChunks checks that a single Read whose
// buffer is larger than the first underlying reader pulls bytes from the
// following reader to fill it.
func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
	base := "hello world"
	r1 := strings.NewReader(base + " 1")
	r2 := strings.NewReader(base + " 2")
	r3 := strings.NewReader(base + " 3")
	mr := MultiReadSeeker(r1, r2, r3)

	// Ask for all of r1 plus the first three bytes of r2.
	buf := make([]byte, r1.Len()+3)
	if _, err := mr.Read(buf); err != nil {
		t.Fatal(err)
	}

	// expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string
	expected := "hello world 1hel"
	if string(buf) != expected {
		t.Fatalf("expected %s to be %s", string(buf), expected)
	}
}
// TestMultiReadSeekerNegativeSeek seeks backwards from the end by the length
// of the last chunk and verifies both the reported absolute position and
// that a subsequent Read returns exactly the last chunk.
func TestMultiReadSeekerNegativeSeek(t *testing.T) {
	str := "hello world"
	s1 := strings.NewReader(str + " 1")
	s2 := strings.NewReader(str + " 2")
	s3 := strings.NewReader(str + " 3")
	mr := MultiReadSeeker(s1, s2, s3)

	// Capture the lengths up front: strings.Reader.Len reports *unread*
	// bytes, so these values change once the seeker touches the readers.
	s1Len := s1.Len()
	s2Len := s2.Len()
	s3Len := s3.Len()

	s, err := mr.Seek(int64(-1*s3Len), os.SEEK_END)
	if err != nil {
		t.Fatal(err)
	}
	if s != int64(s1Len+s2Len) {
		// Report using the saved lengths: by this point s1/s2 have been
		// seeked to their ends, so s1.Len()+s2.Len() would print 0 and
		// make the failure message misleading.
		t.Fatalf("expected %d to be %d", s, s1Len+s2Len)
	}

	buf := make([]byte, s3Len)
	if _, err := mr.Read(buf); err != nil && err != io.EOF {
		t.Fatal(err)
	}
	expected := fmt.Sprintf("%s %d", str, 3)
	if string(buf) != expected {
		t.Fatalf("expected %q to be %q", string(buf), expected)
	}
}

View file

@ -3,7 +3,6 @@ package jsonlog
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"time" "time"
) )
@ -29,28 +28,3 @@ func (jl *JSONLog) Reset() {
jl.Stream = "" jl.Stream = ""
jl.Created = time.Time{} jl.Created = time.Time{}
} }
func WriteLog(src io.Reader, dst io.Writer, format string, since time.Time) error {
dec := json.NewDecoder(src)
l := &JSONLog{}
for {
l.Reset()
if err := dec.Decode(l); err != nil {
if err == io.EOF {
return nil
}
return err
}
if !since.IsZero() && l.Created.Before(since) {
continue
}
line, err := l.Format(format)
if err != nil {
return err
}
if _, err := io.WriteString(dst, line); err != nil {
return err
}
}
}

View file

@ -1,157 +0,0 @@
package jsonlog
import (
"bytes"
"encoding/json"
"io/ioutil"
"regexp"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/pkg/timeutils"
)
// Invalid json should return an error
func TestWriteLogWithInvalidJSON(t *testing.T) {
json := strings.NewReader("Invalid json")
w := bytes.NewBuffer(nil)
if err := WriteLog(json, w, "json", time.Time{}); err == nil {
t.Fatalf("Expected an error, got [%v]", w.String())
}
}
// Any format is valid, it will just print it
func TestWriteLogWithInvalidFormat(t *testing.T) {
testLine := "Line that thinks that it is log line from docker\n"
var buf bytes.Buffer
e := json.NewEncoder(&buf)
for i := 0; i < 35; i++ {
e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
}
w := bytes.NewBuffer(nil)
if err := WriteLog(&buf, w, "invalid format", time.Time{}); err != nil {
t.Fatal(err)
}
res := w.String()
t.Logf("Result of WriteLog: %q", res)
lines := strings.Split(strings.TrimSpace(res), "\n")
expression := "^invalid format Line that thinks that it is log line from docker$"
logRe := regexp.MustCompile(expression)
expectedLines := 35
if len(lines) != expectedLines {
t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
}
for _, l := range lines {
if !logRe.MatchString(l) {
t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
}
}
}
// Having multiple Log/Stream element
func TestWriteLogWithMultipleStreamLog(t *testing.T) {
testLine := "Line that thinks that it is log line from docker\n"
var buf bytes.Buffer
e := json.NewEncoder(&buf)
for i := 0; i < 35; i++ {
e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
}
w := bytes.NewBuffer(nil)
if err := WriteLog(&buf, w, "invalid format", time.Time{}); err != nil {
t.Fatal(err)
}
res := w.String()
t.Logf("Result of WriteLog: %q", res)
lines := strings.Split(strings.TrimSpace(res), "\n")
expression := "^invalid format Line that thinks that it is log line from docker$"
logRe := regexp.MustCompile(expression)
expectedLines := 35
if len(lines) != expectedLines {
t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
}
for _, l := range lines {
if !logRe.MatchString(l) {
t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
}
}
}
// Write log with since after created, it won't print anything
func TestWriteLogWithDate(t *testing.T) {
created, _ := time.Parse("YYYY-MM-dd", "2015-01-01")
var buf bytes.Buffer
testLine := "Line that thinks that it is log line from docker\n"
jsonLog := JSONLog{Log: testLine, Stream: "stdout", Created: created}
if err := json.NewEncoder(&buf).Encode(jsonLog); err != nil {
t.Fatal(err)
}
w := bytes.NewBuffer(nil)
if err := WriteLog(&buf, w, "json", time.Now()); err != nil {
t.Fatal(err)
}
res := w.String()
if res != "" {
t.Fatalf("Expected empty log, got [%v]", res)
}
}
// Happy path :)
func TestWriteLog(t *testing.T) {
testLine := "Line that thinks that it is log line from docker\n"
format := timeutils.RFC3339NanoFixed
logs := map[string][]string{
"": {"35", "^Line that thinks that it is log line from docker$"},
"json": {"1", `^{\"log\":\"Line that thinks that it is log line from docker\\n\",\"stream\":\"stdout\",\"time\":.{30,}\"}$`},
// 30+ symbols, five more can come from system timezone
format: {"35", `.{30,} Line that thinks that it is log line from docker`},
}
for givenFormat, expressionAndLines := range logs {
expectedLines, _ := strconv.Atoi(expressionAndLines[0])
expression := expressionAndLines[1]
var buf bytes.Buffer
e := json.NewEncoder(&buf)
for i := 0; i < 35; i++ {
e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
}
w := bytes.NewBuffer(nil)
if err := WriteLog(&buf, w, givenFormat, time.Time{}); err != nil {
t.Fatal(err)
}
res := w.String()
t.Logf("Result of WriteLog: %q", res)
lines := strings.Split(strings.TrimSpace(res), "\n")
if len(lines) != expectedLines {
t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
}
logRe := regexp.MustCompile(expression)
for _, l := range lines {
if !logRe.MatchString(l) {
t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
}
}
}
}
func BenchmarkWriteLog(b *testing.B) {
var buf bytes.Buffer
e := json.NewEncoder(&buf)
testLine := "Line that thinks that it is log line from docker\n"
for i := 0; i < 30; i++ {
e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
}
r := bytes.NewReader(buf.Bytes())
w := ioutil.Discard
format := timeutils.RFC3339NanoFixed
b.SetBytes(int64(r.Len()))
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := WriteLog(r, w, format, time.Time{}); err != nil {
b.Fatal(err)
}
b.StopTimer()
r.Seek(0, 0)
b.StartTimer()
}
}

View file

@ -3,6 +3,7 @@ package tailfile
import ( import (
"bytes" "bytes"
"errors" "errors"
"io"
"os" "os"
) )
@ -12,7 +13,7 @@ var eol = []byte("\n")
var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive") var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive")
//TailFile returns last n lines of file f //TailFile returns last n lines of file f
func TailFile(f *os.File, n int) ([][]byte, error) { func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
if n <= 0 { if n <= 0 {
return nil, ErrNonPositiveLinesNumber return nil, ErrNonPositiveLinesNumber
} }