stream: fix for large chunks

io.Copy() uses a 32*1024-byte chunk size. When a block size larger than
this was used, the copy went out of bounds.

https://gist.github.com/philips/b90ed91068930fe85bed

Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
This commit is contained in:
Vincent Batts 2015-04-09 14:12:48 -04:00
parent bedd7a10ff
commit 6b706eb39f
1 changed file with 10 additions and 1 deletion

View File

@ -150,13 +150,22 @@ func (mh *merkleHash) Write(b []byte) (int, error) {
offset int
)
if mh.lastBlock != nil && mh.lastBlockLen > 0 {
if (mh.lastBlockLen + len(b)) < mh.blockSize {
mh.lastBlockLen += copy(mh.lastBlock[mh.lastBlockLen:], b[:])
return len(b), nil
}
// XXX off by one?
numBytes = copy(curBlock[:], mh.lastBlock[:mh.lastBlockLen])
// not adding to numWritten, since these blocks were accounted for in a
// prior Write()
// then we'll chunk the front of the incoming bytes
offset = copy(curBlock[numBytes:], b[:(mh.blockSize-numBytes)])
end := mh.blockSize - numBytes
if end > len(b) {
end = len(b)
}
offset = copy(curBlock[numBytes:], b[:end])
n, err := NewNodeHashBlock(mh.hm, curBlock)
if err != nil {
// XXX might need to stash again the prior lastBlock and first little chunk