mirror of https://github.com/vbatts/merkle.git synced 2024-12-12 06:58:04 +00:00

stream: fix for large chunks

io.Copy() uses a 32*1024-byte chunk size. When a block size larger than
this was used, the copy went out of bounds.

https://gist.github.com/philips/b90ed91068930fe85bed

Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
Vincent Batts 2015-04-09 14:12:48 -04:00
parent bedd7a10ff
commit 6b706eb39f
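
Before the diff, a minimal sketch of the failure mode (not code from this repository; the chunk, blockSize, and numBytes names here are only illustrative). io.Copy() hands Write() at most 32*1024 bytes per call, so when the merkle block size exceeds that, the old expression b[:(mh.blockSize-numBytes)] asked for more bytes than the incoming slice holds and panicked with a slice-bounds error. Clamping the end index to len(b), as the patch below does, avoids it:

package main

import "fmt"

func main() {
    const blockSize = 64 * 1024    // merkle block size larger than io.Copy's 32 KiB chunk
    chunk := make([]byte, 32*1024) // what a single io.Copy write delivers
    curBlock := make([]byte, blockSize)
    numBytes := 10 // bytes carried over from a previous partial block

    // Old behaviour: chunk[:(blockSize-numBytes)] would panic, because
    // blockSize-numBytes (65526) exceeds len(chunk) (32768).
    //
    // Fixed behaviour: clamp the end of the slice to len(chunk), exactly as
    // the patch does with its local `end` variable.
    end := blockSize - numBytes
    if end > len(chunk) {
        end = len(chunk)
    }
    n := copy(curBlock[numBytes:], chunk[:end])
    fmt.Println("copied", n, "bytes") // copied 32768 bytes
}
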


@@ -150,13 +150,22 @@ func (mh *merkleHash) Write(b []byte) (int, error) {
 		offset     int
 	)
 	if mh.lastBlock != nil && mh.lastBlockLen > 0 {
+		if (mh.lastBlockLen + len(b)) < mh.blockSize {
+			mh.lastBlockLen += copy(mh.lastBlock[mh.lastBlockLen:], b[:])
+			return len(b), nil
+		}
 		// XXX off by one?
 		numBytes = copy(curBlock[:], mh.lastBlock[:mh.lastBlockLen])
 		// not adding to numWritten, since these blocks were accounted for in a
 		// prior Write()
 
 		// then we'll chunk the front of the incoming bytes
-		offset = copy(curBlock[numBytes:], b[:(mh.blockSize-numBytes)])
+		end := mh.blockSize - numBytes
+		if end > len(b) {
+			end = len(b)
+		}
+		offset = copy(curBlock[numBytes:], b[:end])
 		n, err := NewNodeHashBlock(mh.hm, curBlock)
 		if err != nil {
 			// XXX might need to stash again the prior lastBlock and first little chunk