mirror of
				https://github.com/vbatts/tar-split.git
				synced 2025-10-28 00:54:30 +00:00 
			
		
		
		
	tar: asm: store padding in chunks to avoid memory exhaustion
Previously, we would read the entire padding in a given archive into memory in order to store it in the packer. This would cause memory exhaustion if a malicious archive was crafted with very large amounts of padding. Since a given SegmentType is reconstructed losslessly, we can simply chunk up any padding into large segments to avoid this problem. Use a reasonable default of 1MiB to avoid changing the tar-split.json of existing archives that are not malformed. Fixes: CVE-2017-14992 Signed-off-by: Aleksa Sarai <asarai@suse.de>
This commit is contained in:
		
							parent
							
								
									b9775006bf
								
							
						
					
					
						commit
						3d9db48dbe
					
				
					 1 changed file with 28 additions and 15 deletions
				
			
		|  | @ -2,7 +2,6 @@ package asm | |||
| 
 | ||||
| import ( | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 
 | ||||
| 	"github.com/vbatts/tar-split/archive/tar" | ||||
| 	"github.com/vbatts/tar-split/tar/storage" | ||||
|  | @ -119,20 +118,34 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// it is allowable, and not uncommon that there is further padding on the | ||||
| 		// end of an archive, apart from the expected 1024 null bytes. | ||||
| 		remainder, err := ioutil.ReadAll(outputRdr) | ||||
| 		if err != nil && err != io.EOF { | ||||
| 			pW.CloseWithError(err) | ||||
| 			return | ||||
| 		} | ||||
| 		_, err = p.AddEntry(storage.Entry{ | ||||
| 			Type:    storage.SegmentType, | ||||
| 			Payload: remainder, | ||||
| 		}) | ||||
| 		if err != nil { | ||||
| 			pW.CloseWithError(err) | ||||
| 			return | ||||
| 		// It is allowable, and not uncommon that there is further padding on | ||||
| 		// the end of an archive, apart from the expected 1024 null bytes. We | ||||
| 		// do this in chunks rather than in one go to avoid cases where a | ||||
| 		// maliciously crafted tar file tries to trick us into reading many GBs | ||||
| 		// into memory. | ||||
| 		const paddingChunkSize = 1024 * 1024 | ||||
| 		var paddingChunk [paddingChunkSize]byte | ||||
| 		for { | ||||
| 			var isEOF bool | ||||
| 			n, err := outputRdr.Read(paddingChunk[:]) | ||||
| 			if err != nil { | ||||
| 				if err != io.EOF { | ||||
| 					pW.CloseWithError(err) | ||||
| 					return | ||||
| 				} | ||||
| 				isEOF = true | ||||
| 			} | ||||
| 			_, err = p.AddEntry(storage.Entry{ | ||||
| 				Type:    storage.SegmentType, | ||||
| 				Payload: paddingChunk[:n], | ||||
| 			}) | ||||
| 			if err != nil { | ||||
| 				pW.CloseWithError(err) | ||||
| 				return | ||||
| 			} | ||||
| 			if isEOF { | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		pW.Close() | ||||
| 	}() | ||||
|  |  | |||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue