Upstream, chihaya reads this header to find the kid among the keys it maintains. It used to just iterate over all of them, but now it needs the kid to look up the right key.
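(Chihaya itself is written in Go, but the lookup amounts to something like the following sketch, here in Python with PyJWT; the MAINTAINED_KEYS mapping and the key file name are invented for illustration.)

import jwt

# Hypothetical kid -> PEM public key mapping, standing in for the
# tracker's maintained key list.
MAINTAINED_KEYS = {
    'instance-key-1': open('instance-key-1.pub').read(),
}

def verify_announce_jwt(token, announce_url):
  # Peek at the (unverified) header solely to extract the kid...
  kid = jwt.get_unverified_header(token).get('kid')
  if kid not in MAINTAINED_KEYS:
    raise ValueError('unknown kid: %r' % kid)
  # ...then verify the signature against the key that kid names.
  return jwt.decode(token, MAINTAINED_KEYS[kid],
                    algorithms=['RS256'], audience=announce_url)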
On the signing side, the registry sets that kid when it mints the announce JWT (Python, 130 lines, 3.7 KiB):
import hashlib
import time

from binascii import hexlify
from cachetools import lru_cache

import bencode
import jwt
import resumablehashlib

from app import app, instance_keys


ANNOUNCE_URL = app.config['BITTORRENT_ANNOUNCE_URL']
FILENAME_PEPPER = app.config['BITTORRENT_FILENAME_PEPPER']
REGISTRY_TITLE = app.config['REGISTRY_TITLE']


@lru_cache(maxsize=1)
def _load_private_key(private_key_file_path):
  with open(private_key_file_path) as private_key_file:
    return private_key_file.read()


def jwt_from_infodict(infodict):
  """ Returns an encoded JWT for the given BitTorrent info dict, signed by the local instance's
      private key.
  """
  digest = hashlib.sha1()
  digest.update(bencode.bencode(infodict))
  return jwt_from_infohash(digest.digest())


def jwt_from_infohash(infohash_digest):
  """ Returns an encoded JWT for the given BitTorrent infohash, signed by the local instance's
      private key.
  """
  token_data = {
    'iss': instance_keys.service_name,
    'aud': ANNOUNCE_URL,
    'infohash': hexlify(infohash_digest),
  }
  # The kid header is what lets the tracker select the correct verification key.
  return jwt.encode(token_data, instance_keys.local_private_key, algorithm='RS256',
                    headers={'kid': instance_keys.local_key_id})


def make_torrent(name, webseed, length, piece_length, pieces):
  info_dict = {
    'name': name,
    'length': length,
    'piece length': piece_length,
    'pieces': pieces,
    'private': 1,
  }

  return bencode.bencode({
    'announce': ANNOUNCE_URL + "?jwt=" + jwt_from_infodict(info_dict),
    'url-list': webseed,
    'encoding': 'UTF-8',
    'created by': REGISTRY_TITLE,
    'creation date': int(time.time()),
    'info': info_dict,
  })


def public_torrent_filename(blob_uuid):
  return hashlib.sha256(blob_uuid).hexdigest()


def per_user_torrent_filename(user_uuid, blob_uuid):
  return hashlib.sha256(FILENAME_PEPPER + "||" + blob_uuid + "||" + user_uuid).hexdigest()


class PieceHasher(object):
  """ Utility for computing torrent piece hashes as the data flows through the update
      method of this class. Users should get the final value by calling final_piece_hashes
      since new pieces are allocated lazily.
  """
  def __init__(self, piece_size, starting_offset=0, starting_piece_hash_bytes='',
               hash_fragment_to_resume=None):
    if not isinstance(starting_offset, (int, long)):
      raise TypeError('starting_offset must be an integer')
    elif not isinstance(piece_size, (int, long)):
      raise TypeError('piece_size must be an integer')

    self._current_offset = starting_offset
    self._piece_size = piece_size
    self._piece_hashes = bytearray(starting_piece_hash_bytes)

    if hash_fragment_to_resume is None:
      self._hash_fragment = resumablehashlib.sha1()
    else:
      self._hash_fragment = hash_fragment_to_resume

  def update(self, buf):
    buf_offset = 0
    while buf_offset < len(buf):
      # Slice from buf_offset (not 0) so that a buffer spanning a piece
      # boundary does not re-hash its leading bytes.
      buf_bytes_to_hash = buf[buf_offset:buf_offset + self._piece_length_remaining()]
      to_hash_len = len(buf_bytes_to_hash)

      if self._piece_offset() == 0 and to_hash_len > 0 and self._current_offset > 0:
        # We are opening a new piece
        self._piece_hashes.extend(self._hash_fragment.digest())
        self._hash_fragment = resumablehashlib.sha1()

      self._hash_fragment.update(buf_bytes_to_hash)
      self._current_offset += to_hash_len
      buf_offset += to_hash_len

  @property
  def hashed_bytes(self):
    return self._current_offset

  def _piece_length_remaining(self):
    return self._piece_size - (self._current_offset % self._piece_size)

  def _piece_offset(self):
    return self._current_offset % self._piece_size

  @property
  def piece_hashes(self):
    return self._piece_hashes

  @property
  def hash_fragment(self):
    return self._hash_fragment

  def final_piece_hashes(self):
    return self._piece_hashes + self._hash_fragment.digest()
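For reference, here is a rough sketch of how these pieces fit together when building a torrent for a blob. The blob object, the read_blob_in_chunks helper, the webseed URL, and the 1 MiB piece size are all made up for illustration:

PIECE_SIZE = 1024 * 1024  # 1 MiB, chosen arbitrarily for this example

hasher = PieceHasher(PIECE_SIZE)
for chunk in read_blob_in_chunks(blob):  # hypothetical chunk source
  hasher.update(chunk)

torrent = make_torrent(name=public_torrent_filename(blob.uuid),
                       webseed='https://registry.example/blob/' + blob.uuid,
                       length=hasher.hashed_bytes,
                       piece_length=PIECE_SIZE,
                       pieces=str(hasher.final_piece_hashes()))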