refactor: Simplify the Hugging Face Hub API and update it to reflect changes in constants.py

teleprint-me 2024-05-28 19:16:32 -04:00
parent aa28cfe6ec
commit f1d067e7a6
No known key found for this signature in database
GPG key ID: B0D11345E65C4D48


@@ -12,9 +12,9 @@ from .constants import (
     GPT_PRE_TOKENIZER_DEFAULT,
     HF_TOKENIZER_BPE_FILES,
     HF_TOKENIZER_SPM_FILES,
-    MODEL_FILE_TYPE_NAMES,
-    VOCAB_TYPE_NAMES,
-    ModelFileType,
+    ModelFileExtension,
+    NormalizerType,
+    PreTokenizerType,
     VocabType,
 )
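
The import swap replaces the old lookup dictionaries (MODEL_FILE_TYPE_NAMES, VOCAB_TYPE_NAMES) and the ModelFileType enum with enums that carry their own values. Their definitions live in constants.py and are not part of this diff; the sketch below is only a plausible shape for ModelFileExtension, with member names and values inferred from the suffix comparison and the extension map removed further down (NormalizerType and PreTokenizerType presumably follow the same pattern for normalizer and pre-tokenizer kinds).

from enum import Enum

# Hypothetical sketch of ModelFileExtension -- the real definition is in
# constants.py and is not shown in this commit. Values are inferred from
# `suffix == file_extension.value` and the mapping removed from HFHubModel.
class ModelFileExtension(Enum):
    PT = ".pt"
    PTH = ".pth"
    BIN = ".bin"
    SAFETENSORS = ".safetensors"
    JSON = ".json"
    MODEL = ".model"
    GGUF = ".gguf"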
@@ -23,7 +23,7 @@ class HFHubBase:
     def __init__(
         self,
         model_path: None | str | pathlib.Path,
-        logger: None | logging.Logger
+        logger: None | logging.Logger,
     ):
         # Set the model path
         if model_path is None:
@@ -44,7 +44,7 @@ class HFHubBase:
         self._model_path = value

     def write_file(self, content: bytes, file_path: pathlib.Path) -> None:
-        with open(file_path, 'wb') as file:
+        with open(file_path, "wb") as file:
             file.write(content)
         self.logger.info(f"Wrote {len(content)} bytes to {file_path} successfully")
@@ -54,7 +54,7 @@ class HFHubRequest(HFHubBase):
         self,
         auth_token: None | str,
         model_path: None | str | pathlib.Path,
-        logger: None | logging.Logger
+        logger: None | logging.Logger,
     ):
         super().__init__(model_path, logger)
@@ -91,15 +91,15 @@ class HFHubRequest(HFHubBase):
         return [x.rfilename for x in model_info(model_repo).siblings]

     def list_filtered_remote_files(
-        self, model_repo: str, file_type: ModelFileType
+        self, model_repo: str, file_extension: ModelFileExtension
     ) -> list[str]:
         model_files = []
         self.logger.info(f"Repo:{model_repo}")
-        self.logger.debug(f"Match:{MODEL_FILE_TYPE_NAMES[file_type]}:{file_type}")
+        self.logger.debug(f"FileExtension:{file_extension.value}")
         for filename in HFHubRequest.list_remote_files(model_repo):
             suffix = pathlib.Path(filename).suffix
             self.logger.debug(f"Suffix: {suffix}")
-            if suffix == MODEL_FILE_TYPE_NAMES[file_type]:
+            if suffix == file_extension.value:
                 self.logger.info(f"File: {filename}")
                 model_files.append(filename)
         return model_files
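
With the enum carrying the suffix as its value, filtering reduces to a direct string comparison against `pathlib.Path(filename).suffix`. A minimal usage sketch, assuming the ModelFileExtension shape sketched above; the repo id is purely illustrative:

# Hypothetical usage -- the repo id is only an example.
hub = HFHubRequest(auth_token=None, model_path="models", logger=None)
files = hub.list_filtered_remote_files(
    "mistralai/Mistral-7B-v0.1", ModelFileExtension.SAFETENSORS
)
# Every returned filename ends in ".safetensors".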
@@ -118,36 +118,24 @@ class HFHubTokenizer(HFHubBase):
     def __init__(
         self,
         model_path: None | str | pathlib.Path,
-        logger: None | logging.Logger
+        logger: None | logging.Logger,
     ):
         super().__init__(model_path, logger)

     @staticmethod
     def list_vocab_files(vocab_type: VocabType) -> tuple[str]:
-        if vocab_type == VocabType.SPM:
+        if vocab_type == VocabType.SPM.value:
             return HF_TOKENIZER_SPM_FILES
         # NOTE: WPM and BPE are equivalent
         return HF_TOKENIZER_BPE_FILES

-    @staticmethod
-    def get_vocab_name(vocab_type: VocabType) -> str:
-        return VOCAB_TYPE_NAMES.get(vocab_type)
-
-    @staticmethod
-    def get_vocab_type(vocab_name: str) -> VocabType:
-        return {
-            "SPM": VocabType.SPM,
-            "BPE": VocabType.BPE,
-            "WPM": VocabType.WPM,
-        }.get(vocab_name, VocabType.NON)
-
     @property
-    def default_pre_tokenizer(self) -> str:
+    def default_pre_tokenizer(self) -> tuple[str, ...]:
         return GPT_PRE_TOKENIZER_DEFAULT

     def config(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "config.json"
-        return json.loads(path.read_text(encoding='utf-8'))
+        return json.loads(path.read_text(encoding="utf-8"))

     def tokenizer_model(self, model_repo: str) -> SentencePieceProcessor:
         path = self.model_path / model_repo / "tokenizer.model"
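
Note that the updated guard compares `vocab_type` against `VocabType.SPM.value` rather than the member itself, which only matches when callers pass the raw value (or when VocabType is an IntEnum, where member and value compare equal). A short sketch under that assumption, using the members visible in the mapping removed above:

# Sketch: SPM selects the SentencePiece file list; BPE/WPM fall through to the BPE list.
spm_files = HFHubTokenizer.list_vocab_files(VocabType.SPM.value)
bpe_files = HFHubTokenizer.list_vocab_files(VocabType.BPE.value)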
@@ -157,11 +145,11 @@ class HFHubTokenizer(HFHubBase):
     def tokenizer_config(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "tokenizer_config.json"
-        return json.loads(path.read_text(encoding='utf-8'))
+        return json.loads(path.read_text(encoding="utf-8"))

     def tokenizer_json(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "tokenizer.json"
-        return json.loads(path.read_text(encoding='utf-8'))
+        return json.loads(path.read_text(encoding="utf-8"))

     def get_normalizer(self, model_repo: str) -> None | dict[str, object]:
         normalizer = self.tokenizer_json(model_repo).get("normalizer", dict())
@@ -174,20 +162,32 @@ class HFHubTokenizer(HFHubBase):
     def get_pre_tokenizer(self, model_repo: str) -> None | dict[str, object]:
         pre_tokenizer = self.tokenizer_json(model_repo).get("pre_tokenizer")
         if pre_tokenizer:
-            self.logger.info(f"JSON:PreTokenizer: {json.dumps(pre_tokenizer, indent=2)}")
+            self.logger.info(
+                f"JSON:PreTokenizer: {json.dumps(pre_tokenizer, indent=2)}"
+            )
             return pre_tokenizer
         else:
             self.logger.warn(f"WARN:PreTokenizer: {pre_tokenizer}")
         return pre_tokenizer

     def get_added_tokens(self, model_repo: str) -> None | list[dict[str, object]]:
-        added_tokens = self.tokenizer_json(model_repo).get("pre_tokenizer", list())
+        added_tokens = self.tokenizer_json(model_repo).get("added_tokens", list())
         if added_tokens:
             self.logger.info(f"JSON:AddedTokens: {json.dumps(added_tokens, indent=2)}")
         else:
             self.logger.warn(f"WARN:PreTokenizer: {added_tokens}")
         return added_tokens

+    def get_pre_tokenizer_json_hash(self, model_repo: str) -> None | str:
+        tokenizer = self.tokenizer_json(model_repo)
+        tokenizer_path = self.model_path / model_repo / "tokenizer.json"
+        if tokenizer.get("pre_tokenizer"):
+            sha256sum = sha256(str(tokenizer.get("pre_tokenizer")).encode()).hexdigest()
+        else:
+            return
+        self.logger.info(f"Hashed '{tokenizer_path}' as {sha256sum}")
+        return sha256sum
+
     def get_tokenizer_json_hash(self, model_repo: str) -> str:
         tokenizer = self.tokenizer_json(model_repo)
         tokenizer_path = self.model_path / model_repo / "tokenizer.json"
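
The new get_pre_tokenizer_json_hash mirrors get_tokenizer_json_hash below it but hashes only the "pre_tokenizer" section of tokenizer.json, giving a stable identifier for the pre-tokenizer configuration alone. A standalone sketch of the same idea (the function name is illustrative; it assumes sha256 comes from hashlib, as the module's usage implies):

from hashlib import sha256
import json

def pre_tokenizer_hash(tokenizer_json_path: str) -> None | str:
    # Hash only the "pre_tokenizer" block; return None when the file has none.
    with open(tokenizer_json_path, encoding="utf-8") as f:
        tokenizer = json.load(f)
    pre_tokenizer = tokenizer.get("pre_tokenizer")
    if not pre_tokenizer:
        return None
    return sha256(str(pre_tokenizer).encode()).hexdigest()

Because json.load preserves key order, hashing the stringified dict is deterministic for a given file, though it hashes the Python repr rather than the raw JSON bytes.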
@@ -212,29 +212,13 @@ class HFHubModel(HFHubBase):
         self,
         auth_token: None | str,
         model_path: None | str | pathlib.Path,
-        logger: None | logging.Logger
+        logger: None | logging.Logger,
     ):
         super().__init__(model_path, logger)

         self._request = HFHubRequest(auth_token, model_path, logger)
         self._tokenizer = HFHubTokenizer(model_path, logger)

-    @staticmethod
-    def get_model_type_name(model_type: ModelFileType) -> str:
-        return MODEL_FILE_TYPE_NAMES.get(model_type, "")
-
-    @staticmethod
-    def get_model_type(model_name: str) -> ModelFileType:
-        return {
-            ".pt": ModelFileType.PT,
-            ".pth": ModelFileType.PTH,
-            ".bin": ModelFileType.BIN,
-            ".safetensors": ModelFileType.SAFETENSORS,
-            ".json": ModelFileType.JSON,
-            ".model": ModelFileType.MODEL,
-            ".gguf": ModelFileType.GGUF,
-        }.get(model_name, ModelFileType.NON)
-
     @property
     def request(self) -> HFHubRequest:
         return self._request
@@ -271,8 +255,12 @@ class HFHubModel(HFHubBase):
         os.makedirs(dir_path, exist_ok=True)
         self._request_single_file(model_repo, file_name, dir_path / file_name)

-    def download_model_files(self, model_repo: str, file_type: ModelFileType) -> None:
-        filtered_files = self.request.list_filtered_remote_files(model_repo, file_type)
+    def download_model_files(
+        self, model_repo: str, file_extension: ModelFileExtension
+    ) -> None:
+        filtered_files = self.request.list_filtered_remote_files(
+            model_repo, file_extension
+        )
         self._request_listed_files(model_repo, filtered_files)

     def download_all_vocab_files(self, model_repo: str, vocab_type: VocabType) -> None:
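
Taken together, the public surface changes from ModelFileType to ModelFileExtension for model downloads while vocab downloads keep their VocabType parameter. A hedged usage sketch of the updated API; the repo id, token, and paths are placeholders, and the enum members are assumed as sketched earlier:

import logging

# Placeholders throughout -- only the method signatures come from this diff.
model = HFHubModel(
    auth_token=None, model_path="models", logger=logging.getLogger(__name__)
)
model.download_model_files("mistralai/Mistral-7B-v0.1", ModelFileExtension.SAFETENSORS)
model.download_all_vocab_files("mistralai/Mistral-7B-v0.1", VocabType.BPE)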