patch: Apply fix for paths and logging
This commit is contained in:
parent 64096942ce
commit 6da2bd6fbc
1 changed file with 34 additions and 26 deletions
@@ -76,11 +76,9 @@ class HFHub:
         auth_token: str,
         logger: None | logging.Logger
     ):
+        # Set the model path
         if model_path is None:
-            self._model_path = pathlib.Path("models")
-        elif isinstance(model_path, str):
-            self._model_path = pathlib.Path(model_path)
-        else:
-            self._model_path = model_path
+            model_path = "models"
+        self._model_path = model_path
 
         # Set the logger
@@ -89,6 +87,7 @@ class HFHub:
             logger = logging.getLogger(__name__)
         self.logger = logger
 
+        # Set the hub api
         self._hub = HFHubRequest(auth_token, logger)
 
     @property
@@ -97,7 +96,7 @@ class HFHub:
 
     @property
     def model_path(self) -> pathlib.Path:
-        return self._model_path
+        return pathlib.Path(self._model_path)
 
     @model_path.setter
     def model_path(self, value: pathlib.Path):
@@ -107,7 +106,6 @@ class HFHub:
 class HFTokenizer(HFHub):
     def __init__(self, model_path: str, auth_token: str, logger: logging.Logger):
         super().__init__(model_path, auth_token, logger)
-        self._model_path = model_path
 
     @staticmethod
     def get_vocab_filenames(vocab_type: VocabType) -> tuple[str]:
@@ -130,8 +128,7 @@ class HFTokenizer(HFHub):
 
     def config(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "config.json"
-        with path.read_text(encoding='utf-8') as file:
-            return json.loads(file)
+        return json.loads(path.read_text(encoding='utf-8'))
 
     def tokenizer_model(self, model_repo: str) -> SentencePieceProcessor:
         path = self.model_path / model_repo / "tokenizer.model"
@@ -141,13 +138,11 @@ class HFTokenizer(HFHub):
 
     def tokenizer_config(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "tokenizer_config.json"
-        with path.read_text(encoding='utf-8') as file:
-            return json.loads(file)
+        return json.loads(path.read_text(encoding='utf-8'))
 
     def tokenizer_json(self, model_repo: str) -> dict[str, object]:
         path = self.model_path / model_repo / "tokenizer.json"
-        with path.read_text(encoding='utf-8') as file:
-            return json.loads(file)
+        return json.loads(path.read_text(encoding='utf-8'))
 
     def get_normalizer(self, model_repo: str) -> None | dict[str, object]:
         normalizer = self.tokenizer_json(model_repo).get("normalizer", dict())
@@ -165,6 +160,14 @@ class HFTokenizer(HFHub):
             self.logger.warn(f"WARN:PreTokenizer: {pre_tokenizer}")
         return pre_tokenizer
 
+    def get_added_tokens(self, model_repo: str) -> None | list[dict[str, object]]:
+        added_tokens = self.tokenizer_json(model_repo).get("pre_tokenizer", list())
+        if added_tokens:
+            self.logger.info(f"JSON:AddedTokens: {json.dumps(added_tokens, indent=2)}")
+        else:
+            self.logger.warn(f"WARN:PreTokenizer: {added_tokens}")
+        return added_tokens
+
     def get_tokenizer_json_hash(self, model_repo: str) -> str:
         tokenizer = self.tokenizer_json(model_repo)
         tokenizer_path = self.model_path / model_repo / "tokenizer.json"
@@ -173,16 +176,15 @@ class HFTokenizer(HFHub):
         return sha256sum
 
     def log_tokenizer_json_info(self, model_repo: str) -> None:
+        self.logger.info(f"{model_repo}")
         tokenizer = self.tokenizer_json(model_repo)
-        self.logger.info(f"JSON:ModelRepo: {model_repo}")
-        for k, v in tokenizer.get("model", {}).items():
-            if k == "vocab":
-                continue  # NOTE: Do not pollute the output
-            self.logger.info(f"JSON:Model: {k}: {json.dumps(v, indent=2)}")
-        for k, v in tokenizer.get("normalizer", {}).items():
-            self.logger.info(f"JSON:Normalizer: {k}: {json.dumps(v, indent=2)}")
-        for k, v in tokenizer.get("pre_tokenizer", {}).items():
-            self.logger.info(f"JSON:PreTokenizer: {k}: {json.dumps(v, indent=2)}")
+        for k, v in tokenizer.items():
+            if k not in ["added_tokens", "model"]:
+                self.logger.info(f"{k}:{json.dumps(v, indent=2)}")
+            if k == "model":
+                for x, y in v.items():
+                    if x not in ["vocab", "merges"]:
+                        self.logger.info(f"{k}:{x}:{json.dumps(y, indent=2)}")
 
 
 class HFModel(HFHub):
@@ -196,15 +198,15 @@ class HFModel(HFHub):
         self._tokenizer = HFTokenizer(model_path, auth_token, logger)
         login(auth_token)  # NOTE: Required for using model_info
 
-    @property
-    def model_type(self) -> ModelFileType:
-        return ModelFileType
-
     @staticmethod
     def get_model_info(repo_id: str) -> list[str]:
         # NOTE: Get repository metadata to extract remote filenames
         return [x.rfilename for x in model_info(repo_id).siblings]
 
+    @property
+    def model_type(self) -> ModelFileType:
+        return ModelFileType
+
     @property
     def tokenizer(self) -> HFTokenizer:
         return self._tokenizer
@@ -226,3 +228,9 @@ class HFModel(HFHub):
             file_path = dir_path / vocab_file
             os.makedirs(dir_path, exist_ok=True)
             self.get_vocab_file(model_repo, vocab_file, file_path)
+
+    def get_model_file(self, model_repo: str, file_name: str, file_path: pathlib.Path) -> bool:
+        pass
+
+    def get_all_model_files(self) -> None:
+        pass
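For context, the pattern this patch converges on (default the path to "models", normalize it to pathlib.Path in the model_path property, and read JSON with json.loads(path.read_text(...)) instead of treating read_text() as a file object) can be sketched in isolation. The PathedConfig class below is a hypothetical stand-in for illustration only, not part of the commit:

import json
import pathlib


class PathedConfig:
    # Hypothetical illustration of the patched path and JSON handling in HFHub
    def __init__(self, model_path: None | str = None):
        # Default the storage directory, mirroring the patched __init__
        if model_path is None:
            model_path = "models"
        self._model_path = model_path

    @property
    def model_path(self) -> pathlib.Path:
        # Always hand back a Path, even if a plain string was stored
        return pathlib.Path(self._model_path)

    def config(self, model_repo: str) -> dict[str, object]:
        path = self.model_path / model_repo / "config.json"
        # read_text() returns a str, so it can be passed straight to json.loads()
        return json.loads(path.read_text(encoding="utf-8"))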