From 000cfcbb157568db1126f91a5db63aab257f7eeb Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 9 Aug 2024 21:37:19 +0800
Subject: [PATCH] chore(format): run black on dev (#678)

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 ChatTTS/model/gpt.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/ChatTTS/model/gpt.py b/ChatTTS/model/gpt.py
index 0a90d0ce1..5b918b621 100644
--- a/ChatTTS/model/gpt.py
+++ b/ChatTTS/model/gpt.py
@@ -134,12 +134,16 @@ def from_pretrained(self, file_path: str, experimental=False):
         self.load_state_dict(torch.load(file_path, weights_only=True, mmap=True))
 
         if (
-            experimental and "cuda" in str(self.device_gpt) and platform.system().lower() == "linux"
+            experimental
+            and "cuda" in str(self.device_gpt)
+            and platform.system().lower() == "linux"
         ):  # is TELlamaModel
             try:
                 from .cuda import TELlamaModel
 
-                self.logger.warning("Linux with CUDA, try NVIDIA accelerated TELlamaModel because experimental is enabled")
+                self.logger.warning(
+                    "Linux with CUDA, try NVIDIA accelerated TELlamaModel because experimental is enabled"
+                )
                 state_dict = self.gpt.state_dict()
                 vanilla = TELlamaModel.from_state_dict(state_dict, self.llama_config)
                 # Force mem release. Taken from huggingface code
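
Note: the hunk above is a pure black reformatting; behavior is unchanged. The
original if condition and logger.warning call exceed black's default
88-character line length, so black parenthesizes and wraps the boolean chain
one operand per line and moves the long string argument onto its own line.
A minimal standalone sketch of the same transformation, using module-level
stand-ins (device_gpt, logger, experimental) for the self attributes in gpt.py:

import logging
import platform

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)  # stand-in for self.logger
device_gpt = "cuda:0"                 # stand-in for self.device_gpt
experimental = True                   # stand-in for the method argument

# Before black (a single line longer than the 88-character default):
# if experimental and "cuda" in str(device_gpt) and platform.system().lower() == "linux":

# After black: the condition is wrapped one operand per line, and the
# over-long warning string becomes a wrapped call argument.
if (
    experimental
    and "cuda" in str(device_gpt)
    and platform.system().lower() == "linux"
):
    logger.warning(
        "Linux with CUDA, try NVIDIA accelerated TELlamaModel because experimental is enabled"
    )

To reproduce the formatting locally, running black over the file (e.g.
black ChatTTS/model/gpt.py, or black . from the repository root) should
yield the same wrapping under black's default line-length settings.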