| funasr/auto/auto_model.py | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
| funasr/frontends/fused.py | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
| funasr/utils/export_utils.py | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 |
funasr/auto/auto_model.py
(reconstructed from a collapsed diff view; context-line indentation inferred)
@@ -182,7 +182,9 @@
         set_all_random_seed(kwargs.get("seed", 0))

         device = kwargs.get("device", "cuda")
-        if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
+        if ((device == "cuda" and not torch.cuda.is_available())
+                or (device == "xpu" and not torch.xpu.is_available())
+                or kwargs.get("ngpu", 1) == 0):
             device = "cpu"
             kwargs["batch_size"] = 1
         kwargs["device"] = device
funasr/frontends/fused.py
(reconstructed from a collapsed diff view; context-line indentation inferred)
@@ -78,6 +78,8 @@
         self.factors = [frontend.hop_length // self.gcd for frontend in self.frontends]
         if torch.cuda.is_available():
             dev = "cuda"
+        elif torch.xpu.is_available():
+            dev = "xpu"
         else:
             dev = "cpu"
         if self.align_method == "linear_projection":
funasr/utils/export_utils.py
(reconstructed from a collapsed diff view; context-line indentation inferred)
@@ -28,12 +28,12 @@
             **kwargs,
         )
     elif type == "torchscript":
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        device = "cuda" if torch.cuda.is_available() else "xpu" if torch.xpu.is_available() else "cpu"
         print("Exporting torchscripts on device {}".format(device))
         _torchscripts(m, path=export_dir, device=device)
     elif type == "bladedisc":
         assert (
-            torch.cuda.is_available()
+            torch.cuda.is_available() or torch.xpu.is_available()
         ), "Currently bladedisc optimization for FunASR only supports GPU"
         # bladedisc only optimizes encoder/decoder modules
         if hasattr(m, "encoder") and hasattr(m, "decoder"):
@@ -44,7 +44,7 @@
     elif type == "onnx_fp16":
         assert (
-            torch.cuda.is_available()
+            torch.cuda.is_available() or torch.xpu.is_available()
        ), "Currently onnx_fp16 optimization for FunASR only supports GPU"
         if hasattr(m, "encoder") and hasattr(m, "decoder"):