diff --git a/buzz/transcriber/recording_transcriber.py b/buzz/transcriber/recording_transcriber.py index d40e9e376..bfbaa86ad 100644 --- a/buzz/transcriber/recording_transcriber.py +++ b/buzz/transcriber/recording_transcriber.py @@ -62,6 +62,9 @@ def start(self): model_path = self.model_path keep_samples = int(0.15 * self.sample_rate) + if torch.cuda.is_available(): + logging.debug(f"CUDA version detected: {torch.version.cuda}") + if self.transcription_options.model.model_type == ModelType.WHISPER: device = "cuda" if torch.cuda.is_available() else "cpu" model = whisper.load_model(model_path, device=device) @@ -76,6 +79,10 @@ def start(self): logging.debug("CUDA GPUs are currently no supported on Running on Windows, using CPU") device = "cpu" + if torch.cuda.is_available() and int(torch.version.cuda.split(".")[0]) < 12: + logging.debug("Unsupported CUDA version (<12), using CPU") + device = "cpu" + model = faster_whisper.WhisperModel( model_size_or_path=model_path, download_root=model_root_dir, diff --git a/buzz/transcriber/whisper_file_transcriber.py b/buzz/transcriber/whisper_file_transcriber.py index afe2dc47e..3585276da 100644 --- a/buzz/transcriber/whisper_file_transcriber.py +++ b/buzz/transcriber/whisper_file_transcriber.py @@ -52,6 +52,9 @@ def transcribe(self) -> List[Segment]: "Starting whisper file transcription, task = %s", self.transcription_task ) + if torch.cuda.is_available(): + logging.debug(f"CUDA version detected: {torch.version.cuda}") + recv_pipe, send_pipe = multiprocessing.Pipe(duplex=False) self.current_process = multiprocessing.Process( @@ -146,6 +149,10 @@ def transcribe_faster_whisper(cls, task: FileTranscriptionTask) -> List[Segment] logging.debug("CUDA GPUs are currently no supported on Running on Windows, using CPU") device = "cpu" + if torch.cuda.is_available() and int(torch.version.cuda.split(".")[0]) < 12: + logging.debug("Unsupported CUDA version (<12), using CPU") + device = "cpu" + model = faster_whisper.WhisperModel( model_size_or_path=model_size_or_path, 
download_root=model_root_dir, diff --git a/docs/docs/faq.md b/docs/docs/faq.md index 793f199db..d496de3d2 100644 --- a/docs/docs/faq.md +++ b/docs/docs/faq.md @@ -35,10 +35,14 @@ sidebar_position: 5 On Windows see [this note](https://github.com/chidiwilliams/buzz/blob/main/CONTRIBUTING.md#gpu-support) on enabling CUDA GPU support. + For Faster Whisper, CUDA 12 is required; computers with older CUDA versions will use the CPU. + 6. **How to fix `Unanticipated host error[PaErrorCode-9999]`?** Check if there are any system settings preventing apps from accessing the microphone. On Windows, see if Buzz has permission to use the microphone in Settings -> Privacy -> Microphone. + See method 1 in this video: https://www.youtube.com/watch?v=eRcCYgOuSYQ + For method 2, there is no need to uninstall the antivirus; instead, check whether you can temporarily disable it or whether any of its settings may prevent Buzz from accessing the microphone. \ No newline at end of file