import gradio as gr
from transformers import pipeline
import numpy as np

# Norwegian Whisper model for automatic speech recognition
transcriber = pipeline("automatic-speech-recognition", model="NbAiLab/nb-whisper-tiny")


def transcribe(stream, new_chunk):
    sr, y = new_chunk
    y = y.astype(np.float32)
    # Normalize the chunk to [-1, 1]; skip silent chunks to avoid division by zero
    max_abs = np.max(np.abs(y))
    if max_abs > 0:
        y /= max_abs

    # Accumulate audio so each pass transcribes the full recording so far
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y

    text = transcriber(
        {"sampling_rate": sr, "raw": stream},
        generate_kwargs={"num_beams": 5, "task": "transcribe", "language": "no"},
    )["text"]
    return stream, text


demo = gr.Interface(
    transcribe,
    ["state", gr.Audio(sources=["microphone"], streaming=True)],
    ["state", "text"],
    live=True,
)

if __name__ == "__main__":
    demo.launch()