diff --git a/README.md b/README.md index 9b41761392ca9a83c3e24cd314cfea8ba7169cc1..917573f528aa6712a43b79081e9b4fdd02597ca6 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ ## Dev Environment +Run the following commands to create a virtual environment and install homeassistant and all dependencies: + ```sh python3 -m venv .venv source .venv/bin/activate diff --git a/openai_stt/stt.py b/openai_stt/stt.py index 0f1f105c85c2ff15f6aff5facf60db7575068718..2c7c9922b49113c86a78dbea7a8a821ff91d677a 100644 --- a/openai_stt/stt.py +++ b/openai_stt/stt.py @@ -3,6 +3,7 @@ Support for Whisper API STT. """ from typing import AsyncIterable import aiohttp +import logging import os import tempfile import voluptuous as vol @@ -22,6 +23,7 @@ from homeassistant.core import HomeAssistant import homeassistant.helpers.config_validation as cv import wave +_LOGGER = logging.getLogger(__name__) CONF_API_KEY = 'api_key' OPENAI_STT_URL = "https://api.openai.com/v1/audio/transcriptions" @@ -109,8 +111,6 @@ class OpenAISTTProvider(Provider): temp_file_path = temp_file.name - url = self._url or OPENAI_STT_URL - headers = { 'Authorization': f'Bearer {self._api_key}', } @@ -121,8 +121,10 @@ class OpenAISTTProvider(Provider): form.add_field('language', metadata.language) form.add_field('model', self._model) + _LOGGER.debug("URL: {} LANG: {} MODEL: {}".format(self._url, metadata.language, self._model)) + async with aiohttp.ClientSession() as session: - async with session.post(self._url, data=form, headers=headers) as response: + async with session.post(self._url, data=form, headers=headers) as response: if response.status == 200: json_response = await response.json() return SpeechResult(json_response["text"], SpeechResultState.SUCCESS)