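"""Voice chat loop: capture speech from a microphone, transcribe it with
`speech_recognition`, and answer through the Groq chat completions API.

Requires the GROQ_API_KEY environment variable to be set.
"""
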
import os
import sys

import speech_recognition as sr
from groq import Groq


def recognize(recognizer: sr.Recognizer, microphone: sr.Microphone) -> dict:
    """Record one utterance from `microphone` and transcribe it with
    `recognize_google` (the Google Web Speech API)."""
    if not isinstance(recognizer, sr.Recognizer):
        raise TypeError("`recognizer` must be `Recognizer` instance")

    if not isinstance(microphone, sr.Microphone):
        raise TypeError("`microphone` must be `Microphone` instance")

    with microphone as source:
        # recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    response = {"success": True, "error": None, "transcription": None}

    try:
        response["transcription"] = recognizer.recognize_google(audio)
    except sr.RequestError:
        # The speech API was unreachable or returned an error.
        response["success"] = False
        response["error"] = "API unavailable"
    except sr.UnknownValueError:
        # Audio was captured but could not be transcribed.
        response["success"] = False
        response["error"] = "Unable to recognize speech"

    return response


def chat(client: Groq, message: str, model: str = "llama-3.3-70b-versatile") -> str:
    """Send `message` to the Groq chat completions API and return the reply text."""
    completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "You're an AI girlfriend, answer short and nice.",
            },
            {
                "role": "user",
                "content": message,
            },
        ],
        model=model,
    )

    # Return just the reply text so the `-> str` annotation holds.
    return completion.choices[0].message.content


def setup(device_index: int = 1) -> tuple[Groq, sr.Recognizer, sr.Microphone]:
    """Create the Groq client and the speech-recognition objects."""
    # Read the API key from the environment instead of hardcoding a secret in the source.
    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    recognizer = sr.Recognizer()
    microphone = sr.Microphone(device_index=device_index)

    return client, recognizer, microphone


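# If device index 1 is not your microphone, the available input devices can be
# listed with `sr.Microphone.list_microphone_names()` and the matching index
# passed to `setup()`.

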
def main() -> None:
    client, *devices = setup(1)

    try:
        while True:
            res = recognize(*devices)
            if not res["success"]:
                print(res["error"])
                continue

            print("User:", res["transcription"])
            reply = chat(client, res["transcription"])
            print("AI:", reply)
    except KeyboardInterrupt:
        sys.exit()


if __name__ == "__main__":
    main()
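# Example run (the filename is illustrative):
#
#   GROQ_API_KEY=<your key> python voice_chat.py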