Commit · 230eb9d
Parent(s): 97c98c3
add groq

Files changed:
- app.py (+30 -4)
- requirements.txt (+1 -0)
app.py CHANGED
```diff
@@ -3,9 +3,16 @@ import faster_whisper
 import gradio as gr
 from dotenv import load_dotenv
 from huggingface_hub import InferenceClient
+from groq import Groq
 
 # Load API key from .env
 load_dotenv()
+GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+
+if not GROQ_API_KEY:
+    raise ValueError("GROQ API NOT FOUND!")
+gclient = Groq(api_key=GROQ_API_KEY)
+
 HF_API_KEY = os.getenv("HF_API_KEY")
 
 if not HF_API_KEY:
@@ -28,6 +35,24 @@ MODEL_OPTIONS = [
 ]
 
 
+def chat_with_groq(message):
+    """Handles conversation with Groq LLM."""
+    response = gclient.chat.completions.create(
+        model="deepseek-r1-distill-llama-70b",
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a customer service assistant. Follow instructions and reply in Indonesian.",
+            },
+            {"role": "user", "content": message},
+        ],
+        temperature=1.0,
+        max_tokens=2048,
+        max_retries=2,
+    )
+    return response.choices[0].message["content"]  # Extract response text
+
+
 def save_to_file(content, filename):
     with open(filename, "w", encoding="utf-8") as file:
         file.write(content)
@@ -65,10 +90,11 @@ def generate_soap_summary(transcription_text, selected_model):
     messages = [
         {"role": "user", "content": template.format(dialogue=transcription_text)}
     ]
-    response = huggingface_client.chat.completions.create(
-        model=selected_model, messages=messages, max_tokens=1000, stream=False
-    )
-    soap = response.choices[0].message.content.strip()
+    # response = huggingface_client.chat.completions.create(
+    #     model=selected_model, messages=messages, max_tokens=1000, stream=False
+    # )
+    # soap = response.choices[0].message.content.strip()
+    soap = chat_with_groq(messages)
     return soap, save_to_file(soap, "soap_summary.txt")
 
 
```
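With the `groq` SDK, `max_retries` is a client-construction option rather than a `create()` keyword, and the returned `message` is an object whose text is read as `.content`, not `["content"]`; `generate_soap_summary` also hands `chat_with_groq` the whole `messages` list where the helper expects a prompt string. A minimal corrected sketch of the new helper (the model name and prompts are taken from the commit; the rest is hedged against the SDK's OpenAI-style interface):

```python
import os

from dotenv import load_dotenv
from groq import Groq

load_dotenv()

# Retries belong on the client, not on chat.completions.create().
gclient = Groq(api_key=os.getenv("GROQ_API_KEY"), max_retries=2)


def chat_with_groq(message: str) -> str:
    """Send one user message to Groq and return the reply text."""
    response = gclient.chat.completions.create(
        model="deepseek-r1-distill-llama-70b",
        messages=[
            {
                "role": "system",
                "content": "You are a customer service assistant. Follow instructions and reply in Indonesian.",
            },
            {"role": "user", "content": message},
        ],
        temperature=1.0,
        max_tokens=2048,
    )
    # message is an object, so subscripting it would raise TypeError.
    return response.choices[0].message.content


# In generate_soap_summary, pass the formatted prompt string rather than
# the messages list:
#     soap = chat_with_groq(template.format(dialogue=transcription_text))
```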
requirements.txt CHANGED
```diff
@@ -2,3 +2,4 @@ faster_whisper==1.1.1
 gradio==5.17.0
 huggingface_hub==0.28.1
 python-dotenv==1.0.1
+groq
```
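The new `groq` entry is the only unpinned line in requirements.txt, so the build will resolve whatever version is current. Since app.py now checks both API keys at startup, a quick local smoke test for the environment (a hedged sketch; assumes a `.env` file next to `app.py`):

```python
# .env (kept out of the repo):
# GROQ_API_KEY=gsk_...
# HF_API_KEY=hf_...

import os

from dotenv import load_dotenv

load_dotenv()

for key in ("GROQ_API_KEY", "HF_API_KEY"):
    # app.py checks both keys at import time and raises ValueError
    # if GROQ_API_KEY is missing.
    print(key, "OK" if os.getenv(key) else "MISSING")
```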