Update app.py
app.py CHANGED
@@ -8,10 +8,10 @@ import gradio as gr
 # Get the Hugging Face token, used to call the inference service API
 from huggingface_hub import InferenceClient
 
-
+API_KEY = os.environ.get("GROQ_API_KEY")
 API_BASE_URL = os.environ.get("GROQ_API_BASE_URL")
 client = InferenceClient(
-    api_key=
+    api_key=API_KEY,
     base_url=API_BASE_URL
 )
 
@@ -130,7 +130,7 @@ def generate_math_questions(grade, term, qtype="Unspecified", num_questions=10):
     # Use the InferenceClient to call the API model to generate new questions
     completion = client.chat.completions.create(
         # model="mistralai/Mistral-7B-Instruct-v0.3",
-        model="
+        model="gemma2-9b-it",
         # model="mistralai/Mistral-Nemo-Instruct-2407",
         messages=messages,
         max_tokens=1024
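
Taken together, the two hunks read the Groq credentials from environment variables and route the app's chat completions through an OpenAI-compatible endpoint via huggingface_hub's InferenceClient. Below is a minimal sketch of the resulting setup after this commit; the example endpoint URL, the assumption that GROQ_API_KEY and GROQ_API_BASE_URL are set as Space secrets, and the sample messages list are illustrative assumptions, not part of the commit itself.

import os
from huggingface_hub import InferenceClient

# Credentials are read from environment variables (e.g. Space secrets).
# GROQ_API_KEY      -> the Groq API key (assumption: set as a secret)
# GROQ_API_BASE_URL -> an OpenAI-compatible endpoint, e.g.
#                      "https://api.groq.com/openai/v1" (assumed value)
API_KEY = os.environ.get("GROQ_API_KEY")
API_BASE_URL = os.environ.get("GROQ_API_BASE_URL")

# InferenceClient accepts an OpenAI-compatible base_url, so the same client
# object can talk to the Groq endpoint instead of the default HF providers.
client = InferenceClient(
    api_key=API_KEY,
    base_url=API_BASE_URL,
)

# Example call mirroring the second hunk; the message content is illustrative.
messages = [
    {"role": "user", "content": "Generate 2 addition questions for grade 1."},
]
completion = client.chat.completions.create(
    model="gemma2-9b-it",
    messages=messages,
    max_tokens=1024,
)
print(completion.choices[0].message.content)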