Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-from huggingface_hub import InferenceClient
 from fastapi import FastAPI, Request
 from pydantic import BaseModel
 import openai
@@ -14,8 +13,6 @@ HF_TOKEN = os.environ["HF_TOKEN"]
 HB_TOKEN = os.environ["HB_TOKEN"]
 PROMPTS_DOC_URL = os.environ["PROMPTS"]
 
-client = InferenceClient(model=MODEL, token=HF_TOKEN)
-
 def fetch_prompts_from_google_doc():
     print("Fetching prompts from Google Doc...")
     response = requests.get(PROMPTS_DOC_URL)
@@ -66,6 +63,11 @@ async def drive(prompt: DrivePrompt):
     {prompt.code}
     """
 
+    client = openai.OpenAI(
+        api_key=HB_TOKEN,
+        base_url="https://api.hyperbolic.xyz/v1",
+    )
+
     chat_completion = client.chat.completions.create(
         model="meta-llama/Meta-Llama-3-70B-Instruct",
         messages=[
@@ -115,8 +117,8 @@ async def chat(prompt: NavigatePrompt):
     """
 
     client = openai.OpenAI(
-
-
+        api_key=HB_TOKEN,
+        base_url="https://api.hyperbolic.xyz/v1",
     )
 
     chat_completion = client.chat.completions.create(
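
For reference, the pattern app.py switches to can be exercised outside the Space with a short standalone script: an openai.OpenAI client pointed at Hyperbolic's OpenAI-compatible base URL, issuing the same chat-completions call against meta-llama/Meta-Llama-3-70B-Instruct. This is a minimal sketch, assuming HB_TOKEN is set in the environment as the Space expects; the prompt text is illustrative and not taken from app.py.

# Minimal sketch of the client pattern introduced by this commit:
# openai.OpenAI pointed at Hyperbolic's OpenAI-compatible endpoint,
# replacing the module-level huggingface_hub InferenceClient.
# Assumes HB_TOKEN is set in the environment; the prompt is illustrative.
import os

import openai

client = openai.OpenAI(
    api_key=os.environ["HB_TOKEN"],
    base_url="https://api.hyperbolic.xyz/v1",
)

chat_completion = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-70B-Instruct",
    messages=[
        {"role": "user", "content": "Reply with a one-line greeting."},
    ],
)

print(chat_completion.choices[0].message.content)

Constructing the client inside each request handler, as the diff does, keeps the endpoints self-contained; since the configuration is static, a single shared client would also work.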