Update app110.py
app110.py
CHANGED
@@ -27,6 +27,27 @@ client = OpenAI(
    #api_key=os.environ.get('TOKEN2') # Hugging Face API token
    api_key=os.environ.get('LLM')
)
+#######
+#from openai import OpenAI
+
+client = OpenAI(
+    base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.3-70B-Instruct/v1",
+    #api_key="hf_xxxxxxxxxxxxxxxxxxxxxxxx",
+    api_key=os.environ.get('LLM')
+)
+
+completion = client.chat.completions.create(
+    model="meta-llama/Llama-3.3-70B-Instruct",
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?"
+        }
+    ],
+)
+
+print(completion.choices[0].message)
+#######
#####
# from openai import OpenAI
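The added hunk re-creates the client against the Hugging Face router's OpenAI-compatible endpoint and prints the raw message object returned by the test completion. Below is a minimal self-contained sketch of the same pattern, assuming the standard openai Python package and that the LLM secret holds a valid Hugging Face token (both taken from the diff above); it additionally guards against a missing token and prints only the reply text rather than the whole message object.

import os

from openai import OpenAI

# Assumption (from the diff above): the Space stores a Hugging Face API token
# in the 'LLM' environment variable / secret.
token = os.environ.get("LLM")
if not token:
    raise RuntimeError("Set the 'LLM' environment variable to a Hugging Face API token.")

client = OpenAI(
    # OpenAI-compatible router endpoint used in the added block.
    base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.3-70B-Instruct/v1",
    api_key=token,
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)

# .message is the assistant message object; .content is the plain reply string.
print(completion.choices[0].message.content)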