Wedyan2023 committed on
Commit
4736749
·
verified ·
1 Parent(s): e69d1ed

Update app104.py

Browse files
Files changed (1) hide show
  1. app104.py +27 -5
app104.py CHANGED
@@ -17,13 +17,35 @@ warnings.filterwarnings('ignore')
17
  os.getenv("OAUTH_CLIENT_ID")
18
 
19
 
20
- # Load environment variables and initialize the OpenAI client to use Hugging Face Inference API.
21
- load_dotenv()
 
 
 
 
 
 
 
 
22
  client = OpenAI(
23
- base_url="https://api-inference.huggingface.co/v1",
24
- api_key=os.environ.get('TOKEN2') # Hugging Face API token
25
  )
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  # Create necessary directories
28
  for dir_name in ['data', 'feedback']:
29
  if not os.path.exists(dir_name):
@@ -148,7 +170,7 @@ with st.sidebar:
148
 
149
  selected_model = st.selectbox(
150
  "Select Model",
151
- ["meta-llama/Llama-3.3-70B-Instruct", "meta-llama/Llama-3.2-3B-Instruct","meta-llama/Llama-4-Scout-17B-16E-Instruct", "meta-llama/Meta-Llama-3-8B-Instruct",
152
  "meta-llama/Llama-3.1-70B-Instruct"],
153
  key='model_select'
154
  )
 
17
  os.getenv("OAUTH_CLIENT_ID")
18
 
19
 
20
+ # # Load environment variables and initialize the OpenAI client to use Hugging Face Inference API.
21
+ # load_dotenv()
22
+ # client = OpenAI(
23
+ # base_url="https://api-inference.huggingface.co/v1",
24
+ # api_key=os.environ.get('TOKEN2') # Hugging Face API token
25
+ # )
26
+
27
+ ####new
28
+ from openai import OpenAI
29
+
30
  client = OpenAI(
31
+ base_url="https://router.huggingface.co/together/v1",
32
+ api_key=os.environ.get('TOKEN2'),
33
  )
34
 
35
+ completion = client.chat.completions.create(
36
+ model="meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
37
+ messages=[
38
+ {
39
+ "role": "user",
40
+ "content": "What is the capital of France?"
41
+ }
42
+ ],
43
+ max_tokens=512,
44
+ )
45
+
46
+ print(completion.choices[0].message)
47
+ #####
48
+
49
  # Create necessary directories
50
  for dir_name in ['data', 'feedback']:
51
  if not os.path.exists(dir_name):
 
170
 
171
  selected_model = st.selectbox(
172
  "Select Model",
173
+ ["meta-llama/Meta-Llama-3-8B-Instruct-Turbo", "meta-llama/Llama-3.3-70B-Instruct", "meta-llama/Llama-3.2-3B-Instruct","meta-llama/Llama-4-Scout-17B-16E-Instruct", "meta-llama/Meta-Llama-3-8B-Instruct",
174
  "meta-llama/Llama-3.1-70B-Instruct"],
175
  key='model_select'
176
  )