VishnuRamDebyez committed
Commit 09ec474 · verified · Parent: 8b9dc0a

Update app.py

Files changed (1): app.py (+16, -12)
app.py CHANGED
@@ -1,14 +1,16 @@
 import os
-import openai
+from openai import OpenAI
 from dotenv import load_dotenv
 
-# Load environment variables (make sure you have a .env file)
+# Load environment variables (e.g., API keys)
 load_dotenv()
 
-# OpenAI Client Initialization using OpenAI's official API
-openai.api_key = os.getenv("TOKEN")
+# Initialize the OpenAI client against the Hugging Face Inference API
+client = OpenAI(
+    base_url="https://api-inference.huggingface.co/v1",
+    api_key=os.getenv("TOKEN")
+)
 
-# Model Selection
 model = "meta-llama/Meta-Llama-3-8B-Instruct"
 
 def get_debyez_prompt_template(customer_message):
@@ -18,24 +20,26 @@ def get_debyez_prompt_template(customer_message):
     : '{customer_message}'
     """
 
-# Function to interact with the OpenAI model
+# Send the prompt to the model and return the complete reply
 def get_response(prompt):
     try:
         messages = [{"role": "user", "content": prompt}]
-        # Call OpenAI's chat completion API
-        response = openai.ChatCompletion.create(
+        stream = client.chat.completions.create(
             model=model,
             messages=[
                 {"role": m["role"], "content": get_debyez_prompt_template(m["content"])}
                 for m in messages
             ],
             temperature=0.5,
+            stream=True,
             max_tokens=3000,
         )
-
-        # Extract and return the model's reply
-        return response['choices'][0]['message']['content']
-
+        response = ""
+        for chunk in stream:
+            response += chunk.choices[0].delta.content or ""
+        if not response.strip():
+            response = "Sorry, I couldn't generate a response right now."
+        return response
     except Exception as e:
         return f"An error occurred: {str(e)}"
 
45