NEXAS committed on
Commit
f3c5afd
·
verified ·
1 Parent(s): 1768511

Update utils/llm_ag.py

Browse files
Files changed (1) hide show
  1. utils/llm_ag.py +62 -60
utils/llm_ag.py CHANGED
@@ -1,60 +1,62 @@
1
- import requests
2
- import os
3
- from dotenv import load_dotenv
4
-
5
- load_dotenv()
6
-
7
- # Get the API key from environment variable
8
- GROQ_API_KEY = "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"
9
- if not GROQ_API_KEY:
10
- raise ValueError("GROQ_API_KEY is not set in the .env file")
11
-
12
- def intiate_convo(user_query, image_description, additional_text, model="mixtral-8x7b-32768"):
13
- # Prepare the message payload
14
- messages = [
15
- {
16
- "role": "system",
17
- "content": """You are a AI Assistant for training. Given an image description, additional context, and a user query, respond with a detailed long answer with steps, ,be polite.
18
- IMPORTANT: When referring to the image, subtly acknowledge it by saying "as I see here" rather than explicitly mentioning "image" or "photo."
19
- Your tone should be natural and conversational. Keep it detailed , engaging, and relevant to the query, using both the image description and the additional context as reference points."""
20
- },
21
- {
22
- "role": "user",
23
- "content": f"Image description: {image_description}. Additional context: {additional_text}. User query: {user_query}. Provide a detaile response like an ai assistant."
24
- }
25
- ]
26
-
27
- # Make the API request
28
- response = requests.post(
29
- "https://api.groq.com/openai/v1/chat/completions",
30
- json={
31
- "model": model,
32
- "messages": messages,
33
- "max_tokens": 32768,
34
- "stop": None,
35
- "stream": False
36
- },
37
- headers={
38
- "Authorization": f"Bearer {GROQ_API_KEY}",
39
- "Content-Type": "application/json"
40
- },
41
- timeout=60
42
- )
43
-
44
- # Process the response
45
- if response.status_code == 200:
46
- result = response.json()
47
- answer = result["choices"][0]["message"]["content"]
48
- return answer
49
- else:
50
- return f"Error from LLM API: {response.status_code} - {response.text}"
51
-
52
- # # Example usage
53
- # # Define the inputs
54
- # user_query = "Can you tell me more about the person in this description?"
55
- # image_description = """The main subject of the image is a person with dark complexion, short black hair, and white-framed glasses, wearing a dark-colored shirt or jacket. They are looking directly at the camera with a subtle expression."""
56
- # additional_text = """This individual is a software engineer specializing in AI development. They are known for their expertise in computer vision and enjoy photography as a hobby."""
57
-
58
- # # Get the LLM response
59
- # response = intiate_convo(user_query, image_description, additional_text)
60
- # print(response)
 
 
 
1
import requests
import os
from dotenv import load_dotenv

# Load variables from a local .env file into the process environment.
load_dotenv()

# Read the Groq API key from the environment (populated by .env above).
# SECURITY: never hard-code API keys in source. The literal key previously
# committed here is compromised and must be revoked/rotated in the Groq
# console — removing it from the file does not remove it from git history.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY is not set in the .env file")
12
def intiate_convo(user_query, image_description, additional_text, model="mixtral-8x7b-32768"):
    """Ask the Groq chat-completions API to answer a query about an image.

    Combines the image description, extra context, and the user's question
    into a single chat turn and returns the model's reply.

    Args:
        user_query: The question asked by the user.
        image_description: Text description of the image under discussion.
        additional_text: Extra context used to ground the answer.
        model: Groq model identifier; defaults to "mixtral-8x7b-32768".

    Returns:
        str: The assistant's reply on success, otherwise an error string of
        the form "Error from LLM API: ...". This function never raises for
        network or HTTP failures.
    """
    # System prompt fixes tone/behaviour; the user turn packs all three
    # inputs into one message.
    messages = [
        {
            "role": "system",
            "content": """You are a AI Assistant for training. Given an image description, additional context, and a user query, respond with a PRECISE answer WITH THE HELP OF ADDITIONAL CONTEXT,be polite.
            IMPORTANT: When referring to the image, subtly acknowledge it by saying "as I see here" rather than explicitly mentioning "image" or "photo."
            Your tone should be natural and conversational.relevant to the query, using both the image description and the additional context as reference points.
            Provide only the answer.
            """
        },
        {
            "role": "user",
            "content": f"Image description: {image_description}. Additional context: {additional_text}. User query: {user_query}. Provide a detaile response like an ai assistant."
        }
    ]

    try:
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            json={
                "model": model,
                "messages": messages,
                # NOTE(review): 32768 is this model's full context window,
                # which Groq may reject once the prompt is counted — confirm.
                "max_tokens": 32768,
                "stop": None,
                "stream": False
            },
            headers={
                "Authorization": f"Bearer {GROQ_API_KEY}",
                "Content-Type": "application/json"
            },
            timeout=60
        )
    except requests.RequestException as exc:
        # Network-level failures (timeout, DNS, connection reset) previously
        # propagated to the caller; report them in the same error-string
        # style this function already uses for HTTP errors.
        return f"Error from LLM API: request failed - {exc}"

    # Success: extract the first choice's message text.
    if response.status_code == 200:
        result = response.json()
        answer = result["choices"][0]["message"]["content"]
        return answer
    # Non-200: surface status and body so the caller can log/inspect it.
    return f"Error from LLM API: {response.status_code} - {response.text}"
53
+
54
+ # # Example usage
55
+ # # Define the inputs
56
+ # user_query = "Can you tell me more about the person in this description?"
57
+ # image_description = """The main subject of the image is a person with dark complexion, short black hair, and white-framed glasses, wearing a dark-colored shirt or jacket. They are looking directly at the camera with a subtle expression."""
58
+ # additional_text = """This individual is a software engineer specializing in AI development. They are known for their expertise in computer vision and enjoy photography as a hobby."""
59
+
60
+ # # Get the LLM response
61
+ # response = intiate_convo(user_query, image_description, additional_text)
62
+ # print(response)