Update helper/utils.py
Browse files- helper/utils.py +31 -0
helper/utils.py
CHANGED
@@ -6,6 +6,7 @@ import numpy as np
|
|
6 |
import pandas as pd
|
7 |
import PyPDF2
|
8 |
from openai import OpenAI
|
|
|
9 |
|
10 |
|
11 |
# Credit
|
@@ -168,6 +169,36 @@ def call_gpt(prompt: str, content: str) -> str:
|
|
168 |
return response.choices[0].message.content
|
169 |
|
170 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
def quantize_to_kbit(arr: Union[np.ndarray, Any], k: int = 16) -> np.ndarray:
|
172 |
"""Converts an array to a k-bit representation by normalizing and scaling its values.
|
173 |
|
|
|
6 |
import pandas as pd
|
7 |
import PyPDF2
|
8 |
from openai import OpenAI
|
9 |
+
from together import Together
|
10 |
|
11 |
|
12 |
# Credit
|
|
|
169 |
return response.choices[0].message.content
|
170 |
|
171 |
|
172 |
# Module-level Together AI client shared by the llama helper(s) below.
# NOTE(review): reads TOGETHER_API_KEY at import time and raises KeyError if it
# is unset — the whole module fails to import without the env var; confirm that
# is intended. Assumes `os` is imported earlier in the file — TODO confirm.
client = Together(api_key=os.environ["TOGETHER_API_KEY"])
def call_llama(prompt: str, *, model: str = "meta-llama/Llama-3-8b-chat-hf") -> str:
    """Send a single-turn prompt to a Llama chat model and return its reply.

    Args:
        prompt: The user message to send to the model.
        model: Together AI model identifier. Keyword-only; defaults to
            ``meta-llama/Llama-3-8b-chat-hf`` so existing callers are
            unaffected.

    Returns:
        The text content of the first choice in the model's response.

    Raises:
        Exception: Propagates any error raised by the Together client
            (network failures, authentication errors, invalid model id).
    """
    # Single-turn conversation: the prompt is sent as one user message.
    response = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
    )

    # Mirror the call_gpt helper above: return only the first choice's text.
    return response.choices[0].message.content
202 |
def quantize_to_kbit(arr: Union[np.ndarray, Any], k: int = 16) -> np.ndarray:
|
203 |
"""Converts an array to a k-bit representation by normalizing and scaling its values.
|
204 |
|