Commit: new shit

Files changed:
- App/Chat/utils/RAG.py (+73 -58)
- App/Chat/utils/Summarize.py (+50 -50)
App/Chat/utils/RAG.py
CHANGED
Hunks @@ -1,34 +1,49 @@, @@ -40,51 +55,53 @@ and @@ -99,13 +116,11 @@. The deleted side (largely elided in this view) held the unspaced `import json,os`, a one-line `__init__` signature, a one-line `self.examples=[...]` literal, Gemini-style message access via `messages[-1][...]`, an inline Gemini request payload, and the old response parsing; all of it reappears below, either reformatted or commented out. The substantive change is the endpoint swap: `generate_message` now posts to Groq's OpenAI-compatible chat-completions API with model `mixtral-8x7b-32768`, while the Gemini `generateContent` URL and its payload (generationConfig plus safetySettings) survive only as comments. `generate_template` is unchanged in behavior: it runs `search(question, task_id=task_id)` over the transcript embeddings and YAML-dumps each hit into the prompt's context section.
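For illustration, here is a hypothetical transcript hit and what `yaml.dump` turns it into. The real keys come from `App.Embedding.utils.Initialize.search` and are not shown in this diff; keys along these lines are assumed because the template's instructions mention start/end times and text.

```python
import yaml

# Hypothetical shape of one search() result; not taken from the diff.
hit = {"start": 12.4, "end": 18.9, "text": "Welcome back to the channel."}
print(yaml.dump(hit))
# end: 18.9
# start: 12.4
# text: Welcome back to the channel.
```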
The new version of App/Chat/utils/RAG.py (template lines 50-54 and comment lines 108-115 are not shown in the diff view):

```python
import aiohttp
import asyncio
import json, os
import yaml
import google.generativeai as palm
from App.Embedding.utils.Initialize import search

PALM_API = ""
API_KEY = os.environ.get("PALM_API", PALM_API)
palm.configure(api_key=API_KEY)


class GenerativeAIAssistant:
    def __init__(
        self,
        api_key=API_KEY,
        model="gemini-pro",
        temperature=0.85,
        candidate_count=1,
        top_k=40,
        top_p=0.95,
    ):
        self.api_key = api_key
        self.model = model
        self.temperature = temperature
        self.candidate_count = candidate_count
        self.top_k = top_k
        self.top_p = top_p
        self.examples = [
            {
                "input": {"content": "hello"},
                "output": {"content": "Hello to you too! How can I help you today?"},
            }
        ]
        self.context = "You are a helpful assistant"

    def generate_template(self, question, task_id, summary=None):
        if summary == None:
            self.context = "You are a helpful assistant"
        else:
            self.context = summary
        contexts = search(question, task_id=task_id)
        context_yaml = ""
        for context in contexts:
            context_yaml += "\n" + yaml.dump(context)
        Template = f"""
        #Instructions
        You are given the following context in yaml of a transcript of a youtube video, the start and end times are indicated and the text that was said is also given. You are also given a question, use the context to answer the question in a consise manner, make it short and to the point, don't provide additional details.

        [template lines 50-54 not shown in the diff view]

        #Question
        {question}
        """
        return Template

    async def generate_message(self, messages, task_id="ok"):
        user_message = messages[-1]
        # latest_message = messages[-1]["parts"][0]["text"]
        latest_message = user_message["content"]
        response = {
            "content": self.generate_template(latest_message, task_id),
            "role": "assistant",
        }
        # user_message["parts"][0]["text"] = latest_message
        messages.append(response)
        # url = f'https://generativelanguage.googleapis.com/v1beta/models/{self.model}:generateContent?key={self.api_key}'
        url = "https://api.groq.com/openai/v1/chat/completions"
        payload = {
            "messages": messages,
            "model": "mixtral-8x7b-32768",
        }
        # payload = {
        #     "contents": messages,
        #     "generationConfig": {
        #         "temperature": 0.9,
        #         "topK": 1,
        #         "topP": 1,
        #         "maxOutputTokens": 2048,
        #         "stopSequences": [],
        #     },
        #     "safetySettings": [
        #         {
        #             "category": "HARM_CATEGORY_HARASSMENT",
        #             "threshold": "BLOCK_ONLY_HIGH",
        #         },
        #         {
        #             "category": "HARM_CATEGORY_HATE_SPEECH",
        #             "threshold": "BLOCK_ONLY_HIGH",
        #         },
        #         {
        #             "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        #             "threshold": "BLOCK_ONLY_HIGH",
        #         },
        #         {
        #             "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        #             "threshold": "BLOCK_ONLY_HIGH",
        #         },
        #     ],
        # }

        # data = {
        #     "prompt": {
        #     ... (comment lines 108-115 not shown in the diff view)
        #     }

        async with aiohttp.ClientSession() as session:
            async with session.post(
                url, json=payload, headers={"Content-Type": "application/json"}
            ) as response:
                try:
                    temp = await response.json()
                    return temp["choices"][0]["content"]
                except Exception as e:
                    return f"Error ⚠️ {e} {temp}"
```
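As committed, the POST sends no `Authorization` header, and the parsing reads `temp["choices"][0]["content"]`; Groq's OpenAI-compatible API expects a Bearer token (which Summarize.py below does add) and nests the reply under `message`. A minimal sketch of a working request block, assuming the same `Groq_API` environment variable that Summarize.py reads:

```python
import os

import aiohttp

GROQ_API_KEY = os.environ.get("Groq_API", "")  # env var name taken from Summarize.py


async def groq_chat(messages, model="mixtral-8x7b-32768"):
    # Mirrors generate_message's request, adding the Bearer token and the
    # OpenAI-compatible response path ("message" -> "content").
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {GROQ_API_KEY}",
    }
    payload = {"messages": messages, "model": model}
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload, headers=headers) as resp:
            data = await resp.json()
            return data["choices"][0]["message"]["content"]
```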
App/Chat/utils/Summarize.py
CHANGED
Hunks @@ -4,6 +4,7 @@, @@ -15,7 +16,7 @@ and @@ -46,61 +47,63 @@. `PalmTextModel` gets the same endpoint swap as RAG.py: the Gemini `generateContent` URL (old line 49) becomes a comment, requests now go to Groq's chat-completions endpoint with a Bearer `Authorization` header read from a new `Groq_API` environment variable, and the Gemini payload and the `candidates > 1` branch are commented out in favor of `result["choices"][0]["message"]["content"]`. Hunk @@ -138,6 +141,3 @@ drops three trailing blank lines after `Summarizer`.
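For reference, the new parsing walks the standard OpenAI-compatible response body, roughly shaped like the sketch below (values illustrative, not from the diff):

```python
# Abridged OpenAI-compatible response shape; values are illustrative.
result = {
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "The summary text."},
            "finish_reason": "stop",
        }
    ]
}
assert result["choices"][0]["message"]["content"] == "The summary text."
```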
The new version of the changed regions of App/Chat/utils/Summarize.py:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import PromptTemplate
import os

# from poe_api_wrapper import PoeApi
import pprint

# ... (unchanged lines elided) ...

API_KEY = os.environ.get("PALM_API", PALM_API)
palm.configure(api_key=API_KEY)

GroqAPIKey = os.environ.get("Groq_API", "")
text_splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", "."],
    chunk_size=1500,

# ... (unchanged lines elided) ...


async def PalmTextModel(text, candidates=1):
    # url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key={API_KEY}"
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {GroqAPIKey}",
    }
    payload = {"messages": [{"role": "user", "content": text}]}
    # payload = {
    #     "contents": [
    #         {
    #             "role": "user",
    #             "parts": [
    #                 {
    #                     "text": text
    #                 }
    #             ]
    #         }
    #     ],
    #     "generationConfig": {
    #         "temperature": 0.9,
    #         "topK": 1,
    #         "topP": 1,
    #         "maxOutputTokens": 2048,
    #         "stopSequences": []
    #     },
    #     "safetySettings": [
    #         {
    #             "category": "HARM_CATEGORY_HARASSMENT",
    #             "threshold": "BLOCK_ONLY_HIGH"
    #         },
    #         {
    #             "category": "HARM_CATEGORY_HATE_SPEECH",
    #             "threshold": "BLOCK_ONLY_HIGH"
    #         },
    #         {
    #             "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    #             "threshold": "BLOCK_ONLY_HIGH"
    #         },
    #         {
    #             "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
    #             "threshold": "BLOCK_ONLY_HIGH"
    #         }
    #     ]
    # }

    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload, headers=headers) as response:
            if response.status == 200:
                result = await response.json()
                # print(result)
                # if candidates > 1:
                #     temp = [
                #         candidate["content"]["parts"][0]["text"]
                #         for candidate in result["candidates"]
                #     ]
                #     return temp
                temp = result["choices"][0]["message"]["content"]
                return temp
            else:
                print(f"Error: {response.status}\n{await response.text()}")

# ... (unchanged lines elided; end of Summarizer) ...

    main = " ".join(responses)
    ans = await PalmTextModel(combine_prompt.format(text=main))
    return ans
```
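Note that this payload, unlike the one in RAG.py, carries no `model` field, which Groq's chat-completions endpoint requires; presumably it needs the same `"model": "mixtral-8x7b-32768"` entry. The tail of `Summarizer` shown above is the reduce step of a map-reduce summarization: each chunk from `text_splitter` is summarized, the partial summaries are joined, and the joined text is summarized once more through the combine prompt. A minimal sketch of that flow, with `map_prompt` and `combine_prompt` standing in for the PromptTemplates defined earlier in the file but not shown in this diff:

```python
import asyncio


async def summarize(essay, model, splitter, map_prompt, combine_prompt):
    # Map: summarize every chunk concurrently (model is an async callable
    # like PalmTextModel above).
    docs = splitter.split_text(essay)
    responses = await asyncio.gather(
        *(model(map_prompt.format(text=doc)) for doc in docs)
    )
    # Reduce: join the partial summaries and summarize the result once more,
    # mirroring the last three lines of Summarizer.
    main = " ".join(responses)
    return await model(combine_prompt.format(text=main))
```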