File size: 4,698 Bytes
4e0c974 cdef94b 4e0c974 bdaa2e2 a31da9c cdef94b 8ba499f a31da9c cdef94b 8ba499f 4e0c974 cdef94b 4e0c974 338b8f2 cdef94b 8ba499f cdef94b 4e0c974 9b5be52 4e0c974 9b5be52 4e0c974 9b5be52 4e0c974 cdef94b 89ebcd8 bdaa2e2 89ebcd8 96655e4 89ebcd8 7df9f15 bdaa2e2 a1192cf 89ebcd8 6f54b8e 89ebcd8 bdaa2e2 bcdd593 89ebcd8 4e0c974 8ba499f cdef94b 4e0c974 cdef94b 4e0c974 cdef94b 4e0c974 cdef94b 4e0c974 a819bf2 cdef94b d662165 4e0c974 338b8f2 bcdd593 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 |
import aiohttp
import asyncio, pprint
import google.generativeai as palm
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import PromptTemplate
import os
# from poe_api_wrapper import PoeApi
import pprint
# client = PoeApi(os.environ["POE_TOKEN"])  # Poe wrapper disabled; token must come from the environment
bot = "Assistant"
CHAT_CODE = ""

# All API credentials are read from the environment — never hard-code secrets.
PALM_API = ""
API_KEY = os.environ.get("PALM_API", PALM_API)
palm.configure(api_key=API_KEY)

# SECURITY: a live Groq key was previously committed on this line. It has been
# removed; rotate the leaked key and supply a fresh one via the Groq_API env var.
GroqAPIKey = os.environ.get("Groq_API", "")
# Splits long input text into ~1500-character chunks with 100 characters of
# overlap, preferring paragraph breaks, then newlines, then sentence ends.
# Used by Summarizer() to fan chunks out to the model concurrently.
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n", "."],
chunk_size=1500,
length_function=len,
chunk_overlap=100,
)
# Prompt applied to every chunk during the "map" phase of Summarizer().
# NOTE: the label previously read "CONCISE SUMMARY:", contradicting the
# instruction to write a *verbose* summary; the label now matches the ask.
map_prompt = """
Write a verbose summary like a masters student of the following:
"{text}"
VERBOSE SUMMARY:
"""

# Prompt applied once to the concatenated chunk summaries ("reduce" phase),
# and directly to the whole text when it fits in a single chunk.
combine_prompt = """
Write a concise summary of the following text delimited by triple backquotes.
Return your response in a detailed verbose paragraph which covers the text. Make it as insightful to the reader as possible, write like a masters student.
```{text}```
SUMMARY:
"""
def count_tokens(text):
    """Return the PaLM token count for *text* via the generativeai API."""
    token_info = palm.count_message_tokens(prompt=text)
    return token_info["token_count"]
async def PalmTextModel(text, candidates=1):
    """Send *text* as a single user message to the Groq chat-completions API
    and return the model's reply.

    Parameters
    ----------
    text : str
        The prompt to send as the user message.
    candidates : int
        Unused; kept for backward compatibility with the earlier
        PaLM/Gemini code path that could return multiple candidates.

    Returns
    -------
    str
        The content of the first completion choice.

    Raises
    ------
    RuntimeError
        If the API responds with a non-200 status. (Previously this case
        only printed the error and implicitly returned None, which later
        crashed `" ".join(...)` in Summarizer with an opaque TypeError.)
    """
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {GroqAPIKey}",
    }
    payload = {
        "messages": [{"role": "user", "content": text}],
        "model": "mixtral-8x7b-32768",
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload, headers=headers) as response:
            if response.status == 200:
                result = await response.json()
                return result["choices"][0]["message"]["content"]
            body = await response.text()
            raise RuntimeError(f"Error: {response.status}\n{body}")
# async def PalmTextModel(message):
# global CHAT_CODE
# if CHAT_CODE == "":
# for chunk in client.send_message(bot, message):
# pass
# CHAT_CODE = chunk["chatCode"]
# else:
# for chunk in client.send_message(bot, message, chatCode=CHAT_CODE):
# pass
# return chunk["text"]
async def Summarizer(essay):
    """Summarize *essay* with a map-reduce strategy.

    The text is split into chunks by the module-level ``text_splitter``.
    Each chunk is summarized concurrently with ``map_prompt`` (map phase),
    then the concatenated chunk summaries are summarized once with
    ``combine_prompt`` (reduce phase). Requests are issued in batches of
    20 to avoid overwhelming the API with concurrent calls.

    Parameters
    ----------
    essay : str
        The full text to summarize.

    Returns
    -------
    str
        The final combined summary.
    """
    docs = text_splitter.create_documents([essay])
    # A single chunk needs no map phase — run the combine prompt directly.
    if len(docs) == 1:
        return await PalmTextModel(combine_prompt.format(text=docs[0].page_content))
    # Map phase: one summarization coroutine per chunk.
    tasks = [PalmTextModel(map_prompt.format(text=doc.page_content)) for doc in docs]
    responses = []
    # Throttle: at most 20 in-flight requests per batch.
    batch_size = 20
    for start in range(0, len(tasks), batch_size):
        batch = tasks[start:start + batch_size]
        responses.extend(await asyncio.gather(*batch))
    # Reduce phase: summarize the joined chunk summaries into one answer.
    main = " ".join(responses)
    return await PalmTextModel(combine_prompt.format(text=main))
# asyncio.run(Summarizer("This is a test of the summarizer")) |