from openai import OpenAI
import os
from groq import Groq
import vertexai
from vertexai.generative_models import GenerativeModel
import vertexai.preview.generative_models as generative_models
import google.generativeai as genai
import anthropic
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
from langchain import hub
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from dotenv import load_dotenv

load_dotenv()

os.environ["GRPC_VERBOSITY"] = "ERROR"
os.environ["GLOG_minloglevel"] = "2"

groq_client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)
openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# For Vertex AI access on a GCP instance, authenticate first:
#   gcloud auth application-default login
genai.configure(api_key=os.environ.get("GENAI_API_KEY"))
vertexai.init(project="proprietary-info-detection", location="us-central1")
gemini_client = GenerativeModel("gemini-1.5-pro-001")
claude_client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))


def create_db_with_langchain(path):
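    """Load a PDF, chunk it, embed the chunks, and index them in an in-memory Chroma store."""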
    loader = PyMuPDFLoader(path)
    data = loader.load()
    # split it into chunks
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    docs = text_splitter.split_documents(data)

    # create the open-source embedding function
    embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

    # load it into Chroma
    db = Chroma.from_documents(docs, embedding_function)
    return db


def generate_groq_rag(text, model, path):
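    """Answer via RAG: retrieve chunks from the document at `path` with MMR, then prompt a Groq-hosted model."""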
    llm = ChatGroq(
        temperature=0,
        model_name=model,
    )
    db = create_db_with_langchain(path)
    retriever = db.as_retriever(search_type="mmr", search_kwargs={"k": 4, "fetch_k": 20})
    prompt = hub.pull("rlm/rag-prompt")

    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    rag_chain = {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt | llm
    return rag_chain.invoke(text).content


def generate_groq_base(text, model):
    # The writing instruction belongs in the system role, not as a
    # pre-filled assistant turn.
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": "Please follow the instruction and write about the given topic in approximately the given number of words",
            },
            {"role": "user", "content": text},
        ],
        temperature=1,
        max_tokens=1024,
        stream=True,
        stop=None,
    )
    response = ""
    for chunk in completion:
        # delta.content is None on role-only chunks, so coalesce to "".
        response += chunk.choices[0].delta.content or ""
    return response


def generate_groq(text, model, path):
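    """Route to RAG when a document path is provided, else plain chat."""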
    if path:
        return generate_groq_rag(text, model, path)
    else:
        return generate_groq_base(text, model)


def generate_openai(text, model, openai_client):
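    """Single-turn chat completion against the OpenAI API."""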
    message = [{"role": "user", "content": text}]
    response = openai_client.chat.completions.create(
        model=model,
        messages=message,
        temperature=1,
        max_tokens=1024,
    )
    return response.choices[0].message.content


def generate_gemini(text, model, gemini_client):
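    """Generate with Vertex AI Gemini.

    `model` is unused: the module-level client is already bound to
    gemini-1.5-pro-001.
    """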
    safety_settings = {
        generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    }
    generation_config = {
        "max_output_tokens": 1024,
        "temperature": 1.0,
        "top_p": 1.0,
    }
    response = gemini_client.generate_content(
        [text],
        generation_config=generation_config,
        safety_settings=safety_settings,
        stream=False,
    )
    return response.text


def generate_claude(text, model, claude_client):
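    """Single-turn message against the Anthropic API."""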
    response = claude_client.messages.create(
        model=model,
        max_tokens=1024,
        temperature=1.0,
        system="You are a helpful assistant.",
        messages=[{"role": "user", "content": [{"type": "text", "text": text}]}],
    )
    return response.content[0].text.strip()


def generate(text, model, path, api=None):
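    """Dispatch on the human-readable model name; `path` enables RAG for Groq models."""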

    if model == "LLaMA 3":
        return generate_groq(text, "llama3-70b-8192", path)
    elif model == "OpenAI GPT 4o Mini":
        return generate_openai(text, "gpt-4o-mini", openai_client)
    elif model == "OpenAI GPT 4o":
        return generate_openai(text, "gpt-4o", openai_client)
    elif model == "OpenAI GPT 4":
        return generate_openai(text, "gpt-4-turbo", openai_client)
    elif model == "Gemini 1.5 Pro":
        return generate_gemini(text, "", gemini_client)
    elif model == "Claude Sonnet 3.5":
        return generate_claude(text, "claude-3-5-sonnet-20240620", claude_client)
    else:
        # Fail loudly instead of silently returning None for unknown names.
        raise ValueError(f"Unknown model: {model}")
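

# Minimal usage sketch (assumptions: the API keys above are set in .env, and
# "report.pdf" is a hypothetical file, not part of the original module).
if __name__ == "__main__":
    # Plain generation: no document path, so the Groq branch chats directly.
    print(generate("Write about transformers in approximately 100 words.", "LLaMA 3", None))

    # RAG generation: a non-empty path routes through the Chroma retriever first.
    # print(generate("Summarize the key findings.", "LLaMA 3", "report.pdf"))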