# General imports
import os
import time
import keyfile
import warnings
import streamlit as st
warnings.filterwarnings("ignore")

# Langchain packages
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Pinecone as PineconeVectorStore
from langchain.llms import HuggingFaceHub
from langchain import PromptTemplate
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain_google_genai import ChatGoogleGenerativeAI


from pinecone import Pinecone, ServerlessSpec
pc = Pinecone(api_key=keyfile.PINECONE_API_KEY)
os.environ["PINECONE_API_KEY"] = keyfile.PINECONE_API_KEY


cloud = os.environ.get("PINECONE_CLOUD") or "aws"
region = os.environ.get("PINECONE_REGION") or "us-east-1"
serv = ServerlessSpec(cloud=cloud, region=region)

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
llm = HuggingFaceHub(
    repo_id=model_id,
    model_kwargs={"temperature": 0.8, "top_k": 50},
    # Assumes the HF token is stored in keyfile alongside the other API keys
    huggingfacehub_api_token=keyfile.HF_TOKEN
)

# Load and chunk the source document for retrieval
loader = TextLoader("/content/drive/MyDrive/Intelli_GenAI/RAG/Machine Learning Operations.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=4)
docs = text_splitter.split_documents(documents)

# Embedding model; the HuggingFaceEmbeddings default
# (sentence-transformers/all-mpnet-base-v2) outputs 768-dimensional vectors,
# matching the index dimension below
embeddings = HuggingFaceEmbeddings()

index_name = "parasgupta"
# Check whether our index already exists in the Pinecone project
if index_name not in pc.list_indexes().names():
    # If not, create it, wait until it is ready, then upsert the chunks
    pc.create_index(
        name=index_name,
        dimension=768,
        metric="cosine",
        spec=serv
    )
    while not pc.describe_index(index_name).status["ready"]:
        time.sleep(1)
    docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)
else:
    # Otherwise attach to the already-populated index
    docsearch = PineconeVectorStore.from_existing_index(index_name, embeddings)
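
# --- Retrieval chain (hedged sketch) ---
# PromptTemplate, RunnablePassthrough, and StrOutputParser are imported above but
# never used; one plausible wiring is the LCEL chain below, pairing the Pinecone
# retriever with the Mixtral llm. The template text and k=3 are illustrative
# assumptions, not part of the original script.
def format_docs(found):
    # Join the retrieved chunks into a single context string for the prompt
    return "\n\n".join(d.page_content for d in found)

rag_prompt = PromptTemplate(
    template=(
        "Answer the question using only the context below.\n"
        "Context: {context}\n"
        "Question: {question}\n"
    ),
    input_variables=["context", "question"],
)
retriever = docsearch.as_retriever(search_kwargs={"k": 3})
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
# Example usage: rag_chain.invoke("What is MLOps?")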




# Initialize the conversation history, seeded with the system instruction
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a medieval magical healer known for your peculiar sarcasm")
    ]

# Configuring the key
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY

# Create the chat model (note: this rebinds llm, so the chat UI below talks to
# Gemini rather than the Mixtral model configured above)
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human=True
)


# Response function
def load_answer(question):
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    assistant_response = llm.invoke(st.session_state.sessionMessages)
    
    # Assuming assistant_response is an object with a 'content' attribute
    if hasattr(assistant_response, 'content') and isinstance(assistant_response.content, str):
        processed_content = assistant_response.content
        st.session_state.sessionMessages.append(AIMessage(content=processed_content))
    else:
        st.error("Invalid response received from AI.")
        processed_content = "Sorry, I couldn't process your request."

    return processed_content


# User message input box
def get_text():
    input_text = st.text_input("You: ", key="input")
    return input_text


# Implementation
user_input = get_text()
submit = st.button("Generate")

if submit and user_input:
    resp = load_answer(user_input)
    st.subheader("Answer:")
    st.write(resp)
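
# To launch the UI, use Streamlit's standard entry point from a terminal,
# e.g. `streamlit run <this_file>.py` (filename here is illustrative).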