import os

import gradio as gr
from langchain.llms import GooglePalm
from langchain.document_loaders.csv_loader import CSVLoader
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA

# Read the Google API key from the environment (e.g. a Hugging Face Space
# secret named GOOGLE_API_KEY) instead of hardcoding it in a public repo.
# NOTE: GooglePalm is deprecated in recent LangChain releases; the PaLM API
# has been retired in favor of Gemini (langchain-google-genai).
api_key = os.environ["GOOGLE_API_KEY"]
llm = GooglePalm(google_api_key=api_key, temperature=0.7)

# Load the FAQ CSV; the "prompt" column is recorded as each row's source.
loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column='prompt')
data = loader.load()

# Embed every row and index the vectors in an in-memory FAISS store.
# "Alibaba-NLP/gte-Qwen2-7B-instruct" performed best in testing but is too
# large to host here, so BGE-M3 is used instead.
instructor_embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-m3")
vectordb = FAISS.from_documents(documents=data, embedding=instructor_embeddings)
retriever = vectordb.as_retriever()
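# Optional sanity checks, kept commented out so the app starts fast. The query
# below is a hypothetical example, and the "faiss_index" path is an assumption:
# docs = retriever.get_relevant_documents("What is your refund policy?")
# print(docs[0].page_content)
# The index can also be persisted and reloaded between runs:
# vectordb.save_local("faiss_index")
# vectordb = FAISS.load_local("faiss_index", instructor_embeddings,
#                             allow_dangerous_deserialization=True)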
prompt_template = """Given the following context and a question, generate an answer based on the context only.
In the answer, reuse as much text as possible from the "response" section of the source document context, with minimal changes.
If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
If the answer is not found in the context, state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.

CONTEXT: {context}

QUESTION: {question}"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
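# Quick way to eyeball the final prompt (hypothetical values for both
# variables); uncomment to print it once at startup:
# print(PROMPT.format(context="sample context", question="sample question"))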
# Build a RetrievalQA chain that "stuffs" the retrieved FAQ rows into the
# prompt above and generates the answer with the PaLM model.
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    input_key="query",
    return_source_documents=True,
    chain_type_kwargs={"prompt": PROMPT},
)
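# Example of calling the chain directly (hypothetical question). With
# return_source_documents=True the output dict carries both the answer and
# the FAQ rows it was grounded on; uncomment to try it:
# output = chain("Do you provide a certificate?")
# print(output['result'])            # generated answer
# print(output['source_documents'])  # retrieved source rows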
def chatresponse(message, history):
    # `history` is supplied by gr.ChatInterface but unused: the chain is
    # stateless, so each question is answered independently.
    output = chain(message)
    return output['result']
gr.ChatInterface(chatresponse).launch()
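# On a Hugging Face Space the plain launch() above is all that's needed; for
# local debugging, a temporary public URL can be requested instead:
# gr.ChatInterface(chatresponse).launch(share=True)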
# Earlier iteration, kept for reference: a single-question gr.Interface instead
# of gr.ChatInterface, reusing the same `chain` defined above.
# def chatbot(query):
#     response = chain(query)
#     # Extract the 'result' field, with a fallback message
#     result = response.get('result', 'Sorry, I could not find an answer.')
#     return result
#
# iface = gr.Interface(
#     fn=chatbot,
#     inputs=gr.Textbox(lines=2, placeholder="Enter your question here..."),  # gr.inputs.Textbox was removed in Gradio 3+
#     outputs="text",
#     title="Hugging Face LLM Chatbot",
#     description="Ask any question related to the documents and get an answer from the LLM model.",
# )
# iface.launch()
#
# Save this file as app.py and push it to your Hugging Face Space repository.