kishorefafa committed on
Commit be2e833 · verified · 1 Parent(s): 8db9815
Files changed (1)
  1. app.py +121 -0
app.py ADDED
@@ -0,0 +1,121 @@
+ # Install dependencies (Colab notebook cell)
+ !pip install -q transformers peft accelerate bitsandbytes safetensors sentencepiece streamlit chromadb langchain sentence-transformers gradio pypdf
+
+ # Import necessary libraries
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
+
+ import os
+ import gradio as gr
+ from google.colab import drive
+
+ import chromadb
+ from langchain.llms import HuggingFacePipeline
+ from langchain.document_loaders import PyPDFDirectoryLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+
+ # Download the model from HuggingFace and load it with 4-bit quantization
+ model_name = "anakin87/zephyr-7b-alpha-sharded"
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.bfloat16,
+     quantization_config=bnb_config,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ tokenizer.bos_token_id = 1  # Set beginning-of-sentence token id
+
+ # Mount Google Drive and specify the folder containing the PDF documents
+ drive.mount('/content/drive')
+ folder_path = '/content/drive/MyDrive/TestcaseReport/'
+
+ # Load the documents from Google Drive
+ loader = PyPDFDirectoryLoader(folder_path)
+ documents = loader.load()
+
+ # Split the documents into small chunks
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+ all_splits = text_splitter.split_documents(documents)
+
+ # Specify the embedding model
+ embedding_model_name = "sentence-transformers/all-mpnet-base-v2"
+ model_kwargs = {"device": "cpu"}  # Run the embedding model on CPU
+ embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name, model_kwargs=model_kwargs)
+
+ # Embed the document chunks and persist them in a Chroma vector store
+ vectordb = Chroma.from_documents(documents=all_splits, embedding=embeddings, persist_directory="chroma_db")
+
+ # Specify the retriever
+ retriever = vectordb.as_retriever()
+
+ # Build the HuggingFace pipeline for zephyr-7b-alpha (named so it does not shadow the imported `pipeline`)
+ generation_pipeline = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     use_cache=True,
+     device_map="auto",
+     max_length=2048,
+     do_sample=True,
+     top_k=5,
+     num_return_sequences=1,
+     eos_token_id=tokenizer.eos_token_id,
+     pad_token_id=tokenizer.eos_token_id,
+ )
+
+ # Wrap the pipeline as a LangChain LLM
+ llm = HuggingFacePipeline(pipeline=generation_pipeline)
+
+ # Run one conversational retrieval turn and append the result to the chat history
+ def create_conversation(query: str, chat_history: list) -> tuple:
+     try:
+         memory = ConversationBufferMemory(
+             memory_key='chat_history',
+             return_messages=False,
+         )
+         qa_chain = ConversationalRetrievalChain.from_llm(
+             llm=llm,
+             retriever=retriever,
+             memory=memory,
+             get_chat_history=lambda h: h,
+         )
+
+         result = qa_chain({'question': query, 'chat_history': chat_history})
+         chat_history.append((query, result['answer']))
+         return '', chat_history
+
+     except Exception as e:
+         chat_history.append((query, str(e)))  # Store the error message as text so Gradio can display it
+         return '', chat_history
+
+ def ask_question(query: str):
+     response = create_conversation(query, [])
+     gen_out = response[1][0][1]
+     response_start_token = "Helpful Answer:"
+     idx = gen_out.index(response_start_token)
+     rag_prompt = gen_out[:idx]
+     response_text = gen_out[idx:]
+
+     return rag_prompt, response_text
+
+ # Define the Gradio UI
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot(label='My Chatbot')
+     msg = gr.Textbox()
+     clear = gr.ClearButton([msg, chatbot])
+
+     msg.submit(create_conversation, [msg, chatbot], [msg, chatbot])
+
+ # Launch the Gradio demo
+ demo.launch()
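
For context, a minimal sketch of how the chain defined in app.py could be exercised outside the Gradio UI, assuming the model, retriever, and helper functions above have already been loaded; the sample questions are hypothetical:

# Sketch only: relies on create_conversation and ask_question from app.py being in scope.
history = []
_, history = create_conversation("Summarize the test case report.", history)  # hypothetical query
print(history[-1][1])  # answer for the latest turn

# ask_question separates the retrieval prompt from the generated answer
prompt_part, answer_part = ask_question("Which test cases failed?")  # hypothetical query
print(answer_part)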