suvadityamuk committed on
Commit 3dd4599 · 1 Parent(s): 42ad526
Files changed (2)
  1. app.py +158 -60
  2. requirements.txt +11 -1
app.py CHANGED
@@ -1,64 +1,162 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-

  if __name__ == "__main__":
-     demo.launch()
+ import os
+ import re
+ import json
+ import torch
+ import spaces
+ import pymupdf
  import gradio as gr
+ from qdrant_client import QdrantClient
+ from utils import download_pdf_from_gdrive, merge_strings_with_prefix
+ from transformers import AutoModelForCausalLM, AutoTokenizer
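+
+ # Tool-calling RAG flow: the resume PDF is chunked and indexed in an
+ # in-memory Qdrant collection; Qwen2.5-3B-Instruct is prompted with
+ # rag_query exposed as a tool, and any emitted <tool_call> is executed
+ # and fed back into the history before the final answer is generated.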
+
+ def rag_query(query: str):
+     """
+     Searches the vector database, which contains information
+     about a man named Suvaditya, for a given query by performing
+     semantic search. Returns results by looking at his resume,
+     which contains a plethora of information about him.
+
+     Args:
+         query: The query against which the search will be run,
+             in the form of a single string phrase of no more
+             than 10 words.
+
+     Returns:
+         search_results: A list of results that come closest
+             to the given query semantically, determined by
+             cosine similarity.
+     """
+     return client.query(
+         collection_name="resume",
+         query_text=query
+     )
+
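+ # generate_answer renders the running chat history through the model's
+ # tool-calling chat template (with rag_query exposed as a callable tool),
+ # generates up to 512 new tokens, and decodes only the newly generated part.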
+ def generate_answer(chat_history):
+     # Generate result
+     tool_prompt = tokenizer.apply_chat_template(
+         chat_history,
+         tools=[rag_query],
+         return_tensors="pt",
+         return_dict=True,
+         add_generation_prompt=True,
+     )
+     tool_prompt = tool_prompt.to(model.device)
+     out = model.generate(**tool_prompt, max_new_tokens=512)
+     generated_text = out[0, tool_prompt['input_ids'].shape[1]:]
+     generated_text = tokenizer.decode(generated_text)
+     return generated_text
+
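+ # parse_tool_request extracts the JSON payload of a <tool_call> block, runs
+ # rag_query with the requested query, and returns the top_k matching resume
+ # snippets, or (None, None) when the model emitted no tool call.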
+ def parse_tool_request(tool_call, top_k=5):
+     pattern = r"<tool_call>(.*?)</tool_call>"
+     match_result = re.search(pattern, tool_call, re.DOTALL)
+     if match_result:
+         result = match_result.group(1).strip()
+     else:
+         return None, None
+
+     query = json.loads(result)["arguments"]["query"]
+     query_results = [
+         query_piece.metadata["document"] for query_piece in rag_query(query)
+     ]
+
+     return query_results[:top_k], query
+
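+ # update_chat_history appends the tool call and its results to the history,
+ # using the "assistant" tool_calls / "tool" message format expected by the
+ # chat template, so the follow-up generation can see the retrieved context.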
+ def update_chat_history(chat_history, tool_query, query_results):
+     assistant_tool_message = {
+         "role": "assistant",
+         "tool_calls": [{
+             "type": "function",
+             "function": {
+                 "name": "rag_query",
+                 "arguments": {"query": f"{tool_query}"}
+             }
+         }]
+     }
+     result_tool_message = {
+         "role": "tool",
+         "name": "rag_query",
+         "content": "\n".join(query_results)
+     }
+
+     chat_history.append(assistant_tool_message)
+     chat_history.append(result_tool_message)
+
+     return chat_history

  if __name__ == "__main__":
+     RESUME_PATH = os.path.join(os.getcwd(), "Resume.pdf")
+     RESUME_URL = "https://drive.google.com/file/d/1YMF9NNTG5gubwJ7ipI5JfxAJKhlD9h2v/"
+
+     # Download file
+     download_pdf_from_gdrive(RESUME_URL, RESUME_PATH)
+
+     doc = pymupdf.open(RESUME_PATH)
+     fulltext = doc[0].get_text().split("\n")
+     fulltext = merge_strings_with_prefix(fulltext)
+
+     # Embed the sentences
+     client = QdrantClient(":memory:")
+
+     client.set_model("sentence-transformers/all-MiniLM-L6-v2")
+
+     if not client.collection_exists(collection_name="resume"):
+         client.create_collection(
+             collection_name="resume",
+             vectors_config=client.get_fastembed_vector_params(),
+         )
+
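+     # client.add embeds each resume line with the fastembed model selected
+     # above and upserts the vectors into the in-memory collection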
+     _ = client.add(
+         collection_name="resume",
+         documents=fulltext,
+         ids=range(len(fulltext)),
+         batch_size=100,
+         parallel=0,
+     )
+
+     # FOR QWEN, THIS IS WORKING
+
+     model_name = "Qwen/Qwen2.5-3B-Instruct"
+
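+     # spaces.GPU requests a ZeroGPU slot for the duration of each call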
+     @spaces.GPU
+     def rag_process(message, chat_history):
+         # Append current user message to chat history
+         current_message = {
+             "role": "user",
+             "content": message
+         }
+         chat_history.append(current_message)
+
+         # Generate LLM answer
+         generated_text = generate_answer(chat_history)
+
+         # Detect whether the LLM requested a tool call; if not,
+         # parse_tool_request returns (None, None)
+         query_results, tool_query = parse_tool_request(generated_text)
+
+         # If a tool call was requested
+         if query_results is not None and tool_query is not None:
+             print("Inside")
+             # Update chat history with result of tool call
+             chat_history = update_chat_history(
+                 chat_history, tool_query, query_results
+             )
+             # Generate the final answer from the updated chat history
+             generated_text = generate_answer(chat_history)
+
+         # Drop the trailing "<|im_end|>" end-of-turn marker from the decoded text
+         return generated_text[:-10]
+
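+     # Load the model with bfloat16 weights, automatic device placement,
+     # and 4-bit (bitsandbytes) quantization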
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         load_in_4bit=True,
+     ).to_bettertransformer().to('cuda')
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     demo = gr.ChatInterface(
+         fn=rag_process,
+         type="messages",
+     )
+     demo.launch()
requirements.txt CHANGED
@@ -1 +1,11 @@
- huggingface_hub==0.25.2
+ huggingface_hub==0.25.2
+ qdrant_client
+ pymupdf
+ gdown
+ fastembed
+ transformers
+ torch
+ torchvision
+ torchaudio
+ accelerate
+ bitsandbytes