NCTCMumbai committed on
Commit
d52cca8
1 Parent(s): 481533f

Upload 16 files

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ .lancedb/Huggingface_docs.lance/_indices/6c02d989-8ff6-447e-ac72-509805b2b124/index.idx filter=lfs diff=lfs merge=lfs -text
+ doc_embed1.lance/data/a253243d-bbbc-4a8f-a874-88a380332825.lance filter=lfs diff=lfs merge=lfs -text
+ .lancedb1/doc_embed1.lance/data/a253243d-bbbc-4a8f-a874-88a380332825.lance filter=lfs diff=lfs merge=lfs -text
.lancedb1/Expenditure_.lance/_latest.manifest ADDED
Binary file (238 Bytes).
 
.lancedb1/Expenditure_.lance/_transactions/0-7c4aba2c-7fca-462c-a282-3193b077a778.txn ADDED
@@ -0,0 +1 @@
+ (binary Lance transaction record; the readable fragments indicate a schema with a `text` string column and a `vector` fixed_size_list<float> column of dimension 768)
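The readable fragments in this transaction record describe the table schema. It can be cross-checked against the shipped database; a small sketch, assuming LanceDB is installed and the repo root is the working directory:

    import lancedb

    # Expected: a `text` string field and a 768-dim fixed-size-list `vector` field
    db = lancedb.connect(".lancedb1")
    tbl = db.open_table("Expenditure_")
    print(tbl.schema)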
.lancedb1/Expenditure_.lance/_transactions/1-660601ba-ca3a-41c1-8121-39e93bc09f36.txn ADDED
Binary file (98 Bytes).
 
.lancedb1/Expenditure_.lance/_versions/1.manifest ADDED
Binary file (183 Bytes).
 
.lancedb1/Expenditure_.lance/_versions/2.manifest ADDED
Binary file (238 Bytes).
 
.lancedb1/Expenditure_.lance/data/ea6d8679-d96f-49bf-9443-69b4b1e7e509.lance ADDED
The diff for this file is too large to render.
 
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Expenditure Observer Bot
- emoji: 📈
- colorFrom: blue
- colorTo: blue
+ title: RAG Sample More Documents
+ emoji: 🌍
+ colorFrom: red
+ colorTo: pink
  sdk: gradio
- sdk_version: 4.26.0
+ sdk_version: 4.7.1
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,145 @@
+ """
+ Credit to Derek Thomas, [email protected]
+ """
+
+ import subprocess
+
+ # subprocess.run(["pip", "install", "--upgrade", "transformers[torch,sentencepiece]==4.34.1"])
+
+ import logging
+ from pathlib import Path
+ from time import perf_counter
+
+ import gradio as gr
+ from jinja2 import Environment, FileSystemLoader
+ import numpy as np
+ from sentence_transformers import CrossEncoder
+
+ from backend.query_llm import generate_hf, generate_openai
+ from backend.semantic_search import table, retriever
+
+ VECTOR_COLUMN_NAME = "vector"
+ TEXT_COLUMN_NAME = "text"
+
+ proj_dir = Path(__file__).parent
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Set up the template environment with the templates directory
+ env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
+
+ # Load the templates directly from the environment
+ template = env.get_template('template.j2')
+ template_html = env.get_template('template_html.j2')
+
+ # Cross-encoders (instantiated per request in bot() based on the radio selection)
+ #cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+ #cross_encoder = CrossEncoder('BAAI/bge-reranker-base')
+
+ # Example queries shown in the UI
+ examples = ['What is the 4 digit classification heading for Gold jewellery?',
+             'What is the 6 digit classification heading for Mobile phone?',
+             ]
+
+
+ def add_text(history, text):
+     history = [] if history is None else history
+     history = history + [(text, None)]
+     return history, gr.Textbox(value="", interactive=False)
+
+
+ def bot(history, cross_encoder):
+     top_rerank = 15
+     top_k_rank = 10
+     query = history[-1][0]
+
+     if not query:
+         gr.Warning("Please submit a non-empty string as a prompt")
+         raise ValueError("Empty string was submitted")
+
+     logger.warning('Retrieving documents...')
+     # Retrieve documents relevant to the query
+     document_start = perf_counter()
+
+     query_vec = retriever.encode(query)
+     logger.warning('Finished query vec')
+     documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_rerank).to_list()
+     documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
+     logger.warning(f'Finished search; start cross encoder on {len(documents)} documents')
+
+     # Rerank the retrieved documents against the query
+     query_doc_pair = [[query, doc] for doc in documents]
+     if cross_encoder == 'MiniLM-L6v2':
+         cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+     else:
+         cross_encoder = CrossEncoder('BAAI/bge-reranker-base')
+     cross_scores = cross_encoder.predict(query_doc_pair)
+     sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
+     logger.warning(f'Finished cross encoder on {len(documents)} documents')
+
+     documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
+     logger.warning(f'num documents {len(documents)}')
+
+     document_time = perf_counter() - document_start
+     logger.warning(f'Finished retrieving documents in {round(document_time, 2)} seconds...')
+
+     # Create the prompt (plain text for the LLM, HTML for display)
+     prompt = template.render(documents=documents, query=query)
+     prompt_html = template_html.render(documents=documents, query=query)
+
+     generate_fn = generate_hf
+
+     history[-1][1] = ""
+     for character in generate_fn(prompt, history[:-1]):
+         # generate_fn yields the cumulative output so far, so overwrite rather than append
+         history[-1][1] = character
+         yield history, prompt_html
+
+
+ with gr.Blocks(theme='Insuz/SimpleIndigo') as demo:
+     chatbot = gr.Chatbot(
+         [],
+         elem_id="chatbot",
+         avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
+                        'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
+         bubble_full_width=False,
+         show_copy_button=True,
+         show_share_button=True,
+     )
+
+     with gr.Row():
+         txt = gr.Textbox(
+             scale=3,
+             show_label=False,
+             placeholder="Enter text and press enter",
+             container=False,
+         )
+         txt_btn = gr.Button(value="Submit text", scale=1)
+
+     cross_encoder = gr.Radio(choices=['MiniLM-L6v2', 'BGE reranker'], value='MiniLM-L6v2')
+
+     prompt_html = gr.HTML()
+
+     # Turn off interactivity while generating if you click
+     txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         bot, [chatbot, cross_encoder], [chatbot, prompt_html])
+
+     # Turn it back on
+     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+
+     # Turn off interactivity while generating if you hit enter
+     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         bot, [chatbot, cross_encoder], [chatbot, prompt_html])
+
+     # Turn it back on
+     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+
+     # Examples
+     gr.Examples(examples, txt)
+
+ demo.queue()
+ demo.launch(debug=True)
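For readers tracing the retrieval path, the retrieve-then-rerank step in bot() can be exercised on its own. A minimal sketch, reusing the table and retriever from backend.semantic_search (the query string is illustrative):

    import numpy as np
    from sentence_transformers import CrossEncoder
    from backend.semantic_search import table, retriever

    query = "What is the 4 digit classification heading for Gold jewellery?"
    query_vec = retriever.encode(query)
    # Pull the 15 nearest chunks from LanceDB, then let the cross-encoder re-order them
    hits = table.search(query_vec, vector_column_name="vector").limit(15).to_list()
    docs = [h["text"] for h in hits]
    reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
    scores = reranker.predict([[query, d] for d in docs])
    top_docs = [docs[i] for i in np.argsort(scores)[::-1][:10]]  # keep the best 10
    print(top_docs[0][:200])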
backend/__pycache__/query_llm.cpython-310.pyc ADDED
Binary file (4.36 kB).
 
backend/__pycache__/semantic_search.cpython-310.pyc ADDED
Binary file (700 Bytes).
 
backend/query_llm.py ADDED
@@ -0,0 +1,160 @@
+ import openai
+ import gradio as gr
+
+ from os import getenv
+ from typing import Any, Dict, Generator, List
+
+ from huggingface_hub import InferenceClient
+ from transformers import AutoTokenizer
+
+ #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ # Default generation settings (the functions below also take these as arguments)
+ temperature = 0.5
+ top_p = 0.7
+ repetition_penalty = 1.2
+
+ OPENAI_KEY = getenv("OPENAI_API_KEY")
+ HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
+
+ #hf_client = InferenceClient(
+ #    "mistralai/Mistral-7B-Instruct-v0.1",
+ #    token=HF_TOKEN
+ #    )
+
+ hf_client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     token=HF_TOKEN
+ )
+
+
+ def format_prompt(message: str, api_kind: str):
+     """
+     Formats the given message using a chat template.
+
+     Args:
+         message (str): The user message to be formatted.
+         api_kind (str): Target API, either "openai" or "hf".
+
+     Returns:
+         The formatted message: a list of message dicts for OpenAI, or a
+         string produced by the tokenizer's chat template for Hugging Face.
+     """
+
+     # Create a list of message dictionaries with role and content
+     messages: List[Dict[str, Any]] = [{'role': 'user', 'content': message}]
+
+     if api_kind == "openai":
+         return messages
+     elif api_kind == "hf":
+         return tokenizer.apply_chat_template(messages, tokenize=False)
+     else:
+         raise ValueError("API is not supported")
+
+
+ def generate_hf(prompt: str, history: str, temperature: float = 0.5, max_new_tokens: int = 4000,
+                 top_p: float = 0.95, repetition_penalty: float = 1.0) -> Generator[str, None, str]:
+     """
+     Generate a sequence of tokens based on a given prompt and history using the Mixtral client.
+
+     Args:
+         prompt (str): The initial prompt for the text generation.
+         history (str): Context or history for the text generation.
+         temperature (float, optional): The softmax temperature for sampling. Defaults to 0.5.
+         max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 4000.
+         top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
+         repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.
+
+     Returns:
+         Generator[str, None, str]: A generator yielding chunks of generated text.
+             Returns a final string if an error occurs.
+     """
+
+     temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
+     top_p = float(top_p)
+
+     generate_kwargs = {
+         'temperature': temperature,
+         'max_new_tokens': max_new_tokens,
+         'top_p': top_p,
+         'repetition_penalty': repetition_penalty,
+         'do_sample': True,
+         'seed': 42,
+     }
+
+     formatted_prompt = format_prompt(prompt, "hf")
+
+     try:
+         stream = hf_client.text_generation(formatted_prompt, **generate_kwargs,
+                                            stream=True, details=True, return_full_text=False)
+         output = ""
+         for response in stream:
+             output += response.token.text
+             yield output
+
+     except Exception as e:
+         if "Too Many Requests" in str(e):
+             print("ERROR: Too many requests on Mistral client")
+             gr.Warning("Unfortunately Mistral is unable to process")
+             return "Unfortunately, I am not able to process your request now."
+         elif "Authorization header is invalid" in str(e):
+             print("Authentication error:", str(e))
+             gr.Warning("Authentication error: HF token was either not provided or incorrect")
+             return "Authentication error"
+         else:
+             print("Unhandled Exception:", str(e))
+             gr.Warning("Unfortunately Mistral is unable to process")
+             return "I do not know what happened, but I couldn't understand you."
+
+
+ def generate_openai(prompt: str, history: str, temperature: float = 0.9, max_new_tokens: int = 256,
+                     top_p: float = 0.95, repetition_penalty: float = 1.0) -> Generator[str, None, str]:
+     """
+     Generate a sequence of tokens based on a given prompt and history using the OpenAI client.
+
+     Args:
+         prompt (str): The initial prompt for the text generation.
+         history (str): Context or history for the text generation.
+         temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
+         max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
+         top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
+         repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.
+
+     Returns:
+         Generator[str, None, str]: A generator yielding chunks of generated text.
+             Returns a final string if an error occurs.
+     """
+
+     temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
+     top_p = float(top_p)
+
+     generate_kwargs = {
+         'temperature': temperature,
+         'max_tokens': max_new_tokens,
+         'top_p': top_p,
+         'frequency_penalty': max(-2., min(repetition_penalty, 2.)),
+     }
+
+     formatted_prompt = format_prompt(prompt, "openai")
+
+     try:
+         stream = openai.ChatCompletion.create(model="gpt-3.5-turbo-0301",
+                                               messages=formatted_prompt,
+                                               **generate_kwargs,
+                                               stream=True)
+         output = ""
+         for chunk in stream:
+             output += chunk.choices[0].delta.get("content", "")
+             yield output
+
+     except Exception as e:
+         if "Too Many Requests" in str(e):
+             print("ERROR: Too many requests on OpenAI client")
+             gr.Warning("Unfortunately OpenAI is unable to process")
+             return "Unfortunately, I am not able to process your request now."
+         elif "You didn't provide an API key" in str(e):
+             print("Authentication error:", str(e))
+             gr.Warning("Authentication error: OpenAI key was either not provided or incorrect")
+             return "Authentication error"
+         else:
+             print("Unhandled Exception:", str(e))
+             gr.Warning("Unfortunately OpenAI is unable to process")
+             return "I do not know what happened, but I couldn't understand you."
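Note that both generators yield the cumulative output so far rather than per-token deltas, which is why app.py simply overwrites history[-1][1] on each iteration. A minimal consumption sketch, assuming HUGGING_FACE_HUB_TOKEN is set (the prompt string is illustrative):

    from backend.query_llm import generate_hf

    final = ""
    for partial in generate_hf("What is the HSN heading for gold jewellery?", history=""):
        final = partial  # each yielded value is the full text generated so far
    print(final)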
backend/semantic_search.py ADDED
@@ -0,0 +1,27 @@
+ import logging
+ import os
+ from pathlib import Path
+
+ import lancedb
+ from sentence_transformers import SentenceTransformer
+ #from FlagEmbedding import LLMEmbedder, FlagReranker  # All documentation is here: https://github.com/FlagOpen/FlagEmbedding/tree/master
+
+ #EMB_MODEL_NAME = "thenlper/gte-base"
+ EMB_MODEL_NAME = 'BAAI/llm-embedder'
+ task = "qa"  # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
+ #EMB_MODEL_NAME = LLMEmbedder('BAAI/llm-embedder', use_fp16=False)  # Load model (automatically uses GPUs)
+
+ #reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True)  # use_fp16 speeds up computation with a slight performance degradation
+
+ #DB_TABLE_NAME = "Huggingface_docs"
+ DB_TABLE_NAME = "Expenditure_"
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ retriever = SentenceTransformer(EMB_MODEL_NAME)
+
+ # Connect to the LanceDB database one level above this file and open the pre-built table
+ db_uri = os.path.join(Path(__file__).parents[1], ".lancedb1")
+ db = lancedb.connect(db_uri)
+ table = db.open_table(DB_TABLE_NAME)
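The Expenditure_ table itself ships pre-built in .lancedb1/, so no ingestion code appears in this commit. For orientation only, a hypothetical sketch of how a table with the committed schema (a text string column plus a 768-dim vector, matching BAAI/llm-embedder's output size) could be created; the chunk strings are placeholders, not from this repo:

    import lancedb
    from sentence_transformers import SentenceTransformer

    retriever = SentenceTransformer("BAAI/llm-embedder")
    db = lancedb.connect(".lancedb1")

    chunks = ["placeholder chunk one", "placeholder chunk two"]  # hypothetical source text
    rows = [{"text": c, "vector": retriever.encode(c).tolist()} for c in chunks]
    table = db.create_table("Expenditure_", data=rows)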
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ transformers[torch,sentencepiece]
+ wikiextractor==3.0.6
+ sentence-transformers>2.2.0
+ ipywidgets==8.1.1
+ tqdm==4.66.1
+ aiohttp==3.8.6
+ huggingface-hub==0.17.3
+ lancedb
+ openai==0.28  # pinned below 1.0: backend/query_llm.py uses the legacy openai.ChatCompletion API
templates/template.j2 ADDED
@@ -0,0 +1,8 @@
+ Instructions: Use the following unique documents in the Context section to answer the Query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ Context:
+ {% for doc in documents %}
+ ---
+ {{ doc }}
+ {% endfor %}
+ ---
+ Query: {{ query }}
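Rendered through the same Environment that app.py builds, this template produces the final LLM prompt. A quick standalone check (the document and query strings are placeholders):

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("templates"))
    template = env.get_template("template.j2")
    print(template.render(documents=["chunk one", "chunk two"], query="example query"))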
templates/template_html.j2 ADDED
@@ -0,0 +1,102 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+     <title>Information Page</title>
+     <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&amp;display=swap">
+     <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&amp;display=swap">
+     <style>
+         * {
+             font-family: "Source Sans Pro";
+         }
+
+         .instructions > * {
+             color: #111 !important;
+         }
+
+         details.doc-box * {
+             color: #111 !important;
+         }
+
+         .dark {
+             background: #111;
+             color: white;
+         }
+
+         .doc-box {
+             padding: 10px;
+             margin-top: 10px;
+             background-color: #baecc2;
+             border-radius: 6px;
+             color: #111 !important;
+             max-width: 700px;
+             box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+         }
+
+         .doc-full {
+             margin: 10px 14px;
+             line-height: 1.6rem;
+         }
+
+         .instructions {
+             color: #111 !important;
+             background: #b7bdfd;
+             display: block;
+             border-radius: 6px;
+             padding: 6px 10px;
+             line-height: 1.6rem;
+             max-width: 700px;
+             box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+         }
+
+         .query {
+             color: #111 !important;
+             background: #ffbcbc;
+             display: block;
+             border-radius: 6px;
+             padding: 6px 10px;
+             line-height: 1.6rem;
+             max-width: 700px;
+             box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+         }
+     </style>
+ </head>
+ <body>
+ <div class="prose svelte-1ybaih5" id="component-6">
+     <h2>Prompt</h2>
+     Below is the prompt that is given to the model. <hr>
+     <h2>Instructions</h2>
+     <span class="instructions">Use the following pieces of context to answer the question at the end.<br>If you don't know the answer, just say that you don't know, <span style="font-weight: bold;">don't try to make up an answer.</span></span><br>
+     <h2>Context</h2>
+     {% for doc in documents %}
+     <details class="doc-box">
+         <summary>
+             <b>Doc {{ loop.index }}:</b> <span class="doc-short">{{ doc[:100] }}...</span>
+         </summary>
+         <div class="doc-full">{{ doc }}</div>
+     </details>
+     {% endfor %}
+
+     <h2>Query</h2>
+     <span class="query">{{ query }}</span>
+ </div>
+
+ <script>
+     document.addEventListener("DOMContentLoaded", function() {
+         const detailsElements = document.querySelectorAll('.doc-box');
+
+         detailsElements.forEach(detail => {
+             detail.addEventListener('toggle', function() {
+                 const docShort = this.querySelector('.doc-short');
+                 if (this.open) {
+                     docShort.style.display = 'none';
+                 } else {
+                     docShort.style.display = 'inline';
+                 }
+             });
+         });
+     });
+ </script>
+ </body>
+ </html>