Update app.py
app.py CHANGED
@@ -1,23 +1,136 @@
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-import spaces
-
-tokenizer = AutoTokenizer.from_pretrained("Yoxas/autotrain-gpt2-statistical1")
-model = AutoModelForCausalLM.from_pretrained("Yoxas/autotrain-gpt2-statistical1")
-
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-
-# Create the Gradio interface
-interface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Research Paper Abstract Chatbot")
+import gradio as gr
+import os
+import spaces
+import torch
+from datasets import load_dataset
+from sentence_transformers import SentenceTransformer
+from threading import Thread
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+
+token = os.environ["HF_TOKEN"]
+# query embedder; must be the same model that produced the dataset's "embeddings" column
+ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
+
+# the second positional argument of load_dataset is a config name, not a second
+# dataset id, so "Yoxas/statistical_literacy" cannot be passed there; load the
+# pre-embedded dataset listed in DESCRIPTION below
+dataset = load_dataset("not-lain/wikipedia", revision="embedded")
+
+# a DatasetDict is indexed by a single split name
+data = dataset["train"]
+data = data.add_faiss_index("embeddings")  # column name that has the embeddings of the dataset
+
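+# a minimal sketch of how the index is queried (hypothetical query string);
+# search() below wraps exactly this call:
+#   scores, examples = data.get_nearest_examples("embeddings", ST.encode("what's anarchy ?"), k=3)
+#   examples["text"][0]  # text of the best-scoring passage
+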
+model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+# use quantization to lower GPU usage
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
+)
+
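+# NF4 4-bit quantization keeps the 8B weights in roughly 5 GB of VRAM instead of
+# ~16 GB in bf16; it requires the bitsandbytes package (and accelerate for device_map)
+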
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    quantization_config=bnb_config,
+    token=token,
+)
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+]
+
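+# "<|eot_id|>" is Llama-3's end-of-turn marker; listing it alongside eos_token_id
+# lets generate() stop at either token instead of running to max_new_tokens
+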
+SYS_PROMPT = """You are an assistant for answering questions.
+You are given the extracted parts of a long document and a question. Provide a conversational answer.
+If you don't know the answer, just say "I do not know." Don't make up an answer."""
+
+
+def search(query: str, k: int = 3):
+    """embed a new query and return the k most similar dataset rows"""
+    embedded_query = ST.encode(query)  # embed new query
+    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
+        "embeddings", embedded_query,  # compare our new embedded query with the dataset embeddings
+        k=k,  # get only top k results
+    )
+    return scores, retrieved_examples
+
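+# example (hypothetical shapes): scores is an array of FAISS scores and
+# retrieved_examples maps column names to lists, e.g. retrieved_examples["text"][0]
+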
+def format_prompt(prompt, retrieved_documents, k):
+    """build the model prompt from the question plus the k retrieved documents"""
+    PROMPT = f"Question:{prompt}\nContext:"
+    for idx in range(k):
+        PROMPT += f"{retrieved_documents['text'][idx]}\n"
+    return PROMPT
+
+
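+# e.g. format_prompt("what's anarchy ?", docs, 1) produces:
+#   "Question:what's anarchy ?\nContext:<text of the retrieved passage>\n"
+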
+@spaces.GPU(duration=150)
+def talk(prompt, history):
+    k = 1  # number of retrieved documents
+    scores, retrieved_documents = search(prompt, k)
+    formatted_prompt = format_prompt(prompt, retrieved_documents, k)
+    formatted_prompt = formatted_prompt[:2000]  # to avoid GPU OOM
+    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
+    input_ids = tokenizer.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        return_tensors="pt",
+    ).to(model.device)
+    # generate() blocks, so run it in a worker thread and stream tokens back;
+    # the earlier blocking model.generate() call was redundant (its output was
+    # discarded) and has been dropped
+    streamer = TextIteratorStreamer(
+        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+    )
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        temperature=0.75,
+        eos_token_id=terminators,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)
+
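+# talk() is a generator: gr.ChatInterface treats each yielded string as the
+# current partial reply, so the answer streams incrementally in the UI
+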
+TITLE = "# RAG"
+
+DESCRIPTION = """
+A RAG pipeline with a chatbot feature.
+Resources used to build this project:
+* embedding model: https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
+* dataset: https://huggingface.co/datasets/not-lain/wikipedia
+* faiss docs: https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
+* chatbot: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
+"""
+
+demo = gr.ChatInterface(
+    fn=talk,
+    chatbot=gr.Chatbot(
+        show_label=True,
+        show_share_button=True,
+        show_copy_button=True,
+        likeable=True,
+        layout="bubble",
+        bubble_full_width=False,
+    ),
+    theme="soft",  # built-in theme names are lowercase
+    examples=[["what's anarchy ? "]],
+    title=TITLE,
+    description=DESCRIPTION,
+)
+demo.launch(debug=True)