# Necessary imports
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from datasets import load_dataset
import pandas as pd
from functools import lru_cache
from huggingface_hub import InferenceClient
import gradio as gr
# Initialize the Hugging Face Inference Client
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Load dataset
dataset = load_dataset('arbml/LK_Hadith')
df = pd.DataFrame(dataset['train'])
# Filter data (Only retain Hadiths with non-weak grades)
filtered_df = df[df['Arabic_Grade'] != 'ضعيف']
documents = list(filtered_df['Arabic_Matn'])
metadatas = [{"Hadith_Grade": grade} for grade in filtered_df['Arabic_Grade']]
# Text splitter (using a smaller chunk size for memory efficiency)
text_splitter = CharacterTextSplitter(chunk_size=1000)
nltk_chunks = text_splitter.create_documents(documents, metadatas=metadatas)
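# Quick sanity check (illustrative addition): report how many chunks were produced
# before computing embeddings, so an unexpectedly small corpus is easy to spot.
print(f"Prepared {len(nltk_chunks)} document chunks")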
# LLM (Replace Ollama with a Hugging Face Hub model)
from langchain.llms import HuggingFaceHub
llm = HuggingFaceHub(repo_id="salmatrafi/acegpt:7b")
# Create an embedding model (Hugging Face transformer model for embeddings)
embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-base")
# Generate document embeddings
docs_text = [doc.page_content for doc in nltk_chunks]
try:
    docs_embedding = embeddings.embed_documents(docs_text)
except Exception as e:
    print(f"Error in embedding generation: {str(e)}")
# Create Chroma vector store with embeddings
try:
    vector_store = Chroma.from_documents(nltk_chunks, embedding=embeddings)
except Exception as e:
    print(f"Error in creating vector store: {str(e)}")
# Question answering prompt template
qna_template = "\n".join([
    "Answer the next question using the provided context.",
    "If the answer is not contained in the context, say 'NO ANSWER IS AVAILABLE'",
    "### Context:",
    "{context}",
    "",
    "### Question:",
    "{question}",
    "",
    "### Answer:",
])
qna_prompt = PromptTemplate(
    template=qna_template,
    input_variables=['context', 'question'],
    verbose=True
)
# Combine intermediate context template
combine_template = "\n".join([
    "Given intermediate contexts for a question, generate a final answer.",
    "If the answer is not contained in the intermediate contexts, say 'NO ANSWER IS AVAILABLE'",
    "### Summaries:",
    "{summaries}",
    "",
    "### Question:",
    "{question}",
    "",
    "### Final Answer:",
])
combine_prompt = PromptTemplate(
    template=combine_template,
    input_variables=['summaries', 'question'],
)
# Load map-reduce chain for question answering
map_reduce_chain = load_qa_chain(llm, chain_type="map_reduce",
                                 return_intermediate_steps=True,
                                 question_prompt=qna_prompt,
                                 combine_prompt=combine_prompt)
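# Note: with chain_type="map_reduce", load_qa_chain applies question_prompt to each
# retrieved document separately and then uses combine_prompt to merge those
# intermediate answers into a single final response.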
# Function to preprocess the query (handling long inputs)
def preprocess_query(query):
    if len(query) > 512:  # Arbitrary length, adjust based on LLM input limits
        query = query[:512] + "..."
    return query
# Caching mechanism for frequently asked questions
@lru_cache(maxsize=100)  # Cache up to 100 recent queries
def answer_query(query):
    query = preprocess_query(query)
    try:
        # Search for similar documents in the vector store
        similar_docs = vector_store.similarity_search(query, k=5)
        if not similar_docs:
            return "No relevant documents found."
        # Run the map-reduce chain to get the answer
        final_answer = map_reduce_chain({
            "input_documents": similar_docs,
            "question": query
        }, return_only_outputs=True)
        output_text = final_answer.get('output_text', "No answer generated by the model.")
    except Exception as e:
        output_text = f"An error occurred: {str(e)}"
    return output_text
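# Example usage (hypothetical query; kept commented out so the RAG chain is not
# invoked at startup). Repeated calls with the same string are served from the
# lru_cache above.
# print(answer_query("ما حكم صلاة الجماعة؟"))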
# Gradio Chatbot response function using Hugging Face Inference Client
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    response = ""
    try:
        for msg in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # Some streamed chunks carry an empty delta; guard before appending
            token = msg.choices[0].delta.content or ""
            response += token
            yield response
    except Exception as e:
        yield f"An error occurred during chat completion: {str(e)}"
# Gradio Chat Interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
# Launch the Gradio interface
if __name__ == "__main__":
    demo.launch()