import gradio as gr
from sentence_transformers import SentenceTransformer
from huggingface_hub import InferenceClient
import pandas as pd
import torch
import httpcore
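# googletrans 3.x references httpcore.SyncHTTPTransport at import time, but newer
# httpcore releases removed that name; patching in a placeholder attribute keeps
# the import from raising AttributeError (the value is only used as a type
# annotation, so a plain string suffices).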
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
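# For a quick non-streaming sanity check (hypothetical example, not used below):
#   out = client.chat_completion([{"role": "user", "content": "Hello"}], max_tokens=32)
#   print(out.choices[0].message.content)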
model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
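# E5 instruct models expect queries formatted as 'Instruct: {task}\nQuery: {query}';
# get_detailed_instruct below builds exactly that prompt.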
examples = [
    ["Why was man created?"],
    ["Please tell me about superstition!"],
    ["How did Moses defeat Pharaoh?"],
]
def get_detailed_instruct(task_description: str, query: str) -> str:
    return f'Instruct: {task_description}\nQuery: {query}'
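# e.g. get_detailed_instruct(task, 'How did Moses defeat Pharaoh?') returns
# 'Instruct: {task}\nQuery: How did Moses defeat Pharaoh?'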
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=2048,
    temperature=0.7,
    top_p=0.95,
):
    # system role
    messages = [{"role": "system", "content": "You are a Sunni Muslim bot that always gives answers based on the Quran, hadith, and the companions of Prophet Muhammad!"}]
    # prime the model with a fixed user/assistant exchange
    messages.append({"role": "user", "content": "I want you to answer strictly based on the Quran and hadith"})
    messages.append({"role": "assistant", "content": "I'd be happy to help! Please go ahead and provide the sentence you'd like me to analyze, and specify whether you're referencing a particular Quranic verse or hadith (Prophetic tradition), or asking me to analyze a general statement."})
    # adding fatwa references (currently disabled)
    '''
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    selected_references = torch.load('selected_references.sav', map_location=torch.device(device))
    encoded_questions = torch.load('encoded_questions.sav', map_location=torch.device(device))

    task = 'Given a web search query, retrieve relevant passages that answer the query'
    queries = [
        get_detailed_instruct(task, message)
    ]
    examples.append(message)

    # normalized embeddings make the dot product a cosine similarity, scaled to ~0-100
    query_embeddings = model.encode(queries, convert_to_tensor=True, normalize_embeddings=True)
    scores = (query_embeddings @ encoded_questions.T) * 100
    selected_references['similarity'] = scores.tolist()[0]

    # keep only the single most similar reference
    sorted_references = selected_references.sort_values(by='similarity', ascending=False)
    print(sorted_references.shape[0])
    sorted_references = sorted_references.iloc[:1]
    sorted_references = sorted_references.sort_values(by='similarity', ascending=True)
    print(sorted_references.shape[0])
    print(sorted_references['similarity'].tolist())

    # translate each Arabic reference into English and add it to the context
    from googletrans import Translator
    translator = Translator()
    for index, row in sorted_references.iterrows():
        if isinstance(row['user'], str) and isinstance(row['assistant'], str):
            try:
                print(index)
                print(f'{row["user"]}')
                translated = translator.translate(f'{row["user"]}', src='ar', dest='en')
                print(translated)
                user = translated.text
                print(user)
                assistant = translator.translate(row['assistant']).text
                messages.append({"role": "user", "content": user})
                messages.append({"role": "assistant", "content": assistant})
            except Exception:
                print("adding fatwa references exception occurred")

    # adding more references from a pipe-separated CSV
    df = pd.read_csv("moslem-bot-reference.csv", sep='|')
    for index, row in df.iterrows():
        messages.append({"role": "user", "content": row['user']})
        messages.append({"role": "assistant", "content": row['assistant']})
    '''
    # history from the chat session
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    # latest user question
    # translator is only needed by the disabled translation paths below
    from googletrans import Translator
    translator = Translator()
    """
    en_message = ""
    message_language = "en"
    print("===message===")
    print(message)
    print("============")
    try:
        translator = Translator()
        print(translator.detect(message))
        message_language = translator.detect(message).lang
        print(message_language)
        print(translator.translate(message))
        en_message = translator.translate(message).text
    except Exception:
        print("en_message exception occurred")
    messages.append({"role": "user", "content": en_message})
    """
    messages.append({"role": "user", "content": message})
    # print(messages)
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        try:
            token = chunk.choices[0].delta.content
            if token:
                response += token
            # translated_response = translator.translate(response, src='en', dest=message_language).text
            # yield translated_response
            yield response
        except Exception:
            yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    cache_examples="lazy",
    examples=examples,
)
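# cache_examples="lazy" defers generating example outputs until a user first
# clicks an example, instead of running them all at startup.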
if __name__ == "__main__":
    demo.launch()