import os

import gradio as gr
from huggingface_hub import InferenceClient, login, snapshot_download
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
login(token=os.getenv('TOKEN'))
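# Serverless Inference API client; the commented line below swaps in Mistral instead of Llama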
client = InferenceClient("meta-llama/Llama-3.2-1B-Instruct")
#client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
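# Fetch the prebuilt FAISS index from the umaiku/faiss_index dataset into the working directory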
folder = snapshot_download(repo_id="umaiku/faiss_index", repo_type="dataset", local_dir=os.getcwd())
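# Embedding model for queries; presumably the same model the index was built with, or similarity scores would be meaningless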
embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-small")
vector_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
retriever = vector_db.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.75})
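# A quick sanity check (hypothetical query; assumes each indexed Document carries a "case_url" in its metadata, as used below):
#   retriever.invoke("liability for defective products")
#   -> [Document(page_content="...", metadata={"case_url": "https://..."})], or [] if nothing clears the 0.75 threshold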
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
messages = [{"role": "system", "content": system_message}]
document = retriever.invoke(message)
if document == []:
message = message + "\nNo cases were found about this subject"
else:
message = message + "\nUse the following jurisprudence case to answer " + document[0].page_content + "\n Give the following url " + document[0].metadata["case_url"]
print(message)
    # Replay the conversation so far, then append the augmented user message
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
response = ""
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
response += token
yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are an assistant for Swiss jurisprudence cases.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    description="# 📜 ALexI: Artificial Legal Intelligence for Swiss Jurisprudence",
)
if __name__ == "__main__":
    demo.launch(debug=True)