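"""U-Chat: a Gradio RAG demo that answers questions about United Group
services with a Mistral model over a persistent ChromaDB vector store,
optionally indexing a user-uploaded document on the fly."""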
import os

import chromadb
import gradio as gr
from chromadb.utils import embedding_functions
from huggingface_hub import login
from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.vector_stores.chroma import ChromaVectorStore
#
last = 0
CHROMA_DATA_PATH = "chroma_data/"
EMBED_MODEL = "BAAI/bge-m3"
# alternative embedding model: all-MiniLM-L6-v2
CHUNK_SIZE = 800
CHUNK_OVERLAP = 50
max_results = 3
min_len = 40
min_distance = 0.35
max_distance = 0.6
temperature = 0.55
max_tokens = 3072
top_p = 0.8
frequency_penalty = 0.0
presence_penalty = 0.15
jezik = "srpski"  # answer language: Serbian
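# The retrieval thresholds above (max_results, min_len, min_distance,
# max_distance) are not wired into the query engine below. A minimal sketch,
# assuming a raw Chroma query result, of how they could be applied; this
# helper is illustrative only and is not called anywhere in this app:
def filter_hits(results):
    """Keep up to max_results docs whose cosine distance lies in the band."""
    docs = results["documents"][0]
    dists = results["distances"][0]
    kept = [
        (doc, dist)
        for doc, dist in zip(docs, dists)
        if min_distance <= dist <= max_distance and len(doc) >= min_len
    ]
    return kept[:max_results]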
# System prompt (Serbian). Translation: "Your name is U-Chat AI assistant and
# you help customers of United Group's services. The user asks a question or
# describes a problem, paired with additional context. Based on that, write
# the user a short and polite answer that completes their request or answers
# their question."
system_sr = "Zoveš se U-Chat AI asistent i pomažeš korisniku usluga kompanije United Group. Korisnik postavlja pitanje ili problem, upareno sa dodatnim saznanjima. Na osnovu toga napiši korisniku kratak i ljubazan odgovor koji kompletira njegov zahtev ili mu daje odgovor na pitanje. "
# Optional anti-hallucination clause: " Ako ne znaš odgovor, reci da ne znaš, ne izmišljaj ga."
# ("If you don't know the answer, say you don't know; don't make it up.")
# Translation of the line below: "United Group's services include a cable
# network for digital television, internet access, the EON SMART BOX device
# for TV content, as well as fixed telephony."
system_sr += "Usluge kompanije United Group uključuju i kablovsku mrežu za digitalnu televiziju, pristup internetu, uređaj EON SMART BOX za TV sadržaj, kao i fiksnu telefoniju."
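# Persistent Chroma collection using cosine similarity; documents are embedded
# with EMBED_MODEL via a SentenceTransformer embedding function.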
chroma_client = chromadb.PersistentClient(CHROMA_DATA_PATH)
embedding_func = embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=EMBED_MODEL
)
collection = chroma_client.get_or_create_collection(
name="chroma_data",
embedding_function=embedding_func,
metadata={"hnsw:space": "cosine"},
)
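# Number of documents already stored; used as the offset when generating IDs
# for newly added documents.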
last = collection.count()
#
# The Hugging Face token is read from the environment (e.g. a Space secret);
# never hard-code credentials in the source.
HF_TOKEN = os.environ["HF_TOKEN"]
#
login(token=HF_TOKEN)
system_prompt = system_sr
# Models tried earlier: facebook/blenderbot-400M-distill,
# stabilityai/stablelm-zephyr-3b, BAAI/bge-small-en-v1.5
# Remote LLM via the Hugging Face Inference API. Locally-hosted parameters
# (device_map, tokenizer_name, generate_kwargs) belong to HuggingFaceLLM and
# are ignored by the remote API class, so they are omitted here.
Settings.llm = HuggingFaceInferenceAPI(
    model_name="mistralai/Mistral-Nemo-Instruct-2407",
    system_prompt=system_prompt,
    context_window=4096,
    num_output=256,
)
# "BAAI/bge-m3"
Settings.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Scratch example of building an ad-hoc index from in-memory Documents
# (requires `from llama_index.core import Document`):
# documents = [Document(text="...")]
# index = VectorStoreIndex.from_documents(documents)
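# Default query engine over the persistent Chroma collection (used when the
# user does not upload a file).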
vector_store = ChromaVectorStore(chroma_collection=collection)
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=Settings.embed_model)
query_engine = index.as_query_engine()
def rag(input_text, file):
    # With an upload: build an ad-hoc index over the upload's directory and
    # answer from it; otherwise answer from the persistent Chroma index.
    engine = query_engine
    if file:
        pathname = os.path.dirname(file.name)
        print("pathname=" + pathname)
        print("basename=" + os.path.basename(file.name))
        print("filename=" + file.name)
        documents = SimpleDirectoryReader(pathname).load_data()
        local_index = VectorStoreIndex.from_documents(documents)
        # TODO (from the original draft): persist the uploaded documents into
        # the Chroma collection, e.g. collection.add(documents=...,
        # ids=[f"id{last+i}" ...], metadatas=[{"state": "s0", "next": "s0",
        # "used": False, "source": 'None', "page": -1, "lang": jezik}, ...]),
        # so they survive restarts.
        engine = local_index.as_query_engine()
    return engine.query(input_text)
# UI labels are Serbian: "Pitanje" = "Question", "Odgovor" = "Answer",
# "Kako Vam mogu pomoći?" = "How can I help you?".
iface = gr.Interface(
    fn=rag,
    inputs=[gr.Textbox(label="Pitanje:", lines=6), gr.File()],
    outputs=[gr.Textbox(label="Odgovor:", lines=6)],
    title="Kako Vam mogu pomoći?",
    description="UChat",
)
iface.launch()
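# Note: on Hugging Face Spaces this file runs as the app entry point, so
# launch() exposes the interface automatically; run locally, it serves on
# http://127.0.0.1:7860 by default.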