"""BM25 search over cleaned file paths, served through a Gradio dropdown UI."""
import gradio as gr
import pandas as pd

from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import BM25Retriever
from haystack.pipelines import DocumentSearchPipeline
df = pd.read_parquet('df.parquet')
dirname = 'lot3'
# Clean each file path into a searchable label: strip everything up to and
# including the directory right under `dirname`, drop parentheses and the
# file name, then turn '/', '-' and zero-padding into plain spaces.
df['fileclean'] = (df.file
                   .str.replace(f'.*{dirname}/[^/]+/', '', regex=True)
                   .str.replace(r'[()]', '', regex=True)
                   .str.replace(r'/[^/]+$', '', regex=True)
                   .str.replace('/', ' ', regex=False)
                   .str.replace('-', ' ', regex=False)
                   .str.replace(r' 0+', ' ', regex=True))
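# Illustrative example of the cleaning chain (hypothetical path, not taken
# from the data):
#   '/data/lot3/batch01/dossier-(2021)/007/scan.pdf' -> 'dossier 2021 7'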
# Dropdown candidates plus their metadata; cast the shared columns to str on
# both sides so `merge` can match them.
candidats = pd.read_parquet('candidats.parquet')
df2 = pd.read_parquet('df2.parquet')
for c in df2.columns:
    candidats[c] = candidats[c].astype(str)
    df2[c] = df2[c].astype(str)
candidats = candidats.merge(df2)
# Index one document per unique cleaned path.
document_store = InMemoryDocumentStore(use_bm25=True)
docs = (df.drop_duplicates(subset=['fileclean'])
          .rename(columns={'fileclean': 'content'})
          .to_dict(orient='records'))
document_store.write_documents(docs)
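# Each record becomes a Haystack Document: `content` (the cleaned path) is the
# field BM25 matches against, and the remaining columns are kept in `doc.meta`.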
# Wrap the store in a BM25 retriever and a single-node search pipeline.
retriever = BM25Retriever(document_store=document_store)
pipeline = DocumentSearchPipeline(retriever=retriever)
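# A minimal sanity check outside the UI (hypothetical query string):
#     hits = pipeline.run(query="dossier 2021",
#                         params={"Retriever": {"top_k": 3}})
#     for d in hits["documents"]:
#         print(round(d.score, 3), d.content)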
def semanticsearch(query):
    """Return the top BM25 matches for `query` as a DataFrame of metadata."""
    result = pipeline.run(
        query=query,
        params={"Retriever": {"top_k": 10}},
        debug=False,
    )
    rows = []
    for document in result['documents']:
        # Copy the metadata so the stored document is not mutated.
        row = dict(document.meta)
        row['score'] = document.score
        rows.append(row)
    return pd.DataFrame(rows)
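# e.g. semanticsearch("dossier 2021") -> one row per hit, with the original
# metadata columns plus `score` (hypothetical query string).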
# Gradio UI: pick a candidate string from the dropdown and show the ranked hits.
demo = gr.Interface(
    semanticsearch,
    [gr.Dropdown(candidats.sort_values(by='text').text.tolist())],
    [gr.Dataframe()],
)
if __name__ == "__main__":
    demo.launch()
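    # demo.launch(share=True) would additionally expose a temporary public
    # URL (standard Gradio option).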