import gradio as gr
from functools import partial
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
from scipy.special import softmax
import os

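# Helpers for a multi-task NLP demo built with Gradio: sentence similarity
# over uploaded documents, text classification (e.g. sentiment / emotion),
# and POS tagging, each wrapped for use as a Gradio interface.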

class SentenceSimilarity:
    """Scores a corpus of documents against a query using sentence embeddings."""

    def __init__(self, model: str):
        self.model = SentenceTransformer(model)

    def __call__(self, query: str, corpus: list[str]):
        query_embedding = self.model.encode(query)
        corpus_embeddings = self.model.encode(corpus)
        # Score every corpus entry; semantic_search defaults to top_k=10,
        # which would silently drop documents beyond the first ten.
        output = util.semantic_search(
            query_embedding, corpus_embeddings, top_k=len(corpus)
        )
        # Restore corpus order so scores line up with the input documents,
        # then turn the cosine scores into a probability-like distribution.
        sorted_output = sorted(output[0], key=lambda x: x["corpus_id"])
        probabilities = softmax([x["score"] for x in sorted_output])
        return probabilities


# Sentence Similarity
def sentence_similarity(text: str, documents: list[str], pipe: SentenceSimilarity):
    # Read each uploaded document and score it against the query text.
    doc_texts = []
    for doc in documents:
        with open(doc, "r") as f:
            doc_texts.append(f.read())
    answer = pipe(query=text, corpus=doc_texts)
    return {os.path.basename(doc): float(prob) for doc, prob in zip(documents, answer)}


# Text Analysis
def cls_inference(text: str, pipe: pipeline) -> dict[str, float]:
    # top_k=None returns a score for every label, not just the top one.
    results = pipe(text, top_k=None)
    return {x["label"]: x["score"] for x in results}


def text_interface(
    pipe: pipeline, examples: list[str], output_label: str, title: str, desc: str
):
    return gr.Interface(
        fn=partial(cls_inference, pipe=pipe),
        inputs=[
            gr.Textbox(lines=5, label="Input Text"),
        ],
        title=title,
        description=desc,
        outputs=[gr.Label(label=output_label)],
        examples=examples,
        allow_flagging="never",
    )


# POSP
def pos_tagging(text: str, pipe: pipeline):
    # Token-classification output in the {"text", "entities"} format
    # expected by gr.HighlightedText.
    output = pipe(text)
    return {"text": text, "entities": output}


# Text Analysis
def text_analysis(text: str, pipes: dict):
    # Run sentiment analysis, emotion classification, and POS tagging on the
    # same input and return one result per output component.
    sa = cls_inference(text, pipes["Sentiment Analysis"])
    emot = cls_inference(text, pipes["Emotion Classifier"])
    pos = pos_tagging(text, pipes["POS Tagging"])
    return (sa, emot, pos)
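

# --- Usage sketch (not part of the original file) ---
# A minimal example of how these helpers could be wired into a Gradio app.
# The model identifiers, tab layout, and example texts below are illustrative
# assumptions, not the actual configuration of this Space.
if __name__ == "__main__":
    # Hypothetical checkpoint; substitute the model the app actually uses.
    sentiment_pipe = pipeline(
        "text-classification",
        model="distilbert-base-uncased-finetuned-sst-2-english",
    )

    sentiment_demo = text_interface(
        pipe=sentiment_pipe,
        examples=["I love this library!"],
        output_label="Sentiment",
        title="Sentiment Analysis",
        desc="Classify the sentiment of a piece of text.",
    )

    # Hypothetical embedding model for the sentence-similarity tab.
    similarity_model = SentenceSimilarity("sentence-transformers/all-MiniLM-L6-v2")
    similarity_demo = gr.Interface(
        fn=partial(sentence_similarity, pipe=similarity_model),
        inputs=[
            gr.Textbox(lines=2, label="Query"),
            gr.File(file_count="multiple", label="Documents"),
        ],
        outputs=[gr.Label(label="Most similar documents")],
        title="Sentence Similarity",
        allow_flagging="never",
    )

    gr.TabbedInterface(
        [sentiment_demo, similarity_demo],
        ["Sentiment Analysis", "Sentence Similarity"],
    ).launch()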