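# Gradio demo for multilingual aspect-based sentiment analysis with PyABSA:
# extracts aspect terms from the input text and predicts their sentiment.
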
import os
import random
import gradio as gr
import pandas as pd

from pyabsa import ATEPCCheckpointManager
from pyabsa.functional.dataset.dataset_manager import download_datasets_from_github, ABSADatasetList, detect_infer_dataset

# fetch the community ABSADatasets collection into the working directory so that
# example sentences can be loaded below
download_datasets_from_github(os.getcwd())

dataset_items = {dataset.name: dataset for dataset in ABSADatasetList()}

def get_example(dataset):
    task = 'apc'
    dataset_file = detect_infer_dataset(dataset_items[dataset], task)

    # detect_infer_dataset may return a single path or a (possibly nested) list of paths
    if isinstance(dataset_file, str):
        dataset_file = [dataset_file]

    lines = []
    for fname in dataset_file:
        if isinstance(fname, str):
            fname = [fname]
        for f in fname:
            print('loading: {}'.format(f))
            with open(f, 'r', encoding='utf-8') as fin:
                lines.extend(fin.readlines())

    # keep only the sentence part of each '<sentence> !sent! <label>' line and
    # drop the '[ASP]' aspect-boundary markers
    lines = [line[:line.find('!sent!')].replace('[ASP]', '') for line in lines]
    # deduplicate while preserving the original order
    return sorted(set(lines), key=lines.index)
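
# A raw line in these inference files looks roughly like the following
# (assumed format, inferred from the parsing above):
#   'the [ASP]battery life[ASP] is great !sent! Positive'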


# pre-compute the example sentences for every dataset so they can be sampled at inference time
dataset_dict = {dataset.name: get_example(dataset.name) for dataset in ABSADatasetList()}
# download (on first use) and load a pretrained multilingual ATEPC checkpoint
aspect_extractor = ATEPCCheckpointManager.get_aspect_extractor(checkpoint='multilingual-256-2')
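
# Judging from how perform_inference() uses the result below, each element of the
# list returned by extract_aspect() appears to be a dict shaped roughly like
# (assumed structure, not an official API guarantee):
#   {'aspect': [...], 'sentiment': [...], 'confidence': [...], 'position': [...]}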


def perform_inference(text, dataset):
    # fall back to a random example from the chosen dataset when the input is empty
    if not text:
        text = random.choice(dataset_dict[dataset])

    result = aspect_extractor.extract_aspect(inference_source=[text],
                                             pred_sentiment=True)

    # flatten the first (and only) result into a table, one row per extracted aspect
    result = pd.DataFrame({
        'aspect': result[0]['aspect'],
        'sentiment': result[0]['sentiment'],
        # 'probability': result[0]['probs'],
        'confidence': [round(x, 4) for x in result[0]['confidence']],
        'position': result[0]['position']
    })
    return result, text
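
# A minimal sketch of calling the pipeline directly, outside the UI (the input
# sentence is made up; the actual aspects and sentiments depend on the checkpoint):
#
#   df, text = perform_inference('The battery life is great but the screen is dim', 'Laptop14')
#   print(text)  # the analysed sentence
#   print(df)    # columns: aspect, sentiment, confidence, position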


demo = gr.Blocks()

with demo:
    gr.Markdown("# <p align='center'>Multilingual Aspect-based Sentiment Analysis !</p>")
    gr.Markdown("""### Repo: [PyABSA](https://github.com/yangheng95/PyABSA)
                ### Author: [Heng Yang](https://github.com/yangheng95) (杨恒)
                ## This demo is based on PyABSA v1.16.27, while the latest release is [v2.X](https://github.com/yangheng95/PyABSA)
                [![Downloads](https://pepy.tech/badge/pyabsa)](https://pepy.tech/project/pyabsa) 
                [![Downloads](https://pepy.tech/badge/pyabsa/month)](https://pepy.tech/project/pyabsa)
                """
                )
    gr.Markdown("Your input text should be no more than 80 words, that's the longest text we used in training. However, you can try longer text in self-training ")
    gr.Markdown("**You don't need to split each Chinese (Korean, etc.) token as the provided, just input the natural language text.**")
    with gr.Row():
        with gr.Column():
            input_sentence = gr.Textbox(placeholder='Leave this box blank and choose a dataset to get a random example...', label="Example:")
            gr.Markdown("You can find the datasets at [github.com/yangheng95/ABSADatasets](https://github.com/yangheng95/ABSADatasets/tree/v1.2)")
            dataset_ids = gr.Radio(choices=[dataset.name for dataset in ABSADatasetList()[:-1]], value='Laptop14', label="Datasets")
            inference_button = gr.Button("Let's go!")
            gr.Markdown("There is a [demo](https://huggingface.co/spaces/yangheng/PyABSA-ATEPC-Chinese) specialized for the Chinese langauge")
            gr.Markdown("This demo support many other language as well, you can try and explore the results of other languages by yourself.")

        with gr.Column():
            output_text = gr.TextArea(label="Analyzed Example:")
            output_df = gr.DataFrame(label="Prediction Results:")

        inference_button.click(fn=perform_inference,
                               inputs=[input_sentence, dataset_ids],
                               outputs=[output_df, output_text],
                               api_name='inference')

    gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=https://huggingface.co/spaces/yangheng/Multilingual-Aspect-Based-Sentiment-Analysis)")


# launch() blocks and serves the app (at http://127.0.0.1:7860 by default when run locally)
demo.launch()