jfarray committed on
Commit 9543aec · 1 Parent(s): 37dcb89

Create app.py

Files changed (1)
  1. app.py +96 -0
app.py ADDED
@@ -0,0 +1,96 @@
+ import gradio as gr
+ import json
+ from datasets import load_dataset
+ from sentence_transformers import SentenceTransformer, InputExample, losses, util, evaluation
+ import pandas as pd
+
+ def Main(Modelo, Operacion, Texto1, Texto2, File):
+
+     error = ""
+     modelResult = pd.DataFrame()
+
+     if File is None and Operacion == "Procesar Fichero":
+         error = "Debe seleccionar un fichero"
+     else:
+         try:
+             if Operacion == "Comparar Textos":
+                 data_test = []
+                 data_test.append(InputExample(guid="", texts=[Texto1, Texto2], label=0))
+             else:
+                 data_test = ConvertJsonToList(File.name)
+
+             modelResult = TestModel('jfarray/Model_' + Modelo + '_50_Epochs', data_test)
+         except Exception as e:
+             error = str(e)
+
+     return [error, modelResult]
+
+ def ConvertJsonToList(fileName):
+     subject_fileDataset = load_dataset("json", data_files=fileName)
+
+     samples = []
+
+     for i in range(0, len(subject_fileDataset["train"])):
+         hashed_id = subject_fileDataset["train"][i]['hashed_id']
+         mark = subject_fileDataset["train"][i]['nota']
+         responseStudent = subject_fileDataset["train"][i]['respuesta']
+         responseTeacher = ""
+         for j in range(0, len(subject_fileDataset["train"][i]['metadata']['minipreguntas'])):
+             responseTeacher = responseTeacher + subject_fileDataset["train"][i]['metadata']['minipreguntas'][j]['minirespuesta']
+
+         ie = InputExample(guid=hashed_id, texts=[responseTeacher, responseStudent], label=mark)
+
+         samples.append(ie)
+
+     return samples
+
+ def TestModel(checkpoint, data):
+     model = SentenceTransformer(checkpoint)
+     df = pd.DataFrame(columns=["Hashed_id", "Nota", "Similitud Semántica"])
+
+     sentences1 = []
+     sentences2 = []
+     hashed_ids = []
+     marks = []
+     scores = []
+     for i in range(0, len(data)):
+         sentences1.append(data[i].texts[0])
+         sentences2.append(data[i].texts[1])
+
+     # Compute embeddings for both lists
+     embeddings1 = model.encode(sentences1, convert_to_tensor=True)
+     embeddings2 = model.encode(sentences2, convert_to_tensor=True)
+
+     # Compute cosine similarities
+     cosine_scores = util.cos_sim(embeddings1, embeddings2)
+
+     for i in range(len(sentences1)):
+         hashed_ids.append(data[i].guid)
+         marks.append(data[i].label)
+         scores.append(round(cosine_scores[i][i].item(), 3))
+     df['Hashed_id'] = hashed_ids
+     df['Nota'] = marks
+     df['Similitud Semántica'] = scores
+     return df
+
+ Modelos = gr.inputs.Dropdown(["dccuchile_bert-base-spanish-wwm-uncased"
+                               , "bert-base-multilingual-uncased"
+                               , "all-distilroberta-v1"
+                               , "paraphrase-multilingual-mpnet-base-v2"
+                               , "paraphrase-multilingual-MiniLM-L12-v2"
+                               , "distiluse-base-multilingual-cased-v1"])
+ Opciones = gr.inputs.Radio(["Comparar Textos", "Procesar Fichero"])
+ Text1Input = gr.inputs.Textbox(lines=10, placeholder="Escriba el texto aqui ...")
+ Text2Input = gr.inputs.Textbox(lines=10, placeholder="Escriba el otro texto aqui ...")
+ FileInput = gr.inputs.File(label="Fichero JSON")  # file upload used by the "Procesar Fichero" option
+ LabelOutput = gr.outputs.Label(num_top_classes=None, type="auto", label="")
+ DataFrameOutput = gr.outputs.Dataframe(headers=["Hashed_id", "Nota", "Similitud Semántica"]
+                                        , max_rows=20, max_cols=None, overflow_row_behaviour="paginate", type="auto", label="Resultado")
+
+ iface = gr.Interface(fn=Main
+                      , inputs=[Modelos, Opciones, Text1Input, Text2Input, FileInput]
+                      , outputs=[LabelOutput, DataFrameOutput]
+                      , title="Similitud Semántica de textos en Español de tamaño medio (200-250 palabras)"
+                      )
+
+ iface.launch(share=True, enable_queue=True, show_error=True)
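
For reference, the "Procesar Fichero" path expects a JSON (JSON Lines) file whose records carry the fields that ConvertJsonToList reads: hashed_id, nota, respuesta and metadata.minipreguntas[].minirespuesta. The sketch below only illustrates that file shape; the values and the file name ejemplo.json are invented, not taken from the commit.

import json
from datasets import load_dataset

# One illustrative record with the fields ConvertJsonToList reads; values are invented.
record = {
    "hashed_id": "abc123",
    "nota": 7.5,
    "respuesta": "Respuesta del estudiante ...",
    "metadata": {
        "minipreguntas": [
            {"minirespuesta": "Respuesta de referencia del profesor ..."}
        ]
    }
}

# ConvertJsonToList loads the file with load_dataset("json", ...), so one JSON object
# per line is enough.
with open("ejemplo.json", "w", encoding="utf-8") as f:
    f.write(json.dumps(record, ensure_ascii=False) + "\n")

ds = load_dataset("json", data_files="ejemplo.json")
print(ds["train"][0]["hashed_id"], ds["train"][0]["nota"])

Uploading a file in this shape through the file input and selecting "Procesar Fichero" sends each record through TestModel, which reports the cosine similarity between the teacher and student texts per record.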