Steph974 committed on
Commit 2a96c49 · verified · 1 Parent(s): 92b35cc

Upload app.py

Files changed (1)
  1. app.py +69 -0
app.py ADDED
@@ -0,0 +1,69 @@
+ # Imports
+
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ from sklearn import metrics
+ import torch
+ from torch.utils.data import Dataset, DataLoader
+ from transformers import AutoModel, AutoTokenizer
+
+ import gradio as gr
+ from gradio.components import Label
+
+ # Load the base encoder that the fine-tuned classifier wraps
+ path = "./weights"
+ model = AutoModel.from_pretrained(path, trust_remote_code=True)
+
+ class CamembertClass(torch.nn.Module):
+     def __init__(self):
+         super(CamembertClass, self).__init__()
+         self.l1 = model
+         self.dropout = torch.nn.Dropout(0.1)
+         self.pre_classifier = torch.nn.Linear(1024, 1024)
+         self.classifier = torch.nn.Linear(1024, 3)
+
+     def forward(self, input_ids, attention_mask, token_type_ids):
+         output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
+         hidden_state = output_1[0]
+         pooler = hidden_state[:, 0]  # representation of the first ([CLS]) token
+         pooler = self.pre_classifier(pooler)
+         pooler = torch.nn.ReLU()(pooler)
+         pooler = self.dropout(pooler)
+         output = self.classifier(pooler)
+         return output
+
+ # model_gradio = CamembertClass()
+ # Load the full fine-tuned model; the class definition above is needed for unpickling
+ path = "./pytorch_model.bin"
+ model = torch.load(path, map_location="cpu")
+ path_tokenizer = "./"
+ tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)
+
+ model.eval()  # Put the model in evaluation mode
+
+ # Inference function for Gradio
+ def predict(text):
+     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+
+     # Extract the inputs the model needs
+     input_ids = inputs['input_ids']
+     attention_mask = inputs['attention_mask']
+     token_type_ids = inputs.get('token_type_ids', None)  # some models do not use segment IDs
+
+     # Make the prediction
+     with torch.no_grad():
+         # The model returns logits directly
+         logits = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
+
+     # Convert logits to probabilities
+     probabilities = torch.softmax(logits, dim=1).detach().cpu().numpy()[0]
+     # Replace the following with your actual classes
+     # (note: the classifier head outputs 3 logits, so a third label is probably missing here)
+     classes = ['Negative Sentiment', 'Positive Sentiment']
+     return {classes[i]: float(probabilities[i]) for i in range(len(classes))}
+
+ # Build the Gradio interface
+ iface = gr.Interface(fn=predict,
+                      inputs=gr.components.Textbox(placeholder="Enter your text here..."),
+                      outputs=gr.components.Label(num_top_classes=2))
+ iface.launch(share=True)
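
For reference, gr.components.Label consumes exactly the mapping that predict returns: class names to confidence scores. A minimal sketch of that shape, with purely illustrative numbers and the two placeholder labels used in app.py:

# Shape of the dictionary predict() hands to gr.components.Label
# (the probabilities below are made up for illustration).
example_output = {
    "Negative Sentiment": 0.12,  # confidence for the first class
    "Positive Sentiment": 0.88,  # confidence for the second class
}
# Label(num_top_classes=2) renders these as a ranked list of confidences.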