Rahmat82 committed on
Commit f70b29b · verified · 1 Parent(s): 8e2815a

optimum usage

Files changed (1)
  1. app.py +18 -4
app.py CHANGED
@@ -1,12 +1,26 @@
+ # import gradio as gr
+ # from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ # import torch
+
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ # model_id = "Rahmat82/DistilBERT-finetuned-on-emotion"
+ # tokenizer = AutoTokenizer.from_pretrained(model_id, return_tensors="pt", use_fast=True)
+ # model = AutoModelForSequenceClassification.from_pretrained(model_id)
+ # model.to(device)
+
+
+
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from transformers import pipeline, AutoTokenizer
+ from optimum.onnxruntime import ORTModelForSequenceClassification
  import torch

  device = 'cuda' if torch.cuda.is_available() else 'cpu'

- model_id = "Rahmat82/DistilBERT-finetuned-on-emotion"
- tokenizer = AutoTokenizer.from_pretrained(model_id, return_tensors="pt", use_fast=True)
- model = AutoModelForSequenceClassification.from_pretrained(model_id)
+ model_name = "Rahmat82/DistilBERT-finetuned-on-emotion"
+ model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True)
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
  model.to(device)

  def predict(query: str) -> dict:
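
The hunk stops at the `predict` signature, so the function body and the Gradio wiring are outside this commit. Because the new imports pull in `pipeline` alongside the ONNX Runtime model, a plausible continuation of app.py would hand the exported model to a text-classification pipeline, roughly as sketched below. The `classifier` name, the `top_k=None` argument, the body of `predict`, and the `gr.Interface` call are assumptions for illustration and are not part of the diff.

    import gradio as gr
    from transformers import pipeline, AutoTokenizer
    from optimum.onnxruntime import ORTModelForSequenceClassification
    import torch

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model_name = "Rahmat82/DistilBERT-finetuned-on-emotion"
    # export=True converts the PyTorch checkpoint to ONNX at load time,
    # then inference runs through ONNX Runtime instead of PyTorch
    model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
    model.to(device)

    # assumption: the `pipeline` import added in this commit wraps the ORT model
    classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=None)

    def predict(query: str) -> dict:
        # assumed body: with top_k=None set above, a single string yields a
        # nested list of {label, score} dicts; flatten it for gr.Label
        scores = classifier(query)[0]
        return {item["label"]: item["score"] for item in scores}

    # assumed UI wiring; the actual interface definition is not shown in this hunk
    gr.Interface(fn=predict, inputs="text", outputs=gr.Label()).launch()

If the app is structured this way, the change behind the "optimum usage" message trades a one-time ONNX export at startup for ONNX Runtime inference afterwards, while the pipeline keeps the prediction code the same as with the plain transformers model.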