File size: 1,402 Bytes
26ce2b9
c6d903d
 
7ff0c05
 
c6d903d
c6d9fb0
 
 
 
bae20b2
7ff0c05
 
 
c6d903d
a37094d
c6d903d
7ff0c05
c6d903d
4a93af1
 
 
ed909e7
10d7087
5d35835
89fe9f7
7ff0c05
 
ab926cc
7ff0c05
ad0751b
6ec95e4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
from flask import Flask, jsonify, request, render_template
from transformers import AutoAdapterModel, AutoTokenizer, TextClassificationPipeline

# tokenizer = AutoTokenizer.from_pretrained("UBC-NLP/MARBERT")
# model = AutoAdapterModel.from_pretrained("UBC-NLP/MARBERT")

# sarcasm_adapter = Repository(local_dir="sarcasm_adapter", clone_from="nehalelkaref/sarcasm_adapter")
# aoc3_adapter = Repository(local_dir="aoc3_adapter", clone_from="nehalelkaref/aoc3_adapter")
# aoc4_adapter = Repository(local_dir="aoc4_adapter", clone_from="nehalelkaref/aoc4_adapter")
# fusion_adapter = Repository(local_dir="fusion_adapter", clone_from="nehalelkaref/region_fusion")

# model.load_adapter("nehalelkaref/aoc3_adapter", set_active=True, with_head=False, source="hf")
# model.load_adapter("nehalelkaref/aoc4_adapter", set_active=True, with_head=False, source="hf")
# model.load_adapter("nehalelkaref/sarcasm_adapter", set_active=True, with_head=False, source="hf")

# model.load_adapter_fusion("nehalelkaref/region_fusion",with_head=True, set_active=True, source="hf")

# pipe = TextClassificationPipeline(tokenizer=tokenizer, model=model)


# Flask application instance; the route handlers below register against it.
app = Flask(__name__)

@app.route("/", methods=['GET'])
def home():
    """Render the landing page (input form) for the classifier."""
    page = render_template('home.html')
    return page

@app.route('/classify', methods = ['POST'])
def classify():
    """Handle the classification form POST and render the prediction page.

    Bug fixed: the original body rendered ``prediction.html`` with an
    undefined name ``output`` (the MARBERT pipeline that would produce it
    is commented out above), so every POST raised ``NameError``.

    Until the adapter pipeline is restored, read the submitted text and
    pass a well-defined placeholder to the template instead.
    """
    # NOTE(review): assumes the home form posts its text under the field
    # name 'text' — confirm against templates/home.html.
    text = request.form.get('text', '')
    # TODO: once the commented-out pipeline above is re-enabled, replace
    # the placeholder with:  output = pipe(text)
    output = {'input': text, 'label': 'unavailable (model not loaded)'}
    return render_template('prediction.html', output=output)

if __name__ == "__main__":
    # Start the Flask development server (not intended for production).
    app.run()