File size: 1,327 Bytes
93a2217
e1b04d1
bf5c1c9
bc27fb1
90151e1
e3db2f8
90151e1
 
e1b04d1
90151e1
 
5517f9c
e3db2f8
 
93a2217
e1b04d1
 
207c16a
bf5c1c9
 
93a2217
c36c2b7
93a2217
 
 
207c16a
93a2217
 
 
 
 
 
e3db2f8
9920987
987e371
9920987
 
987e371
 
9920987
 
93a2217
9920987
93a2217
 
c5527cd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from flask import Flask, request, render_template, jsonify
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hugging Face model id for the conversational model. Larger variants give
# better replies but need more RAM; smaller ones load faster (see list below).
model_name = "facebook/blenderbot-1B-distill"

# https://huggingface.co/models?sort=trending&search=facebook%2Fblenderbot
# facebook/blenderbot-3B
# facebook/blenderbot-1B-distill
# facebook/blenderbot-400M-distill
# facebook/blenderbot-90M
# facebook/blenderbot_small-90M

# https://www.youtube.com/watch?v=irjYqV6EebU

# Flask application serving the chat UI and the generation API below.
app = Flask("AI API")
# Load model and tokenizer once at startup (downloads weights on first run,
# then reads from the local Hugging Face cache). Blenderbot is a
# sequence-to-sequence model, hence AutoModelForSeq2SeqLM.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

@app.get("/")
def read_root():
  """Serve the chat front-end page."""
  # Render the UI template from the app's templates/ directory.
  page = render_template("index.html")
  return page

@app.route("/test")
def test_route():
  """Liveness check: returns a fixed plain-text message."""
  message = "This is a test route."
  return message

@app.route("/api", methods=["POST"])
def receive_data():
  """Generate a chat reply for the posted prompt.

  Expects a JSON body of the form ``{"prompt": "<text>"}`` and returns
  ``{"answer": "<generated text>"}``. Responds with HTTP 400 (instead of
  an unhandled 500) when the body is not JSON or lacks a "prompt" field.
  """
  # silent=True yields None on a malformed/non-JSON body rather than
  # raising, so we can return a clear 400 error ourselves.
  data = request.get_json(silent=True)
  if not data or "prompt" not in data:
    return jsonify({"error": "JSON body with a 'prompt' field is required"}), 400

  input_text = data["prompt"]
  print("Prompt:", input_text)

  # Tokenize the input text into model-ready PyTorch tensors.
  input_ids = tokenizer.encode(input_text, return_tensors="pt")

  # Generate output using the model; beam search plus no_repeat_ngram_size
  # blocks repeated 2-grams to reduce repetitive phrasing in the reply.
  output_ids = model.generate(input_ids, num_beams=5, no_repeat_ngram_size=2)
  generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

  answer_data = { "answer": generated_text }
  print("Response:", generated_text)

  return jsonify(answer_data)

app.run(host="0.0.0.0", port=7860, debug=False)