|
from flask import Flask, request, jsonify |
|
import torch |
|
from transformers import AutoModel, AutoTokenizer |
|
from fastsafetensors import safe_load |
|
|
|
|
|
myapp = Flask(__name__)

# Model to serve.
# NOTE(review): "prompthero/openjourney-v4" is a Stable Diffusion checkpoint;
# AutoModel/AutoTokenizer load only its text-encoder-style components, not the
# full diffusion pipeline — confirm this is the intended behavior.
model_name = "prompthero/openjourney-v4"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Let from_pretrained resolve and download the safetensors weights itself.
# The previous code passed an "https://huggingface.co/.../blob/..." URL to
# fastsafetensors' safe_load(), which expects a *local* file path — and the
# "/blob/" URL returns an HTML page rather than the raw weight file — so the
# manual load could never succeed.
model = AutoModel.from_pretrained(model_name).to("cpu")

# Inference-only service: disable dropout / switch norm layers to eval mode.
model.eval()
|
|
|
@myapp.route('/')
def index():
    """Landing endpoint for the API root; doubles as a simple liveness check."""
    greeting = "Welcome to the AI Model API!"
    return greeting
|
|
|
@myapp.route('/generate', methods=['POST'])
def generate_output():
    """Run the model on a text prompt and return its outputs as JSON.

    Expects a JSON body like ``{"prompt": "..."}``; falls back to a default
    prompt when the body or the key is missing.
    """
    # silent=True: a missing or malformed JSON body yields None instead of a
    # 400 abort (request.json would abort), so the default-prompt fallback
    # below actually works.
    data = request.get_json(silent=True) or {}
    prompt = data.get('prompt', 'Hello, world!')

    inputs = tokenizer(prompt, return_tensors="pt")

    # Inference only — no autograd graph needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # Model outputs contain torch.Tensors, which jsonify() cannot serialize
    # (the original `jsonify(outputs)` raised TypeError); convert tensor
    # fields to plain Python lists first.
    payload = {
        key: value.tolist()
        for key, value in outputs.items()
        if isinstance(value, torch.Tensor)
    }
    return jsonify(payload)
|
|
|
if __name__ == "__main__": |
|
myapp.run(host='0.0.0.0', port=5000) |