File size: 878 Bytes
b215f2c d396c6b 2b3d2ae b215f2c 9184993 b215f2c aa4c474 b215f2c 2b3d2ae b215f2c 9184993 26236d1 b215f2c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 |
from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModel
app = Flask(__name__)

# Model identifier and commit pin.  The revision pin matters: with
# trust_remote_code=True the hub repo's own Python code is executed, so
# pinning to a known commit prevents silently running changed upstream code.
MODEL_ID = 'stepfun-ai/GOT-OCR2_0'
MODEL_REVISION = 'cf6b7386bc89a54f09785612ba74cb12de6fa17c'

# Pre-bind to None so a failed load leaves the names defined; request
# handlers can then test `model is None` instead of crashing with NameError.
tokenizer = None
model = None

# Load model and tokenizer once at import time.
try:
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_ID, revision=MODEL_REVISION, trust_remote_code=True
    )
    model = AutoModel.from_pretrained(
        MODEL_ID, revision=MODEL_REVISION, trust_remote_code=True
    )
except Exception as e:
    # Keep the app importable even when the model cannot be fetched
    # (e.g. no network access); endpoints must cope with model is None.
    print(f"Error loading model and tokenizer: {e}")
@app.route('/predict', methods=['POST'])
def predict():
    """Handle a POST /predict request.

    Expects a JSON body describing the image to run OCR on.  Returns a
    400 JSON error when the body is missing or not valid JSON; otherwise
    returns a placeholder success message (inference not yet wired in).
    """
    # silent=True makes get_json return None on a missing/malformed body
    # (instead of raising), so we can reply with a clean 400 ourselves.
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({"error": "Request body must be valid JSON"}), 400
    # TODO: add the model inference logic here, e.g. run `model` on the
    # decoded image payload in `data` and return its output.
    return jsonify({"message": "Prediction made successfully!"})
if __name__ == "__main__":
    # Bind to every interface so the service is reachable from outside a
    # container; change the port below if 5000 is already taken.
    app.run(host='0.0.0.0', port=5000)
|