# imageORC / app.py
from io import BytesIO

from fastapi import FastAPI, File, Form
from PIL import Image
from transformers import pipeline
import streamlit as st

# FastAPI, File, Form, and BytesIO are only needed by the commented-out
# FastAPI endpoint further below; the active demo runs through Streamlit.
# description = """
# ## DocQA with 🤗 transformers, FastAPI, and Docker
# This app shows how to do Document Question Answering using
# FastAPI in a Docker Space 🚀
# Check out the docs for the `/predict` endpoint below to try it out!
# """
# NOTE - we configure docs_url to serve the interactive Docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
# app = FastAPI(docs_url="/", description=description)
# Load the document-question-answering pipeline (LayoutLM fine-tuned for DocQA).
pipe = pipeline("document-question-answering", model="impira/layoutlm-document-qa")

# Run the pipeline on a sample invoice image (fetched by URL) with a fixed
# question, and display the raw prediction in the Streamlit app.
image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
question = "What is the invoice number?"
output = pipe(image, question)
st.write(output)
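
# Optional extension (a sketch, not part of the original app): let the user
# supply their own document and question through Streamlit widgets instead of
# the hard-coded URL above. The widget labels and defaults are assumptions.
uploaded_file = st.file_uploader("Upload a document image", type=["png", "jpg", "jpeg"])
user_question = st.text_input("Question about the document", value=question)
if uploaded_file is not None and user_question:
    # Streamlit's UploadedFile is file-like, so PIL can read it directly.
    user_image = Image.open(uploaded_file)
    st.image(user_image, caption="Uploaded document")
    st.write(pipe(user_image, user_question))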
# @app.post("/predict")
# def predict(image_file: bytes = File(...), question: str = Form(...)):
# """
# Using the document-question-answering pipeline from `transformers`, take
# a given input document (image) and a question about it, and return the
# predicted answer. The model used is available on the hub at:
# [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa).
# """
# image = Image.open(BytesIO(image_file))
# output = pipe(image, question)
# return output
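# Example client call for the /predict endpoint above (a sketch, assuming the
# FastAPI variant is enabled and served on port 7860, the usual port for Docker
# Spaces; the file name "invoice.png" is hypothetical):
#
#   import requests
#   with open("invoice.png", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/predict",
#           files={"image_file": f},
#           data={"question": "What is the invoice number?"},
#       )
#   print(resp.json())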
# @app.get("/hello")
# def read_root():
# return {"Hello": "World"}
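# To switch back to the FastAPI variant, uncomment the description, app, and
# endpoint blocks above, remove the Streamlit demo code, and serve the app with
# uvicorn (a sketch; port 7860 is an assumption, matching Docker Spaces):
#
#   import uvicorn
#   uvicorn.run(app, host="0.0.0.0", port=7860)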