import os
# os.system('chmod 777 /tmp')
# os.system('apt-get update -y')
# os.system('apt-get install tesseract-ocr -y')
# os.system('pip install -q pytesseract')
from base64 import b64decode, b64encode
from io import BytesIO
import tesserocr
from fastapi import FastAPI, File, Form
from PIL import Image
from transformers import pipeline
# pytesseract.pytesseract.tesseract_cmd = r'./Tesseract-OCR/tesseract.exe'
# Languages available to the local Tesseract install; `tesseract --list-langs`
# prints a header line first and a trailing empty line, so both are skipped.
choices = os.popen('tesseract --list-langs').read().split('\n')[1:-1]
description = """
## DocQA with 🤗 transformers, FastAPI, and Docker
This app shows how to do Document Question Answering using
FastAPI in a Docker Space 🚀
Check out the docs for the `/predict` endpoint below to try it out!
"""
# NOTE - the interactive docs are served at the default /docs path; the root
# path below is kept as a simple landing / health-check route for the Space.
app = FastAPI(description=description)
pipe = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
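# Note: the document-question-answering pipeline accepts a PIL image or an
# image URL together with a question. In current transformers versions it
# typically returns a list of candidate answers shaped roughly like
# [{"score": ..., "answer": ..., "start": ..., "end": ...}]; OCR for images
# without precomputed word boxes relies on the Tesseract binary being installed.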
@app.post("/predict")
def predict(image_file: bytes = File(...), question: str = Form(...)):
    """
    Using the document-question-answering pipeline from `transformers`, take
    a given input document (image) and a question about it, and return the
    predicted answer. The model used is available on the hub at:
    [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa).
    """
    image = Image.open(BytesIO(image_file))
    output = pipe(image, question)
    return output
@app.get("/")
def root():
return {"Hello":"world"}
@app.get("/hello")
def read_root():
image = 'https://templates.invoicehome.com/invoice-template-us-neat-750px.png'
question = "What is the invoice number?"
output = pipe(image, question)
return output
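# Example usage (a sketch, assuming the app is served on port 7860 as is typical
# for a Docker Space, and that `invoice.png` is a placeholder local file):
#
#   curl -X POST "http://localhost:7860/predict" \
#        -F "image_file=@invoice.png" \
#        -F "question=What is the invoice number?"
#
if __name__ == "__main__":
    # Allow running the app directly for local testing; on Spaces the server is
    # usually started by the Dockerfile instead. Assumes uvicorn is installed.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)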