sarim committed on
Commit
5a9b6a8
·
1 Parent(s): d2725d2

fast api and streamlit

Browse files
Files changed (2) hide show
  1. app.py +37 -12
  2. requirements.txt +8 -6
app.py CHANGED
@@ -1,18 +1,43 @@
 
 
 
 
 
1
  from transformers import pipeline
2
  import streamlit as st
3
- import os
4
 
5
- print(os.popen(f'apt search tesseract').read())
6
 
7
- nlp = pipeline(
8
- "document-question-answering",
9
- model="impira/layoutlm-document-qa",
10
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- nlp(
13
- "https://templates.invoicehome.com/invoice-template-us-neat-750px.png",
14
- "What is the invoice number?"
15
- )
 
 
 
 
 
 
 
16
 
17
- st.title("Hello Document OCR")
18
- st.write("Vat amount")
 
 
1
"""Streamlit demo: Document Question Answering (DocQA).

Loads the `impira/layoutlm-document-qa` pipeline from 🤗 transformers and
answers a fixed question about a sample invoice image, rendering the raw
pipeline output in the Streamlit app.
"""
from transformers import pipeline
import streamlit as st


# Streamlit re-executes this entire script on every user interaction.
# Without caching, the LayoutLM model (hundreds of MB) would be re-loaded
# — and potentially re-downloaded — on each rerun. st.cache (streamlit
# 0.84.x API) memoizes the loader; allow_output_mutation skips hashing the
# (unhashable, internally mutable) pipeline object on cache lookups.
@st.cache(allow_output_mutation=True)
def load_pipeline():
    """Build the document-question-answering pipeline exactly once."""
    return pipeline(
        "document-question-answering",
        model="impira/layoutlm-document-qa",
    )


pipe = load_pipeline()

# The pipeline accepts an image URL directly; it fetches and OCRs it
# (pytesseract) before running LayoutLM.
image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
question = "What is the invoice number?"

output = pipe(image, question)

st.title("Hello Document OCR")
st.write(output)
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
  streamlit==0.84.1
2
- transformers
3
- Pillow
4
- pytesseract
5
- tesseract
6
- torch>=1.7.0
7
- torchvision>=0.8.1
 
 
 
1
  streamlit==0.84.1
2
+ fastapi==0.74.*
3
+ requests==2.27.*
4
+ uvicorn[standard]==0.17.*
5
+ sentencepiece==0.1.*
6
+ torch==1.11.*
7
+ transformers[vision]==4.*
8
+ pytesseract==0.3.10
9
+ python-multipart==0.0.6