Update app.py
app.py CHANGED
@@ -1,16 +1,13 @@
-import streamlit as st
-import requests
-import json
 import os
+import requests
+import streamlit as st
 
 # Streamlit app setup
 st.title('Llama Cloud API Document Extraction')
 
-
 # File Upload
 uploaded_file = st.file_uploader('Choose a PDF file', type='pdf')
-
-api_key = os.getenv('api_key')
+api_key = os.getenv('API_KEY')
 
 if uploaded_file is not None:
     # Upload file to Llama Cloud
@@ -20,7 +17,7 @@ if uploaded_file is not None:
     }
     files = {'upload_file': (uploaded_file.name, uploaded_file, 'application/pdf')}
     response = requests.post('https://api.cloud.llamaindex.ai/api/v1/files', headers=headers, files=files)
-    file_id = response.json()
+    file_id = response.json().get('id')
     st.write(f'File uploaded with ID: {file_id}')
 
     # Infer Schema
@@ -29,7 +26,7 @@ if uploaded_file is not None:
         'file_ids': [file_id]
     }
     schema_response = requests.post('https://api.cloud.llamaindex.ai/api/v1/extraction/schemas/infer', headers=headers, json=schema_data)
-    schema_id = schema_response.json()
+    schema_id = schema_response.json().get('id')
     st.write(f'Schema inferred with ID: {schema_id}')
 
     # Start Extraction Job
@@ -38,12 +35,12 @@ if uploaded_file is not None:
         'file_id': file_id
     }
     job_response = requests.post('https://api.cloud.llamaindex.ai/api/v1/extraction/jobs', headers=headers, json=job_data)
-    job_id = job_response.json()
+    job_id = job_response.json().get('id')
    st.write(f'Extraction job started with ID: {job_id}')
 
     # Check Job Status
     status_response = requests.get(f'https://api.cloud.llamaindex.ai/api/v1/extraction/jobs/{job_id}', headers=headers)
-    status = status_response.json()
+    status = status_response.json().get('status')
     st.write(f'Job Status: {status}')
 
     # Display Results
@@ -51,6 +48,6 @@ if uploaded_file is not None:
         results_response = requests.get(f'https://api.cloud.llamaindex.ai/api/v1/extraction/jobs/{job_id}/result', headers=headers)
         results = results_response.json()
         st.write('### Extraction Results:')
-        st.
+        st.json(results)
     else:
         st.write('Extraction job is still in progress or has failed.')
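Note: the updated script still reads the job status only once and then branches. A follow-up could poll the job until it reaches a terminal state before fetching results. The sketch below shows one way to do that against the same endpoints used in the diff; the terminal status values 'SUCCESS' and 'ERROR', the polling interval, and the helper name are assumptions, not confirmed by this change.

# Minimal polling sketch (assumptions noted above), same 'status' and result
# endpoints as in the updated app.py.
import time

import requests

BASE_URL = 'https://api.cloud.llamaindex.ai/api/v1'

def wait_for_extraction(job_id, headers, timeout=120):
    """Poll the extraction job until it finishes, then return its results (or None)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        status_response = requests.get(f'{BASE_URL}/extraction/jobs/{job_id}', headers=headers)
        status = status_response.json().get('status')
        if status == 'SUCCESS':  # assumed success value
            results_response = requests.get(f'{BASE_URL}/extraction/jobs/{job_id}/result', headers=headers)
            return results_response.json()
        if status == 'ERROR':  # assumed failure value
            return None
        time.sleep(2)  # wait briefly before checking again
    return None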