Hemasagar committed
Commit 1178ae5 · verified · 1 parent: 7823821

Create utils.py

Files changed (1):
  utils.py (+114, −0)
utils.py ADDED
@@ -0,0 +1,114 @@
+ # As the LangChain team has been working aggressively on improving the tool, a lot of changes land every week.
+ # As part of that, the import below has been deprecated:
+ # from langchain.llms import OpenAI
+ from langchain_openai import OpenAI
+
+ from pypdf import PdfReader
+ # from langchain.llms.openai import OpenAI
+ import pandas as pd
+ import re
+ # import replicate
+ from langchain.prompts import PromptTemplate
+ from langchain_community.llms import CTransformers
+ from ctransformers import AutoModelForCausalLM
+
+
+ # Extract the text from a PDF file
+ def get_pdf_text(pdf_doc):
+     text = ""
+     pdf_reader = PdfReader(pdf_doc)
+     for page in pdf_reader.pages:
+         text += page.extract_text()
+     return text
+
+ # filename = r"/Invoice_Extraction_Bot/Invoice/invoice_1001329.pdf"
+ # raw_data = get_pdf_text(filename)
+
+ # Function to extract structured fields from the raw text
+ def extracted_data(pages_data):
+     template = """Please extract all the following values: Invoice no., Description, Quantity, Date,
+     Unit price, Amount, Total, Email, Phone number and Address from this data: {pages}
+     Expected output: remove any dollar symbols {{'Invoice no.': '1001329','Description': 'Office Chair','Quantity': '2','Date': '5/4/2023','Unit price': '1100.00$','Amount': '2200.00$','Total': '2200.00$','Email': '[email protected]','Phone number': '9999999999','Address': 'Mumbai, India'}}
+     """
+     prompt_template = PromptTemplate(input_variables=["pages"], template=template)
+
+     # llm = OpenAI(temperature=.7)
+     # full_response = llm(prompt_template.format(pages=pages_data))
+
+     # The code below would be used when we want to run the LLaMA 2 model, with Replicate hosting the model.
+     # output = CTransformers(model=r"TheBloke/llama-2-7b-chat.ggmlv3.q8_0.bin",  # https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
+     #                        model_type='llama',
+     #                        input={"prompt": prompt_template.format(pages=pages_data),
+     #                               "temperature": 0.1, "top_p": 0.9, "max_length": 512, "repetition_penalty": 1})
+
+     # Load the quantized LLaMA 2 chat model locally and run the formatted prompt through it
+     llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML", model_file="llama-2-7b-chat.ggmlv3.q8_0.bin")
+     output_text = llm(prompt_template.format(pages=pages_data))
+
+     # Concatenate the generated pieces into a single string
+     full_response = ''
+     for item in output_text:
+         full_response += item
+
+     # print(full_response)
+     return full_response
+
+ # print(raw_data)
+ # print("extracted raw data")
+ # llm_extracted_data = extracted_data(raw_data)
+ # print(llm_extracted_data)
+
+ # Iterate over the PDF files that the user uploaded, one by one
+ def create_docs(filename):
+     df = pd.DataFrame({'Invoice no.': pd.Series(dtype='str'),
+                        'Description': pd.Series(dtype='str'),
+                        'Quantity': pd.Series(dtype='str'),
+                        'Date': pd.Series(dtype='str'),
+                        'Unit price': pd.Series(dtype='str'),
+                        'Amount': pd.Series(dtype='str'),
+                        'Total': pd.Series(dtype='str'),
+                        'Email': pd.Series(dtype='str'),
+                        'Phone number': pd.Series(dtype='str'),
+                        'Address': pd.Series(dtype='str')
+                        })
+
+     for pdf_file in filename:
+         print(pdf_file)
+         raw_data = get_pdf_text(pdf_file)
+         print(raw_data)
+         # print("extracted raw data")
+
+         llm_extracted_data = extracted_data(raw_data)
+         # print(llm_extracted_data)
+         # print("llm extracted data")
+
+         # Adding items to our list - adding the data and its metadata
+         # Pull the {...} dictionary-like portion out of the LLM response
+         pattern = r'{(.+)}'
+         match = re.search(pattern, llm_extracted_data, re.DOTALL)
+
+         if match:
+             extracted_text = match.group(1)
+             # Converting the extracted text to a dictionary
+             data_dict = eval('{' + extracted_text + '}')
+             print(data_dict)
+         else:
+             print("No match found.")
+             # Initialize data_dict so the append below still works
+             data_dict = {}
+
+         df = df._append([data_dict], ignore_index=True)
+         print("********************DONE***************")
+         # df = df.append(save_to_dataframe(llm_extracted_data), ignore_index=True)
+
+     df.head()
+     return df
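For reference, a minimal usage sketch of create_docs, assuming the PDFs are passed as plain file paths; the paths and the output CSV name below are placeholders, not part of this commit:

from utils import create_docs

if __name__ == "__main__":
    # Placeholder paths for illustration only; create_docs accepts an iterable
    # of PDF paths or file-like objects and returns a pandas DataFrame.
    pdf_files = ["invoice_1001329.pdf", "invoice_1001330.pdf"]
    invoices_df = create_docs(pdf_files)
    print(invoices_df.head())
    invoices_df.to_csv("invoices.csv", index=False)  # persist the extracted fields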
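The dictionary parsing in create_docs relies on eval; a safer alternative sketch, assuming the model returns a plain Python dict literal, would use ast.literal_eval. The helper name parse_llm_dict is hypothetical and not part of this commit:

import ast
import re

def parse_llm_dict(llm_output: str) -> dict:
    """Parse the {...} portion of the LLM response without executing arbitrary code."""
    match = re.search(r'{(.+)}', llm_output, re.DOTALL)
    if not match:
        return {}
    try:
        # literal_eval only accepts Python literals (strings, numbers, dicts, ...)
        return ast.literal_eval('{' + match.group(1) + '}')
    except (ValueError, SyntaxError):
        # Malformed model output; fall back to an empty record
        return {}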