# The LangChain team has been iterating on the library aggressively, so imports change week to week.
# As part of that, the import below has been deprecated:
# from langchain.llms import OpenAI
from langchain_openai import OpenAI

from pypdf import PdfReader
# from langchain.llms.openai import OpenAI   # older deprecated import path, kept for reference
import pandas as pd
import re
import ast  # used below to safely parse the LLM's dict-style output
# import replicate  # only needed if hosting the model on Replicate
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers  # LangChain wrapper for local GGML models (alternative path below)
from ctransformers import AutoModelForCausalLM
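
# Assumed dependencies (package names are an assumption, not pinned by this script):
#   pip install pypdf pandas langchain langchain-community langchain-openai ctransformers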



# Extract the raw text from a PDF file
def get_pdf_text(pdf_doc):
    text = ""
    pdf_reader = PdfReader(pdf_doc)
    for page in pdf_reader.pages:
        text += page.extract_text() or ""  # extract_text() can return None for image-only pages
    return text

# filename = r"/Invoice_Extraction_Bot/Invoice/invoice_1001329.pdf"

# raw_data=get_pdf_text(filename)
# Ask the LLM to extract structured invoice fields from the raw PDF text
def extracted_data(pages_data):
    template = """Please extract all the following values: invoice no., Description, Quantity, Date,
        Unit price, Amount, Total, Email, Phone number and Address from this data: {pages}
        Expected output (remove any dollar symbols): {{'Invoice no.': '1001329','Description': 'Office Chair','Quantity': '2','Date': '5/4/2023','Unit price': '1100.00','Amount': '2200.00','Total': '2200.00','Email': '[email protected]','Phone number': '9999999999','Address': 'Mumbai, India'}}
        """
    prompt_template = PromptTemplate(input_variables=["pages"], template=template)

    # Hosted-OpenAI path (uncomment to use the OpenAI API instead of a local model):
    # llm = OpenAI(temperature=0.7)
    # full_response = llm.invoke(prompt_template.format(pages=pages_data))

    # Alternative local path via LangChain's CTransformers wrapper for the same Llama 2
    # GGML model (https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main):
    # llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
    #                     model_file="llama-2-7b-chat.ggmlv3.q8_0.bin",
    #                     model_type="llama",
    #                     config={"temperature": 0.1, "top_p": 0.9,
    #                             "max_new_tokens": 512, "repetition_penalty": 1.0})
    # full_response = llm.invoke(prompt_template.format(pages=pages_data))

    # Current path: download the quantized Llama 2 chat model from Hugging Face and run it locally
    llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML",
                                               model_file="llama-2-7b-chat.ggmlv3.q8_0.bin")
    # Calling the model returns the generated text as a single string
    full_response = llm(prompt_template.format(pages=pages_data))

    # print(full_response)
    return full_response
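
# A minimal, hedged sketch of the hosted-OpenAI alternative referenced in the commented-out
# code above. The helper name extracted_data_openai is ours (not part of the original script)
# and it assumes the OPENAI_API_KEY environment variable is set.
def extracted_data_openai(pages_data):
    prompt_template = PromptTemplate(
        input_variables=["pages"],
        template="""Please extract all the following values: invoice no., Description, Quantity, Date,
        Unit price, Amount, Total, Email, Phone number and Address from this data: {pages}""",
    )
    llm = OpenAI(temperature=0.7)  # langchain_openai.OpenAI, imported at the top of the file
    # invoke() takes the formatted prompt string and returns the completion as a string
    return llm.invoke(prompt_template.format(pages=pages_data))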

# print(raw_data)
# print("extracted raw data")
# llm_extracted_data = extracted_data(raw_data)
# print(llm_extracted_data)

# Iterate over the user-uploaded PDF files one by one and collect the results in a DataFrame
def create_docs(pdf_list):
    
    df = pd.DataFrame({'Invoice no.': pd.Series(dtype='str'),
                       'Description': pd.Series(dtype='str'),
                       'Quantity': pd.Series(dtype='str'),
                       'Date': pd.Series(dtype='str'),
                       'Unit price': pd.Series(dtype='str'),
                       'Amount': pd.Series(dtype='str'),  # extracted values are strings, so keep the dtype consistent
                       'Total': pd.Series(dtype='str'),
                       'Email': pd.Series(dtype='str'),
                       'Phone number': pd.Series(dtype='str'),
                       'Address': pd.Series(dtype='str')
                       })

    

    
    for filename in pdf_list:

        print(filename)
        raw_data = get_pdf_text(filename)
        print(raw_data)
        # print("extracted raw data")

        llm_extracted_data=extracted_data(raw_data)
        #print(llm_extracted_data)
        #print("llm extracted data")
        # Pull the {...} block out of the LLM response and convert it to a dict
        pattern = r'{(.+)}'
        match = re.search(pattern, llm_extracted_data, re.DOTALL)

        if match:
            extracted_text = match.group(1)
            # ast.literal_eval is a safer way than eval() to parse the dict literal
            data_dict = ast.literal_eval('{' + extracted_text + '}')
            print(data_dict)
        else:
            print("No match found.")
            data_dict = {}  # fall back to an empty row so the loop can continue

        
        # DataFrame.append() was removed in pandas 2.0, so add the new row with pd.concat
        df = pd.concat([df, pd.DataFrame([data_dict])], ignore_index=True)
        print("********************DONE***************")
        # df = df.append(save_to_dataframe(llm_extracted_data), ignore_index=True)

    print(df.head())
    return df
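
# A minimal usage sketch: the "Invoice" folder and the output CSV name are assumptions
# for illustration; any iterable of PDF paths can be passed to create_docs().
if __name__ == "__main__":
    import glob

    pdf_files = glob.glob("Invoice/*.pdf")
    invoices_df = create_docs(pdf_files)
    print(invoices_df)
    invoices_df.to_csv("extracted_invoices.csv", index=False)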