# -*- coding: utf-8 -*-
"""LLM Comparison

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/156SKaX3DY6jwOhcpwZVM5AiLscOAbNNJ
"""

# Commented out IPython magic to ensure Python compatibility.
# %pip install -qU pixeltable gradio sentence-transformers tiktoken openai openpyxl

import gradio as gr
import pandas as pd
import pixeltable as pxt
from pixeltable.iterators import DocumentSplitter
import numpy as np
from pixeltable.functions.huggingface import sentence_transformer
from pixeltable.functions import openai
import os
import getpass

"""## Store OpenAI API Key"""

if 'OPENAI_API_KEY' not in os.environ:
    os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')

"""Pixeltable Set up"""

# Ensure a clean slate for the demo
pxt.drop_dir('rag_demo', force=True)
pxt.create_dir('rag_demo')

# Set up embedding function
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    return sentence_transformer(text, model_id='intfloat/e5-large-v2')
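
# Note (informational): intfloat/e5-large-v2 produces 1024-dimensional
# vectors; the same UDF embeds both the stored chunks and, via .similarity()
# below, the query strings compared against them.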

# Create prompt function
@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
    concat_top_k = '\n\n'.join(
        elt['text'] for elt in reversed(top_k_list)
    )
    return f'''
    PASSAGES:

    {concat_top_k}

    QUESTION:

    {question}'''
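
# For illustration, with the retrieved passages filled in, the rendered
# prompt looks roughly like:
#
#     PASSAGES:
#
#     <passage text>
#
#     <passage text>
#
#     QUESTION:
#
#     <question text>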

def process_files(ground_truth_file, pdf_files):
    # Import the ground-truth file; the resulting table is expected to
    # contain 'Question' and 'correct_answer' columns (referenced below)
    if ground_truth_file.name.endswith('.csv'):
        queries_t = pxt.io.import_csv('rag_demo.queries', ground_truth_file.name)
    else:
        queries_t = pxt.io.import_excel('rag_demo.queries', ground_truth_file.name)

    # Create the documents table and insert the uploaded files, skipping
    # anything that is not a PDF
    documents_t = pxt.create_table(
        'rag_demo.documents',
        {'document': pxt.DocumentType()}
    )

    documents_t.insert({'document': file.name} for file in pdf_files if file.name.endswith('.pdf'))

    # Create chunks view
    chunks_t = pxt.create_view(
        'rag_demo.chunks',
        documents_t,
        iterator=DocumentSplitter.create(
            document=documents_t.document,
            separators='token_limit',
            limit=300
        )
    )
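
    # DocumentSplitter emits one row per chunk; with separators='token_limit'
    # and limit=300, each chunk's `text` column holds roughly 300 tokens.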

    # Add embedding index
    chunks_t.add_embedding_index('text', string_embed=e5_embed)

    # Create top_k query
    @chunks_t.query
    def top_k(query_text: str):
        sim = chunks_t.text.similarity(query_text)
        return (
            chunks_t.order_by(sim, asc=False)
            .select(chunks_t.text, sim=sim)
            .limit(5)
        )
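
    # top_k returns the five chunks most similar to query_text, best match
    # first; create_prompt reverses this order so the strongest passage sits
    # closest to the question in the prompt.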

    # Add computed columns to queries_t
    queries_t['question_context'] = chunks_t.top_k(queries_t.Question)
    queries_t['prompt'] = create_prompt(
        queries_t.question_context, queries_t.Question
    )

    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': queries_t.prompt
        }
    ]

    # Add OpenAI response column
    queries_t['response'] = openai.chat_completions(
        model='gpt-4-0125-preview', messages=messages
    )
    
    queries_t['answer'] = queries_t.response.choices[0].message.content
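
    # The completion is stored as JSON; the path expression above mirrors the
    # OpenAI response shape, roughly:
    #   {'choices': [{'message': {'content': '<answer>'}, ...}], ...}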

    try:
        df_output = (
            queries_t.select(queries_t.Question, queries_t.correct_answer, queries_t.answer)
            .collect()
            .to_pandas()
        )
        return df_output
    except Exception as e:
        # Surface the error in the DataFrame output rather than crashing the UI
        return pd.DataFrame({'error': [str(e)]})

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# RAG Demo App")

    with gr.Row():
        ground_truth_file = gr.File(label="Upload Ground Truth (CSV or XLSX)", file_count="single")
        pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")

    process_button = gr.Button("Process Files")

    df_output = gr.DataFrame(label="Pixeltable Table")

    #question_input = gr.Textbox(label="Enter your question")
    #query_button = gr.Button("Query LLM")

    process_button.click(process_files, inputs=[ground_truth_file, pdf_files], outputs=df_output)
    #query_button.click(query_llm, inputs=question_input, outputs=output_dataframe)

if __name__ == "__main__":
    demo.launch()
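
# To try the app locally (assuming the packages from the %pip cell above are
# installed), run this file with Python, e.g.:
#   python llm_comparison.py   # filename assumed; use whatever this script is saved as
# Gradio prints a local URL; upload a CSV/XLSX ground-truth file with
# 'Question' and 'correct_answer' columns plus one or more PDFs, then click
# "Process Files".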