File size: 2,951 Bytes
c7759ea
 
 
bfbbbea
c7759ea
 
 
dfb781f
 
0b3012a
dfb781f
8e1c60e
dfb781f
 
4ba3d22
bdf3e70
c7759ea
e24890b
e9f9106
df7a162
8e1c60e
d959e7d
c7759ea
6b0a5a8
b1cf10f
4ba3d22
e991c57
 
 
 
 
 
a40632d
a52308c
c7759ea
 
 
aeb447f
c7759ea
 
f0bdafb
aeb447f
2f1a468
 
c7759ea
 
 
 
 
1a8278b
 
c7759ea
 
9675192
bb2fd4a
c7759ea
 
 
 
 
 
bc1c38e
c7759ea
 
4df0f2d
e991c57
4df0f2d
c7759ea
 
 
17dfda2
52589e7
 
c7759ea
52589e7
c7759ea
 
8e1c60e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c7759ea
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
###########################
# UI for Meeting RAG Q&A. #
###########################

##################### Imports #####################
import gradio as gr
from utilities.setup import get_files
#from server import EmbeddingService, QAService

from connections.pinecone import PineconeConnector
from services.embed_service.embed import EmbeddingService

from server import QAService

#import spaces

#################### Functions ####################
def process_transcripts(files, context):
    """Embed the uploaded transcript files into the vector store.

    Args:
        files: Uploaded transcript file path(s) delivered by the Gradio
            ``UploadButton`` (``file_count='multiple'``).
        context: Meeting-goals text from the UI. Currently unused, but kept
            in the signature so the ``load_file.upload(..., [load_file, goals], ...)``
            wiring in ``main`` continues to work.

    Returns:
        A status string displayed in the UI's progress textbox.

    NOTE(review): reads the module-level globals ``conf`` and ``pinecones``
    that are only assigned in the ``__main__`` section — confirm this module
    is never imported and called without that setup.
    """
    with EmbeddingService(conf, pinecone=pinecones) as embedder:
        embedder.run(files)
    # TODO: report real progress (e.g. gr.Progress) instead of a static message.
    return "Completed Loading Data"

#@spaces.GPU
def retrieve_answer(question, context):
    """Placeholder Q&A handler: echoes the question followed by the context.

    TODO: replace the echo with the real QA pipeline, e.g.
        with QAService(conf) as q:
            q.run(question)
    """
    answer = question + context
    return answer


##################### Process #####################
def main(conf):
    """Build and launch the Gradio UI for meeting-transcript RAG Q&A.

    Args:
        conf: Parsed JSON configuration; uses the ``layout`` (page names,
            about-page markdown path) and ``defaults`` (goals, question) keys.
    """
    with gr.Blocks() as demo:

        # About / landing page.
        with gr.TabItem(conf["layout"]["page_names"][0]):
            gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))

        # Transcript upload & user configuration page.
        with gr.TabItem(conf["layout"]["page_names"][1]):
            gr.Markdown("# Upload Transcript and Necessary Context")
            gr.Markdown("Please wait as the transcript is being processed.")
            load_file = gr.UploadButton(label="Upload Transcript (.vtt)",
                                        file_types=[".vtt"],
                                        file_count='multiple')
            # Not incorporated yet; will feed the Q&A step.
            goals = gr.Textbox(label="Goals for the Meeting",
                               value=conf["defaults"]["goals"])
            repository = gr.Textbox(label="Progress",
                                    value="Waiting for load...",
                                    visible=True)
            load_file.upload(process_transcripts, [load_file, goals], repository)

        # Meeting question & answer page.
        with gr.TabItem(conf["layout"]["page_names"][2]):
            question = gr.Textbox(label="Ask a Question",
                                  value=conf["defaults"]["question"])
            ask_button = gr.Button("Ask!")
            # was gr.components.Textbox — gr.Textbox is the same class, used
            # consistently with the rest of the file.
            model_output = gr.Textbox(label="Answer")
            ask_button.click(fn=retrieve_answer,
                             inputs=[question, goals],
                             outputs=model_output)

    # Launch after the Blocks context closes (standard Gradio pattern).
    demo.launch()


##################### Execute #####################
if __name__ == "__main__":
    # Get config
    # Parsed JSON config; consumed by main() and (as a global) by
    # process_transcripts().
    conf = get_files.json_cfg()
    
    # Get keys
    # API credentials; only keys["pinecone"] is used below.
    keys = get_files.get_keys()

    # initialize pinecone connector
    pc_connector = PineconeConnector(
        api_key=keys["pinecone"],
        index_name=conf["embeddings"]["index_name"],
        embedding=conf["embeddings"]["embedding"],
    )
    
    # NOTE(review): `pinecones` is read as a module-level global by
    # process_transcripts() above — hidden coupling; this module must be
    # run as a script, not imported, for the upload handler to work.
    pinecones = pc_connector.run()

    # initialize model connector
    # TODO: model-connector initialization not implemented yet.

    # run main
    main(conf)