File size: 2,804 Bytes
15fc611
78e2d06
 
 
aba8ba9
e30edef
78e2d06
 
 
 
 
ce9a8c3
b2a615a
78e2d06
 
951feec
78e2d06
 
 
 
 
 
 
 
 
 
 
 
e30edef
639690f
129f965
98f29e9
78e2d06
8af3424
78e2d06
76f8ae7
21056ba
27683e9
88cf551
85a7c4c
 
21056ba
 
30af5b2
91de607
e8c095a
fd67f34
98f29e9
78e2d06
21056ba
6f74a68
38e745a
35fcfc0
91de607
 
35fcfc0
21056ba
7b72dca
21056ba
74c6351
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
import openai
import gradio as gr

# API credentials are read from the environment so no secrets live in the code.
openai.api_key = os.getenv('OPEN_AI_KEY')
# BUG FIX: the original line was `hf_t_key = ('HF_TOKEN_KEY')`, which binds the
# literal string — the parentheses are a no-op grouping, not a call. Read the
# Hugging Face token from the environment like the OpenAI key above.
hf_t_key = os.getenv('HF_TOKEN_KEY')

def predict(message, history):
    """Stream a chat completion for *message* given the prior chat *history*.

    Parameters
    ----------
    message : str
        The latest user prompt.
    history : list[tuple[str, str]]
        Gradio chat history as (user_message, assistant_message) pairs.

    Yields
    ------
    str
        The assistant reply accumulated so far, re-yielded after each
        streamed chunk (Gradio's streaming-chat convention).
    """
    # Flatten Gradio's pair-based history into the OpenAI messages format.
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model='ft:gpt-3.5-turbo-1106:2292030-peach-tech::8cxzbHH4',
        messages=history_openai_format,
        temperature=1.0,
        stream=True
    )

    partial_message = ""
    for chunk in response:
        # The first streamed delta may contain only a "role" key and the final
        # delta is empty; .get() avoids the KeyError the original indexing
        # (delta['content'] behind a bare len() check) could raise.
        content = chunk['choices'][0]['delta'].get('content')
        if content:
            partial_message += content
            yield partial_message

# Tab 1 — conversational chat backed by the fine-tuned model in predict().
A1 = gr.ChatInterface(
    predict,
    title="COLLEAGUE",
    description="An AI Productivity Assistant that Chats, Transcribes, and Creates, Built By Peach State Innovation and Technology. Select The Corresponding Tab For Tool Accessibility",
    textbox=gr.Textbox(placeholder="Enter your question/prompt here..."),
    theme=gr.themes.Glass(primary_hue="neutral", neutral_hue="slate"),
    retry_btn=None,
    clear_btn="Clear Conversation",
)

# Tab 2 — speech-to-text using the hosted Whisper-small model, accepting
# either live microphone input or an uploaded audio file.
# NOTE(review): the gr.inputs.* namespace was deprecated in Gradio 3 and
# removed in Gradio 4, and gr.load no longer documents title/description/
# inputs kwargs — this block presumably targets an older pinned gradio
# version; confirm the version, or migrate to gr.Audio(sources=[...]).
A2 = gr.load("models/openai/whisper-small",
            title=" ",
            description="Transcribe Speech and Audio Files",
             inputs=[gr.inputs.Audio(source="microphone", type="filepath", optional=True), 
                     gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Audio file")],
                      )

# Tab 3 — image captioning via the Salesforce BLIP model.
A3 = gr.load(
    "models/Salesforce/blip-image-captioning-large",
    title=" ",
    description="Take and Upload a Photo or Existing Image, I'll Give You Its Description",
    outputs=[gr.Textbox(label="I see...")],
    theme=gr.themes.Glass(primary_hue="neutral", neutral_hue="slate"),
)

# Tab 4 — text-to-image generation with Stable Diffusion XL.
A4 = gr.load(
    "models/stabilityai/stable-diffusion-xl-base-1.0",
    inputs=[gr.Textbox(label="Enter Your Image Description")],
    outputs=[gr.Image(label="Image")],
    title=" ",
    description="Bring Your Imagination Into Existence On The Digital Canvas With COLLEAGUE, Powered With Stable Diffusion",
    allow_flagging="never",
    examples=["A gigantic celtic leprechaun wandering the streets of downtown Atlanta", "A child eating pizza in a Brazilian favela"],
)

# Assemble the four tools into a single tabbed app and serve it; queuing is
# enabled so the streaming chat generator in predict() can yield partial text.
tab_labels = ["Chat", "Transcribe", "Describe", "Create"]
pcp = gr.TabbedInterface(
    [A1, A2, A3, A4],
    tab_labels,
    theme=gr.themes.Glass(primary_hue="neutral", neutral_hue="slate"),
)
pcp.queue().launch()