File size: 3,587 Bytes
5c10ee6
 
11526c9
9cfd70d
5c10ee6
9cfd70d
5c10ee6
9cfd70d
11526c9
e852070
 
9cfd70d
5c10ee6
9cfd70d
e852070
 
11526c9
5c10ee6
 
e852070
5c10ee6
e852070
 
 
 
 
 
11526c9
5c10ee6
 
 
d322c8d
5c10ee6
 
 
 
 
 
9cfd70d
11526c9
 
 
 
9cfd70d
 
 
5c10ee6
 
 
9cfd70d
 
 
 
 
 
 
 
 
5c10ee6
 
9cfd70d
 
5c10ee6
9cfd70d
 
 
 
11526c9
 
9cfd70d
 
5c10ee6
 
 
9cfd70d
 
5c10ee6
9cfd70d
5c10ee6
 
 
 
9cfd70d
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import asyncio
import datetime
import os

import gradio as gr

import koil


import lm.lm.openai
import lm.log.arweaveditems

# API key is read from the environment; None if unset (lm.lm.openai
# presumably falls back to its own configuration then — TODO confirm).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 

# Model requested for completions; defaults to the lm wrapper's default.
MODEL = lm.lm.openai.DEFAULT_MODEL

async def apredict(input):
    """Send *input* to the OpenAI API and log the exchange to Arweave.

    Parameters:
        input: the user's prompt text (from the gradio textbox).

    Returns:
        A two-item list ``[addr, response]`` where ``addr`` is the Arweave
        address of the logged record and ``response`` is the model output.

    Raises:
        Re-raises any construction error when not using the default model.
    """
    timestamp = datetime.datetime.now().isoformat()
    # Track the model in a local so the fallback below actually takes effect.
    # (Original code referenced an undefined name `model` here — NameError —
    # and rebuilt the api with the unchanged MODEL constant.)
    model = MODEL
    try:
        api = lm.lm.openai.openai(api_key = OPENAI_API_KEY, model = model)
    except Exception:
        # Best-effort fallback: if the default model failed, retry once
        # with gpt-4; otherwise propagate the original error.
        if model == lm.lm.openai.DEFAULT_MODEL:
            model = 'gpt-4'
            api = lm.lm.openai.openai(api_key = OPENAI_API_KEY, model = model)
        else:
            raise
    # NOTE(review): original said lm.lm.arweaveditems, but the file imports
    # lm.log.arweaveditems — the lm.lm path would raise AttributeError.
    log = lm.log.arweaveditems.arweaveditems()
    async with api, log:
        response = await api(input)
        # Record the full exchange (plus api.metadata, e.g. model info)
        # to Arweave; `addr` identifies the stored item.
        addr = await log(
                timestamp = timestamp,
                interface = 'gradio',
                **api.metadata,
                input = input,
                output = response
        )
    print(addr)
    return [addr, response]

def predict(input):
    """Synchronous wrapper around :func:`apredict` for gradio callbacks.

    Runs the async prediction to completion via koil and returns its
    ``[addr, response]`` result.
    """
    # Original bound the context manager `as Koil` but never used the
    # binding; dropped the dead (and confusingly shadow-named) local.
    with koil.Koil():
        return koil.unkoil(apredict, input)

def reset_textbox():
    """Return a gradio update that clears the input textbox."""
    cleared = gr.update(value='')
    return cleared

title = """<h1 align="center">🔥GPT4 +🚀Arweave</h1>"""
description = """Provides GPT4 completions logged to arweave.

In this app, you can explore the outputs of a gpt-4 LLM.
"""

theme = gr.themes.Default(primary_hue="green")                

# Build the gradio UI: a chatbot pane, a text input, and a submit button.
# Submitting text (or clicking the button) runs `predict` and then clears
# the textbox via `reset_textbox`.
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""",
              theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you access to GPT4 API. 🎉🥳🎉You don't need any OPENAI API key🙌</h1>""")
    gr.HTML('''<center>Duplicate the space to provide a different api key, or donate your key to others in the community tab.</center>''')
    with gr.Column(elem_id = "col_container"):
        chatbot = gr.Chatbot(elem_id='chatbot') #c
        inputs = gr.Textbox(label= "Type an input and press Enter") #t
        state = gr.State([]) #s
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            #with gr.Column(scale=3):
            #    server_status_code = gr.Textbox(label="Status code from OpenAI server", )
    
        #inputs, top_p, temperature, top_k, repetition_penalty
        #with gr.Accordion("Parameters", open=False):
            #top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            #temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
            #chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Wire both the Enter key and the button to the prediction callback.
    # NOTE(review): predict returns [addr, response] but the output target
    # is the chatbot component — verify the chatbot accepts that shape.
    #inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
    inputs.submit(predict, [inputs], [chatbot])
    #b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
    b1.click(predict, [inputs], [chatbot])
    # Clear the textbox after either trigger fires.
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
                    
    #gr.Markdown(description)
    # Launch at import time with a bounded request queue.
    demo.queue(max_size=20, concurrency_count=10).launch(debug=True)