import gradio as gr
import openai
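# NOTE: this module targets the legacy openai<1.0 Python SDK, which exposes
# openai.Image.create and openai.Audio.transcribe; openai>=1.0 removed these interfaces.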


def generate_image(prompt, api_key):
    """Generate an image from a text prompt via the OpenAI image API and return its URL."""
    # api_key = os.environ.get('OPENAI_API_KEY')  # alternative: read the key from the environment (requires `import os`)
    if not api_key:
        raise gr.Error("OPENAI_API_KEY is missing")

    response = openai.Image.create(
        prompt=prompt,
        n=1,
        size="1024x1024",
        api_key=api_key
    )
    # The response carries one entry per requested image; each exposes a hosted URL.
    image_url = response['data'][0]['url']
    return image_url


def audio(file, api_key):
    """Transcribe an audio file with the Whisper API and return the recognized text."""
    if not api_key:
        raise gr.Error("OPENAI_API_KEY is missing")

    with open(file, 'rb') as f:
        transcript = openai.Audio.transcribe(model="whisper-1", file=f,
                                             api_key=api_key)
    return transcript['text']


def openai_tab():
    """Build the "openai" tab; expected to be called inside an enclosing gr.Blocks() context."""
    with gr.Tab("openai"):
        gr.Markdown('### Text-to-image generation')
        api_key_input = gr.Textbox(label='OPENAI_API_KEY', placeholder='Enter your OPENAI_API_KEY', type='password')

        with gr.Row():
            text_input = gr.Textbox(label='Prompt', placeholder='Describe the image you want to generate')
            text_output = gr.Image(label='Image')
        text_button = gr.Button('Generate image')
        text_button.click(generate_image, inputs=[text_input, api_key_input], outputs=text_output)

        gr.Markdown('### Audio transcription')
        with gr.Row():
            file_input = gr.Audio(label='Audio file', type='filepath')
            audio_output = gr.Text(label='Transcript')
        audio_button = gr.Button('Transcribe audio')
        audio_button.click(audio, inputs=[file_input, api_key_input], outputs=audio_output)
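

# Minimal usage sketch (assumption: the tab is normally mounted by a separate
# launcher script that is not part of this file). Running this module directly
# starts a standalone demo containing only the OpenAI tab.
if __name__ == '__main__':
    with gr.Blocks() as demo:
        openai_tab()
    demo.launch()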