"""Chat Diffusion: a Gradio chatbot that turns chat requests into images.

Mixtral-8x7B-Instruct expands each user message into a detailed image prompt,
which is then rendered by a user-selected diffusion model Space.
"""
import io
import random
from datetime import datetime

import gradio as gr
import requests
from huggingface_hub import InferenceClient
from PIL import Image

import agent
from models import models
# Preload each diffusion model listed in models.py as a callable Gradio interface
loaded_model = []
for model in models:
    loaded_model.append(gr.load(f'models/{model}'))
print(loaded_model)

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

# LLM backend used to expand user requests into image prompts
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
history = []
max_prompt = 1000  # requests longer than this are condensed before generation

def format_prompt(message, history):
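    """Wrap the chat history and the new message in Mixtral's [INST] format."""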
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def condense(in_prompt):
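    """Ask the LLM to shorten an over-long request before prompt generation."""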
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=512,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.CONDENSE_PROMPT + in_prompt
    print(f'CONDENSED:: {content}')

    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp

def run_gpt(in_prompt, history):
    """Generate a detailed image prompt from the user request and chat history."""
    # Condense requests that exceed the length budget
    if len(in_prompt) > max_prompt:
        in_prompt = condense(in_prompt)
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history)
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp


def run(purpose, history, model_drop):
    """Chat handler: expand the request into a prompt, then render an image."""
    print(history)
    out_prompt = run_gpt(
        purpose,
        history,
    )

    # Show the generated prompt right away, before the image is ready
    yield ([(purpose, out_prompt)], None)

    # Run the selected diffusion model; it returns a file path inside the Space
    model = loaded_model[int(model_drop)]
    out_img = model(out_prompt)
    print(out_img)

    # Fetch the rendered image back from the hosting Space
    url = f'https://johann22-mixtral-diffusion.hf.space/file={out_img}'
    print(url)
    out = None
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        out = Image.open(io.BytesIO(r.content))
    yield ([(purpose, out_prompt)], out)



################################################
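# UI: a chat column (history, prompt box, model picker, run/stop controls)
# next to a larger panel that displays the generated image.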

with gr.Blocks() as iface:
    gr.HTML("""<center><h1>Chat Diffusion</h1><br><h3>This chatbot will generate images</h3></center>""")
    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot()
            msg = gr.Textbox()
            model_drop = gr.Dropdown(label="Diffusion Models", type="index", choices=models, value=models[0])
            with gr.Group():
                submit_b = gr.Button("Submit")
                with gr.Row():
                    stop_b = gr.Button("Stop")
                    clear = gr.ClearButton([msg, chatbot])
        with gr.Column(scale=2):
            im_out = gr.Image(label="Image")

    # Wire both the button and textbox submit to the generator; Stop cancels them
    sub_b = submit_b.click(run, [msg, chatbot, model_drop], [chatbot, im_out])
    sub_e = msg.submit(run, [msg, chatbot, model_drop], [chatbot, im_out])
    stop_b.click(None, None, None, cancels=[sub_b, sub_e])
iface.launch()
# Unused alternative single-widget UI, kept for reference (`examples` is not defined in this file):
'''
gr.ChatInterface(
    fn=run,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
'''