import os
import random
from huggingface_hub import InferenceClient
import gradio as gr
#from utils import parse_action, parse_file_content, read_python_module_structure
from datetime import datetime
from PIL import Image
import agent
from models import models
import urllib.request
import uuid
import requests
import io

# Load each diffusion model listed in models.py as a callable Gradio interface
loaded_model = []
for model in models:
    loaded_model.append(gr.load(f'models/{model}'))
print(loaded_model)

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)

############################################

model = gr.load("models/stabilityai/sdxl-turbo")

VERBOSE = True
MAX_HISTORY = 10000
#MODEL = "gpt-3.5-turbo" # "gpt-4"
history = []

def infer(txt):
    return model(txt)
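# Example: infer("a watercolor fox") returns the path of the image file
# rendered by the sdxl-turbo interface (infer is currently unused by the UI).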

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
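# Example: format_prompt("a red fox", [("a cat", "A tabby cat...")]) returns
# "<s>[INST] a cat [/INST] A tabby cat...</s> [INST] a red fox [/INST]",
# the instruction format Mixtral-8x7B-Instruct expects.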

def run_gpt(
    in_prompt,
    history,
):
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history)
    # Random seed so repeated requests produce varied prompts
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    # Stream tokens from Mixtral and accumulate them into one string
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp
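# Example (hypothetical): run_gpt("a watercolor fox", []) blocks until the
# token stream is exhausted, then returns the full generated image prompt.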

def run(purpose, history, model_drop):
    print(history)
    # Ask Mixtral to expand the user's request into a detailed image prompt
    out_prompt = run_gpt(
        purpose,
        history,
    )
    # Show the generated prompt in the chat while the image renders
    yield ("", history + [(purpose, out_prompt)], None)
    # Run the selected diffusion model on the generated prompt
    model = loaded_model[int(model_drop)]
    out_img = model(out_prompt)
    print(out_img)
    url = f'https://johann22-mixtral-chat-diffusion.hf.space/file={out_img}'
    print(url)
    uid = uuid.uuid4()
    # Fetch the rendered file back from the Space and hand it to the Image box
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        out = Image.open(io.BytesIO(r.content))
        yield ("", history + [(purpose, out_prompt)], out)
    else:
        # Leave the image output empty if the file could not be retrieved
        yield ("", history + [(purpose, out_prompt)], None)

################################################

with gr.Blocks() as iface:
    gr.HTML("""<center><h1>Mixtral Chat Diffusion</h1><br><h3>This chatbot will generate images</h3></center>""")
    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    with gr.Row():
        with gr.Column():
            chatbot = gr.Chatbot()
            msg = gr.Textbox()
            # type="index" makes the dropdown return the model's position in `models`
            model_drop = gr.Dropdown(label="Diffusion Models", type="index", choices=list(models), value=models[0])
            with gr.Row():
                submit_b = gr.Button("Submit")
                stop_b = gr.Button("Stop")
                clear = gr.ClearButton([msg, chatbot])
        sumbox = gr.Image(label="Image")
    sub_b = submit_b.click(run, [msg, chatbot, model_drop], [msg, chatbot, sumbox])
    sub_e = msg.submit(run, [msg, chatbot, model_drop], [msg, chatbot, sumbox])
    # Stop cancels any in-flight generation triggered by the button or the textbox
    stop_b.click(None, None, None, cancels=[sub_b, sub_e])

iface.launch()

'''
gr.ChatInterface(
    fn=run,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
'''