import os
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Download the quantized GGUF weights from the Hugging Face Hub (cached
# locally by hf_hub_download) and load them on CPU.
saiga = Llama(
    model_path=hf_hub_download(
        repo_id="FinancialSupport/saiga-7b-gguf",
        filename="saiga-7b.Q4_K_M.gguf",
    ),
    n_ctx=4096,  # 4k context window
)

# Same repo, but the variant with the Dante QLoRA merged in.
dante = Llama(
    model_path=hf_hub_download(
        repo_id="FinancialSupport/saiga-7b-gguf",
        filename="saiga-7b-dante-qlora.Q4_K_M.gguf",
    ),
    n_ctx=4096,  # 4k context window
)
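
# Optional smoke test (an addition, not part of the original app): set
# SAIGA_SMOKE_TEST=1 in the environment to confirm the weights load and
# generate before the UI starts. The prompt and max_tokens are illustrative.
if os.environ.get("SAIGA_SMOKE_TEST"):
    print(saiga("Ciao, come stai?", max_tokens=16)["choices"][0]["text"])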


def generate_text(message, history):
    temp = ""
    # System line ("Conversation between a human and an AI assistant named
    # saiga-7b"), followed by the previous turns in the
    # [|Umano|]/[|Assistente|] format the model expects.
    input_prompt = "Conversazione tra umano ed un assistente AI di nome saiga-7b\n"
    for user_msg, assistant_msg in history:
        input_prompt += "[|Umano|] " + user_msg + "\n"
        input_prompt += "[|Assistente|]" + assistant_msg + "\n"

    input_prompt += "[|Umano|] " + message + "\n[|Assistente|]"

    print(input_prompt)

    output = saiga(
        input_prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        # Stop as soon as the model starts writing the next turn.
        stop=["[|Umano|]", "[|Assistente|]"],
        stream=True,
    )

    # Accumulate the streamed tokens and re-yield the running text so the
    # UI shows the reply as it is generated.
    for out in output:
        temp += out["choices"][0]["text"]
        yield temp

def generate_text_Dante(message, history):
    temp = ""
    # Same [|Umano|]/[|Assistente|] turn format as above, but without a
    # system line: the Dante variant is prompted with the conversation alone.
    input_prompt = ""
    for user_msg, assistant_msg in history:
        input_prompt += "[|Umano|] " + user_msg + "\n"
        input_prompt += "[|Assistente|]" + assistant_msg + "\n"

    input_prompt += "[|Umano|] " + message + "\n[|Assistente|]"

    print(input_prompt)

    output = dante(
        input_prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        stop=["[|Umano|]", "[|Assistente|]"],
        stream=True,
    )

    for out in output:
        temp += out["choices"][0]["text"]
        yield temp

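
# The two generators above differ only in the model object and the optional
# system line. A shared helper along these lines (a sketch; the name and
# signature are not from the original) would remove the duplication:
def stream_chat(model, message, history, system=""):
    prompt = system
    for user_msg, assistant_msg in history:
        prompt += "[|Umano|] " + user_msg + "\n[|Assistente|]" + assistant_msg + "\n"
    prompt += "[|Umano|] " + message + "\n[|Assistente|]"
    text = ""
    for out in model(
        prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        stop=["[|Umano|]", "[|Assistente|]"],
        stream=True,
    ):
        text += out["choices"][0]["text"]
        yield text
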

with gr.Blocks() as demo:
    # The base saiga tab is disabled; uncomment this block to expose the plain
    # model (served by generate_text above) alongside the Dante tab.
    # with gr.Tab('saiga'):
    #     gr.ChatInterface(
    #         generate_text,
    #         title="saiga-7b running on CPU (quantized Q4_K)",
    #         description="This is a quantized version of saiga-7b running on CPU (very slow). It is less powerful than the original version, but it can run even on the free tier of Hugging Face.",
    #         examples=[
    #             "Dammi 3 idee di ricette che posso fare con i pistacchi",
    #             "Prepara un piano di esercizi da poter fare a casa",
    #             "Scrivi una poesia sulla nuova AI chiamata cerbero-7b",
    #         ],
    #         cache_examples=False,
    #         retry_btn=None,
    #         undo_btn="Delete Previous",
    #         clear_btn="Clear",
    #     )
    with gr.Tab('Dante'):
        gr.ChatInterface(
            generate_text_Dante,
            title="saigaDante-7b running on CPU (quantized Q4_K)",
            description="This is a quantized version of saiga-7b with the Dante LoRA attached, running on CPU (very slow).",
            # Example prompts ask the model to translate into Florentine vernacular.
            examples=[
                "Traduci in volgare fiorentino: tanto va la gatta al lardo che ci lascia lo zampino",
                "Traduci in volgare fiorentino: narrami come cucinare la pasta alla carbonara vegana.",
                "Traduci in volgare fiorentino: raccontami una fiaba su Firenze",
            ],
            cache_examples=False,
            retry_btn=None,
            undo_btn="Delete Previous",
            clear_btn="Clear",
        )
        
# One generation at a time, with at most 5 requests waiting in the queue.
demo.queue(concurrency_count=1, max_size=5)
demo.launch()
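
# To run locally (assuming this file is saved as app.py and gradio,
# llama-cpp-python, and huggingface_hub are installed):
#   python app.py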