import torch from transformers import pipeline, logging, AutoModelForCausalLM, AutoTokenizer import gradio as gr ## 1 - Loading Model model_name = "microsoft/phi-2" model = AutoModelForCausalLM.from_pretrained( model_name, trust_remote_code=True ) model.config.use_cache = False ## 2 - Loading Tokenizer tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token ## 3 - Load adapter (trained LORA weights) peft_model_folder = 'checkpoint700' model.load_adapter(peft_model_folder) def generate_dialogue(input_text): pipe = pipeline(task="text-generation",model=model,tokenizer=tokenizer,max_length=200) result = pipe(f"{input_text}") return_answer = (result[0]['generated_text']).replace(input_text,'') return return_answer HTML_TEMPLATE = """
Generate dialogue for given some initial prompt for context.
Model: Phi-2 (https://huggingface.co/microsoft/phi-2), Dataset: oasst1 (https://huggingface.co/datasets/OpenAssistant/oasst1)
""" with gr.Blocks(theme=gr.themes.Glass(),css=".gradio-container {background: url('file=https://github.com/santule/ERA/assets/20509836/e78f2bb3-ddd8-4ce9-a941-3d3d7ef7a272')}") as interface: gr.HTML(value=HTML_TEMPLATE, show_label=False) gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") with gr.Row(): input_text = gr.Textbox( label="Input Text", value="Enter your prompt here: This text will set the context for the AI's response." ) outputs = gr.Textbox( label="Answer" ) inputs = [input_text] with gr.Column(): button = gr.Button("Ask me") button.click(generate_dialogue, inputs=inputs, outputs=outputs) interface.launch()