# --- Earlier version 1 (kept for reference): Groq API + gr.Interface with custom CSS ---
# import gradio as gr
# from groq import Groq

# client = Groq(
#     api_key="YOUR_GROQ_API_KEY",  # never hard-code a real key; load it from an environment variable
# )

# def generate_response(input_text):
#     chat_completion = client.chat.completions.create(
#         messages=[
#             {
#                 "role": "user",
#                 "content": input_text,
#             }
#         ],
#         model="llama3-8b-8192",
#     )
#     return chat_completion.choices[0].message.content

# custom_css = """
# body {
#     background-color: #f5f5f5;
#     font-family: 'Arial', sans-serif;
#     color: #333;
# }
# .gradio-container {
#     border-radius: 12px;
#     padding: 20px;
#     background-color: #ffffff;
#     box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
# }
# input[type="text"], textarea {
#     border-radius: 10px;
#     border: 1px solid #ddd;
#     padding: 12px;
#     width: 100%;
#     font-size: 14px;
#     color: #333;
#     background-color: #f9f9f9;
# }
# button {
#     background-color: #007bff;
#     color: white;
#     border: none;
#     padding: 12px 24px;
#     border-radius: 10px;
#     cursor: pointer;
#     font-size: 14px;
#     font-weight: bold;
# }
# button:hover {
#     background-color: #0056b3;
# }
# h1 {
#     font-weight: 600;
#     color: #333;
# }
# textarea {
#     resize: none;
# }
# """

# NOTE: custom_css is defined above but never applied; gr.Interface would need css=custom_css.
# iface = gr.Interface(
#     fn=generate_response,
#     inputs=gr.Textbox(label="ورودی", lines=2, placeholder="اینجا یه چی بپرس... "),  # label: "Input", placeholder: "Ask something here..."
#     outputs=gr.Textbox(label="جواب"),  # "Answer"
#     title="💬 Parviz Chatbot",
#     description="زنده باد",  # "Long live"
#     theme="dark",
#     allow_flagging="never"
# )
# iface.launch()

# --- Earlier version 2 (kept for reference): Groq API + gr.Blocks with simulated streaming ---
# import gradio as gr
# from groq import Groq
# import time

# client = Groq(api_key="YOUR_GROQ_API_KEY")  # never hard-code a real key; load it from an environment variable

# def generate_response(message, chat_history):
#     chat_completion = client.chat.completions.create(
#         messages=[{"role": "user", "content": message}],
#         model="llama3-8b-8192",
#     )
#     bot_message = chat_completion.choices[0].message.content
#     # Simulate streaming by yielding the reply in 10-character chunks.
#     for i in range(0, len(bot_message), 10):
#         yield chat_history + [(message, bot_message[:i + 10])]
#         time.sleep(0.1)
#     yield chat_history + [(message, bot_message)]

# with gr.Blocks() as demo:
#     gr.Markdown("زنده باد")  # "Long live"
") # chatbot = gr.Chatbot(label="جواب") # msg = gr.Textbox(label="ورودی", placeholder="اینجا یه چی بپرس... ", lines=1) # msg.submit(generate_response, [msg, chatbot], chatbot) # clear = gr.ClearButton([msg, chatbot]) # demo.launch() import gradio as gr import torch from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig import re import time tokenizer = AutoTokenizer.from_pretrained("universitytehran/PersianMind-v1.0") model = AutoModelForSeq2SeqLM.from_pretrained("universitytehran/PersianMind-v1.0") def generate_response(message, chat_history): TEMPLATE = "{context}\nYou: {prompt}\nParvizGPT " CONTEXT = "This is a conversation with ParvizGPT. It is an artificial intelligence model designed by Amir Mahdi Parviz " \ "NLP expert to help you with various tasks such as answering questions, " \ "providing recommendations, and helping with decision making. You can ask it anything you want and " \ "it will do its best to give you accurate and relevant information." prompt = TEMPLATE.format(context=CONTEXT, prompt=message) generation_config = GenerationConfig( max_new_tokens=128, do_sample=True, top_k=50, top_p=0.95, temperature=0.8, repetition_penalty=1.2 ) tokenized_test_text = tokenizer(prompt, return_tensors='pt').input_ids.to("cpu") model.to("cpu") outputs = model.generate(tokenized_test_text, generation_config=generation_config, max_new_tokens=128) result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] for i in range(0, len(result), 10): yield chat_history + [(message, result[:i + 10])] time.sleep(0.1) yield chat_history + [(message, result)] with gr.Blocks() as demo: gr.Markdown("made by A.M.Parviz \
") chatbot = gr.Chatbot(label="جواب") msg = gr.Textbox(label="ورودی", placeholder="سوال خودتو رو بپرس", lines=1) msg.submit(generate_response, [msg, chatbot], chatbot) clear = gr.ClearButton([msg, chatbot]) demo.launch()