import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import pytesseract
# Log in to the Hugging Face Hub (needed for gated models such as Llama)
from huggingface_hub import login
login()
# Initialize the chat model (swap in any text-generation model you like)
chat_model = pipeline("text-generation", model="gpt2")
# Initialize LLaMA model for more advanced instruction-following tasks
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.3-70B-Instruct")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.3-70B-Instruct")
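
# A minimal sketch (not part of the original flow) showing how the LLaMA model
# and tokenizer loaded above could be used for generation; the function name
# and parameters here are illustrative assumptions.
def llama_generate(prompt, max_new_tokens=100):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)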
# Chat function: generate a reply and append the exchange to the history
def chat_fn(history, user_input):
    response = chat_model(user_input, max_length=50, num_return_sequences=1)
    bot_reply = response[0]["generated_text"]
    history.append((user_input, bot_reply))
    return history, ""
# OCR function
def ocr(image):
    text = pytesseract.image_to_string(image)
    return text
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("### الصور والدردشة")  # "Images and Chat"

    # Image OCR section
    with gr.Tab("استخراج النصوص من الصور"):  # "Extract text from images"
        with gr.Row():
            image_input = gr.Image(type="pil")
            ocr_output = gr.Textbox()
        submit_button = gr.Button("Submit")
        submit_button.click(ocr, inputs=image_input, outputs=ocr_output)

    # Chat section
    with gr.Tab("المحادثة"):  # "Chat"
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="اكتب رسالتك")  # "Type your message"
        clear = gr.Button("Clear")
        msg.submit(chat_fn, [chatbot, msg], [chatbot, msg])
        clear.click(lambda: None, None, chatbot)

# Launch the Gradio interface
demo.launch()
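
# Usage sketch (assumptions: the file is saved as app.py, the Tesseract binary
# is installed on the system, and the usual PyPI package names are used):
#   pip install gradio transformers torch pytesseract pillow huggingface_hub
#   python app.py
# Gradio then serves the app locally (by default at http://127.0.0.1:7860).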