from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import spaces
# Check whether CUDA is available and set the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Utilisation du périphérique : {device}")
# Load the tokenizer and model
model_name = "soynade-research/Oolel-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
# Predefined system prompts
system_prompts = {
    "Conte Wolof traditionnel": "You are a skilled Wolof storyteller (Gewël) with deep knowledge of African folktales and traditions. Write engaging stories in Wolof that reflect African cultural values and wisdom.",
    "Traduction Wolof": "You are an expert translator specialized in the Wolof language. Accurately translate texts between Wolof and French or English, ensuring the meaning, tone, and cultural context are preserved.",
    "Apprentissage du Wolof": "You are a patient and knowledgeable Wolof language teacher. Teach Wolof to beginners, including essential vocabulary, common phrases, and cultural nuances, step by step.",
    "Recettes de cuisine africaine": "You are a culinary expert specializing in African cuisine. Provide detailed recipes for traditional dishes, including step-by-step instructions, ingredients, and cultural significance.",
    "Histoires inspirantes africaines": "You are a motivational speaker with a passion for sharing real-life stories of African innovators, leaders, and changemakers who inspire the continent's progress.",
    "Conseils en développement": "You are a software developer with expertise in building applications. Provide practical advice and solutions for coding challenges in Python and machine learning.",
    "Conseils en gestion de projet": "You are an expert in project management, capable of offering insights on how to efficiently manage a remote development team and keep them motivated.",
    "Conseils en entrepreneuriat": "You are an experienced entrepreneur who understands the challenges of starting and running a business in Africa. Provide actionable advice on funding, market research, and growth strategies.",
    "Éducation financière": "You are a financial literacy coach helping individuals understand savings, investments, and budgeting in the African context. Offer simple and practical advice for managing personal finances.",
    "Histoire africaine": "You are a historian specializing in African history. Share detailed and engaging explanations about key historical events, cultures, and figures that shaped the continent.",
    "Poésie en Wolof": "You are a gifted poet who writes beautiful and meaningful poetry in Wolof, capturing the essence of African traditions, values, and emotions.",
    "Traduction pour entreprises": "You are a professional translator helping African businesses localize their content for a Wolof-speaking audience. Provide translations that resonate with the target demographic while maintaining professionalism.",
    "Bien-être et santé": "You are a health and wellness coach with expertise in promoting healthy living. Share practical tips on fitness, mental health, and traditional African remedies.",
    "Actualités africaines": "You are a journalist with a deep understanding of African current affairs. Provide well-informed summaries and analyses of news and events from across the continent.",
    "Tourisme en Afrique": "You are a travel guide specializing in African destinations. Recommend unique places to visit, cultural experiences, and practical tips for travelers exploring the continent."
}
# Response generation function
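# On ZeroGPU Spaces, @spaces.GPU attaches a GPU for the duration of each call (here capped at 120 seconds)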
@spaces.GPU(duration=120)
def generate_response(user_input, system_prompt, max_new_tokens=150, temperature=0.7):
    # Handle the case where the system prompt is empty
    if not system_prompt.strip():
        messages = [
            {"role": "user", "content": user_input}
        ]
    else:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_input}
        ]

    # Build the chat-formatted prompt string with the tokenizer's chat template
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    # Prepare the model inputs
    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    # Generate the response (do_sample=True enables sampling so the temperature setting has an effect)
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=int(max_new_tokens),
        do_sample=True,
        temperature=temperature
    )

    # Keep only the newly generated tokens, then decode them
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
# Update the system-prompt textbox based on the selected preset
def update_system_prompt(selected_prompt):
    return system_prompts.get(selected_prompt, "")
# Gradio interface built with Blocks
with gr.Blocks() as iface:
    with gr.Row():  # Layout split into two columns
        with gr.Column(scale=1):  # Left side: inputs
            gr.Markdown("# Oolel Chatbot")
            gr.Markdown("Génération de réponses basées sur des prompts système personnalisés.")

            # Textbox for the user's message
            user_input = gr.Textbox(label="Message utilisateur", placeholder="Entrez votre message ici...")

            # Dropdown to choose a system prompt
            dropdown = gr.Dropdown(
                label="Choisir un prompt système",
                choices=list(system_prompts.keys()),  # Available prompt presets
                value=None,  # No default selection
                type="value",
                interactive=True
            )

            # Textbox to display and edit the system prompt text
            system_prompt_textbox = gr.Textbox(
                label="Message du prompt système",
                value="",  # Empty by default
                placeholder="Sélectionnez un prompt système pour afficher son contenu ici..."
            )

            # Update the textbox whenever the dropdown selection changes
            dropdown.change(update_system_prompt, inputs=[dropdown], outputs=[system_prompt_textbox])

            # Slider for the maximum number of new tokens
            max_tokens_slider = gr.Slider(50, 500, value=150, label="Nombre max de tokens")

            # Slider to adjust the temperature
            temperature_slider = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Température (créativité)")

            # Button that triggers generation
            generate_button = gr.Button("Générer une réponse")

        with gr.Column(scale=1):  # Right side: generated response
            gr.Markdown("## Réponse générée")
            output = gr.Textbox(label="", interactive=False)  # Displays the generated response

    # Wire the button to the generation function
    generate_button.click(
        fn=generate_response,
        inputs=[user_input, system_prompt_textbox, max_tokens_slider, temperature_slider],
        outputs=output
    )

# Launch the interface
iface.launch()
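# Note: when deployed on Hugging Face Spaces, the platform runs this script and serves the Gradio UI started by iface.launch()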