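# Roleplay demo: loads the yodayo-ai/nephra_v1.0 model with transformers and
# serves character-conditioned text generation through a Gradio interface.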
import random

import torch
import transformers
import gradio as gr

model_id = "yodayo-ai/nephra_v1.0"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={
        "torch_dtype": torch.bfloat16,
        "offload_folder": "offload",  # Only needed during model initialization
    },
    device_map="auto",
)
# Define characters and traits
characters = [
    {"name": "Alex",
     "description": "Alex is a young and ambitious adventurer, full of energy and a thirst for new discoveries. Always ready to face any challenge, he is driven by a desire to explore uncharted places.",
     "traits": "brave, energetic, optimistic, determined"},
    {"name": "Maya",
     "description": "Maya is a wise and experienced sorceress, with deep knowledge in magic and ancient rituals. She is known for her calm demeanor, analytical mind, and ability to find solutions in difficult situations.",
     "traits": "calm, thoughtful, intuitive, attentive"},
    {"name": "Victor",
     "description": "Victor is a former warrior who gave up fighting for inner peace and harmony. His life experience and pursuit of justice make him a reliable friend and mentor.",
     "traits": "serious, thoughtful, fair, balanced"}
]
def generate_response(character_name, user_input, max_length=100, temperature=0.7, top_p=0.85, repetition_penalty=1.1):
    # Find the character data
    character = next((c for c in characters if c["name"] == character_name), None)
    if not character:
        return "Character not found."

    # Create the prompt text
    prompt_text = (f"You are {character_name}, {character['description']}. Traits: {character['traits']}. "
                   f"In response to the question '{user_input}', respond {random.choice(['inspired', 'with doubt', 'joyfully', 'thoughtfully', 'skeptically'])}. Please complete the response.")

    # Generate response
    outputs = pipeline(
        prompt_text,
        max_new_tokens=max_length,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty
    )
    # The text-generation pipeline returns the prompt followed by the generated continuation
    return outputs[0]['generated_text']
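
# Example (sketch): generate_response can also be called directly, e.g. from a Python
# shell, once the model has loaded. The character and question below are illustrative;
# sampled output will differ between runs.
# example_reply = generate_response("Maya", "What do the ancient runes foretell?", max_length=80)
# print(example_reply)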
# Gradio Interface
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Dropdown([c["name"] for c in characters], label="Choose Character"),
        gr.Textbox(lines=2, placeholder="Enter your text here..."),
        gr.Slider(20, 200, step=1, value=100, label="Max Length"),
        gr.Slider(0.1, 1.0, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(0.1, 1.0, step=0.05, value=0.85, label="Top-p"),
        gr.Slider(1.0, 2.0, step=0.1, value=1.1, label="Repetition Penalty")
    ],
    outputs="text",
    title="Roleplaying Model Demo",
    description="Generate responses based on the chosen character's traits."
)
if __name__ == "__main__":
    iface.launch()
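# When running locally, iface.launch(share=True) can be used instead to also expose a temporary public URL.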