import os
import random

import gradio as gr
import ollama  # NOTE(review): imported but unused in this file — confirm before removing
from huggingface_hub import InferenceClient


def chat_short_story(length, genre, theme, tone, writing_style):
    """
    Generate a creative short story via a Hugging Face chat model.

    Parameters:
        length (int): Approximate word count for the story.
        genre (str): Genre of the story (e.g., fantasy, mystery).
        theme (str): Central theme of the story.
        tone (str): Tone of the story (e.g., humorous, serious).
        writing_style (str): Writing style (e.g., poetic, conversational).

    Returns:
        str: The generated short story, or an "Error: ..." message string if
        the API key is missing, the client cannot be created, or the model
        call fails. (Errors are returned, not raised, so the Gradio UI can
        display them directly.)
    """
    # System message defining the assistant's role.
    system_message = (
        "You are a highly creative short story writer capable of crafting stories across any genre. "
        "For every story created, ensure you generate a suitable title."
    )

    # User prompt built from the UI selections.
    prompt = (
        f"Write a creative short story of approximately {length} words in the {genre} genre. "
        f"Use a {writing_style} writing style with a {tone} tone. "
        f"The story should revolve around the theme of {theme}. "
        f"Ensure the narrative is compelling and includes a suitable title."
    )

    # The API key comes from the environment; never hard-code credentials.
    HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
    if not HUGGINGFACE_API_KEY:
        return "Error: Hugging Face API key not found. Please set the HUGGINGFACE_API_KEY environment variable."

    try:
        client = InferenceClient(api_key=HUGGINGFACE_API_KEY)
    except Exception as e:
        return f"Error: Failed to initialize Hugging Face client. Details: {e}"

    try:
        result = client.chat.completions.create(
            model="meta-llama/Llama-3.2-3B-Instruct",
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt},
            ],
        )
    except Exception as e:
        # BUGFIX: this message was a string literal broken across a physical
        # newline in the original source (a syntax error).
        return f"Error: Failed to interact with the model. Details: {e}"

    # huggingface_hub returns a ChatCompletionOutput dataclass, not a dict:
    # use attribute access ("choices" in result is not guaranteed to work).
    if result.choices:
        return result.choices[0].message.content
    return "Error: No story generated. Please check your prompt or model configuration."


# Predefined options for the UI. A random pick from each list seeds the
# widgets so every page load starts from a fresh combination.
Length = [100, 250, 750]
r_length = random.choice(Length)

Genre = [
    "Fiction", "Nonfiction", "Drama", "Poetry", "Fantasy", "Horror",
    "Mystery", "Science Fiction", "Suspense", "Women's fiction",
    "Supernatural/Paranormal", "Young adult",
]
r_genre = random.choice(Genre)

Themes = [
    "Love", "Redemption", "Forgiveness", "Coming of age", "Revenge",
    "Good vs evil", "Bravery and hardship", "The power of social status",
    "The destructive nature of love", "The fallibility of the human condition",
]
r_themes = random.choice(Themes)

Writing_Styles = ["Expository", "Narrative", "Descriptive", "Persuasive", "Creative"]
r_Style = random.choice(Writing_Styles)

Tones = ["Formal", "Optimistic", "Worried", "Friendly", "Curious", "Assertive", "Encouraging"]
r_tones = random.choice(Tones)

# Gradio interface: one input widget per chat_short_story parameter, in order.
iface = gr.Interface(
    fn=chat_short_story,
    inputs=[
        gr.Slider(value=r_length, label="Story_Length", minimum=100, maximum=2500),
        gr.Dropdown(label="Story_Genre", choices=Genre, value=r_genre),
        gr.Dropdown(label="Story_Theme", choices=Themes, value=r_themes),
        gr.Dropdown(label="Writing_Styles", choices=Writing_Styles, value=r_Style),
        gr.Dropdown(label="Story_Tone", choices=Tones, value=r_tones),
    ],
    outputs=gr.Text(),
    title="Welcome to the Patrick's Story Generator",
)

# Launch only when run as a script, so the module can be imported for testing.
if __name__ == "__main__":
    iface.launch()