"""Financial Advisor Bot.

A Gradio app with two tabs:
  1. A chat advisor that answers common personal-finance questions from a
     small keyword-matched template library, falling back to a local
     TinyLlama chat model for open-ended questions.
  2. A 50/30/20 budget calculator.

Intended to run in Colab (``share=True`` exposes a public link).
"""

import numpy as np
import pandas as pd
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load a free model from Hugging Face.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # small model that works well for simple tasks
tokenizer = AutoTokenizer.from_pretrained(model_name)
# NOTE(review): float16 weights assume an accelerator (e.g. Colab GPU);
# CPU-only hosts may need float32 -- confirm the deployment target.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)

# Financial knowledge base - canned answers keyed by topic keyword.
# Checked before the model so common questions get instant, vetted replies.
financial_templates = {
    "budget": "Here's a simple budget template based on the 50/30/20 rule:\n- 50% for needs (rent, groceries, utilities)\n- 30% for wants (dining out, entertainment)\n- 20% for savings and debt repayment",
    "emergency_fund": "An emergency fund should ideally cover 3-6 months of expenses. Start with a goal of $1,000, then build from there.",
    "debt": "Focus on high-interest debt first (like credit cards). Consider the debt avalanche (highest interest first) or debt snowball (smallest balance first) methods.",
    "investing": "For beginners, consider index funds or ETFs for diversification. Time in the market beats timing the market.",
    "retirement": "Take advantage of employer matches in retirement accounts - it's free money. Start early to benefit from compound interest.",
}


def guided_response(user_message, chat_history):
    """Answer a finance question.

    Returns a template reply if *user_message* mentions a known topic
    keyword; otherwise generates a reply with the language model.
    *chat_history* is accepted for interface compatibility with the Gradio
    callback but is not currently used.
    """
    # Check for a template response first (hoist the lowercasing out of the loop).
    lowered = user_message.lower()
    for key, template in financial_templates.items():
        if key in lowered:
            return template

    # For more general queries, use the AI model.
    prompt = f"""I need financial advice: {user_message}
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # explicit mask avoids HF warnings/mis-masking
        max_new_tokens=256,  # bound the *generated* length; max_length also counted the prompt
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # BUG FIX: the original did `response.split("")`, which raises
    # ValueError (empty separator) on every model-backed reply. Extract the
    # assistant's response by decoding only the newly generated tokens.
    prompt_length = inputs["input_ids"].shape[1]
    generated_tokens = outputs[0][prompt_length:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()


def calculate_budget(monthly_income, housing, utilities, groceries, transportation):
    """Compare the user's "needs" spending to the 50/30/20 rule.

    All arguments are monthly dollar amounts from ``gr.Number`` inputs and
    may arrive as ``None`` when a field is left blank. Returns a formatted
    multi-line analysis string.
    """
    # Gradio Number fields yield None when empty; normalise before math.
    monthly_income = monthly_income or 0
    if monthly_income <= 0:
        return "Please enter a monthly income greater than zero to calculate a budget."

    total_needs = (housing or 0) + (utilities or 0) + (groceries or 0) + (transportation or 0)
    needs_percent = (total_needs / monthly_income) * 100
    available_for_wants = monthly_income * 0.3
    available_for_savings = monthly_income * 0.2
    verdict = (
        "Your needs expenses are within recommended limits!"
        if needs_percent <= 50
        else "Your needs expenses exceed 50% of income. Consider areas to reduce spending."
    )
    return f"""Based on the 50/30/20 rule:

Current spending on needs: ${total_needs:.2f} ({needs_percent:.1f}% of income)
Recommended max for needs: ${monthly_income * 0.5:.2f} (50%)

Available for wants: ${available_for_wants:.2f} (30%)
Recommended for savings/debt: ${available_for_savings:.2f} (20%)

{verdict}
"""


# Set up the Gradio interface with tabs.
with gr.Blocks() as app:
    gr.Markdown("# Financial Advisor Bot")

    with gr.Tab("Chat Advisor"):
        chatbot = gr.Chatbot(height=400)
        msg = gr.Textbox(label="Ask a question about personal finance")
        clear = gr.Button("Clear")

        def respond(message, chat_history):
            """Chat callback: append the (user, bot) turn and clear the textbox."""
            bot_message = guided_response(message, chat_history)
            chat_history.append((message, bot_message))
            return "", chat_history

        msg.submit(respond, [msg, chatbot], [msg, chatbot])
        clear.click(lambda: None, None, chatbot, queue=False)

    with gr.Tab("Budget Calculator"):
        gr.Markdown("## 50/30/20 Budget Calculator")
        with gr.Row():
            income = gr.Number(label="Monthly Income (after tax)")
        with gr.Row():
            gr.Markdown("### Monthly Expenses (Needs)")
        with gr.Row():
            housing = gr.Number(label="Housing", value=0)
            utilities = gr.Number(label="Utilities", value=0)
            groceries = gr.Number(label="Groceries", value=0)
            transport = gr.Number(label="Transportation", value=0)
        calculate_btn = gr.Button("Calculate Budget")
        output = gr.Textbox(label="Budget Analysis", lines=10)
        calculate_btn.click(
            calculate_budget,
            inputs=[income, housing, utilities, groceries, transport],
            outputs=output,
        )

# Launch the app in Colab; share=True creates a public link you can share with others.
app.launch(share=True)