import sys
import json
import os
from datetime import datetime, timezone

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from peft import PeftModel


# Load the base model and attach the LoRA adapter
def load_model():
    base_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    adapter_path = "Harish2002/cli-lora-tinyllama"  # ✅ fixed path

    tokenizer = AutoTokenizer.from_pretrained(base_model)
    model = AutoModelForCausalLM.from_pretrained(base_model)
    model = PeftModel.from_pretrained(model, adapter_path)
    return tokenizer, model


# Generate a plan from the input instruction
def generate_plan(prompt, tokenizer, model):
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,
    )
    # return_full_text=False strips the prompt so only the completion is returned
    output = pipe(prompt, return_full_text=False)[0]["generated_text"]
    return output.strip()


# Check whether a line looks like a shell command
def is_shell_command(line):
    return line.startswith(
        ("git", "bash", "tar", "gzip", "grep", "python", "./", "cd", "ls")
    )


# Append a trace record to logs/trace.jsonl
def log_trace(prompt, response):
    os.makedirs("logs", exist_ok=True)
    trace = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "input": prompt,
        "response": response,
    }
    with open("logs/trace.jsonl", "a") as f:
        f.write(json.dumps(trace) + "\n")


# Main
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print('Usage: python agent.py "Your instruction here"')
        sys.exit(1)

    user_input = sys.argv[1]
    tokenizer, model = load_model()
    result = generate_plan(user_input, tokenizer, model)

    # Print the result and echo a dry-run if the first line is a shell command
    print("\nGenerated Plan:\n")
    print(result)

    lines = result.splitlines()
    if lines and is_shell_command(lines[0]):
        print("\nDry-run:")
        print(f"echo {lines[0]}")

    log_trace(user_input, result)
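
# Example invocation (the instruction text below is illustrative; any
# CLI-style request works):
#
#   python agent.py "Create a compressed tar archive of the logs directory"
#
# Note the dry-run design: when the first line of the generated plan looks
# like a shell command, it is printed via `echo` rather than executed, so
# nothing ever runs on the host machine. Each run is also appended to
# logs/trace.jsonl for later inspection.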