from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
import torch

# Load the model and tokenizer.
MODEL_NAME = "meta-llama/Llama-3.2-1B"  # Replace with your fine-tuned model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)

# Load the AWS-Bot dataset used for predefined responses.
DATASET_NAME = "Faizal2805/cyberbot"  # Replace with your dataset
dataset = load_dataset(DATASET_NAME, split="train")
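
# Each entry's "text" field is expected to use the Llama-2 style chat format
# implied by the parsing below, e.g. (illustrative example, not an actual row):
#   "<s>[INST] How do I create an S3 bucket? [/INST] Use the S3 console or the AWS CLI. </s>"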

def get_dataset_response(prompt: str):
    """
    Check if the user's input matches a dataset entry and return its predefined response.
    If no match is found, return None.
    """
    for example in dataset:
        if example["text"].startswith(f"<s>[INST] {prompt} [/INST]"):
            # The response is the text after the closing [/INST] tag; splitting on
            # "</s>" and taking the last piece would return an empty string, so
            # instead split on "[/INST]" and strip the trailing </s> marker.
            response = example["text"].split("[/INST]", 1)[-1]
            return response.replace("</s>", "").strip()
    return None
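
# Note: the linear scan above re-reads the whole dataset on every request.
# For larger datasets, a prompt -> response dict built once at startup is faster;
# a minimal sketch, assuming the same "<s>[INST] ... [/INST] ... </s>" format:
#
#   RESPONSES = {
#       ex["text"].split("[/INST]")[0].replace("<s>[INST]", "").strip():
#           ex["text"].split("[/INST]", 1)[-1].replace("</s>", "").strip()
#       for ex in dataset
#   }
#   def get_dataset_response(prompt: str):
#       return RESPONSES.get(prompt)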

def generate_response(prompt: str):
    """
    Return a response from the dataset if available; otherwise, generate one with the model.
    """
    dataset_response = get_dataset_response(prompt)
    if dataset_response:
        return dataset_response  # Predefined dataset response
    # Fall back to a model-generated response; keep inputs on the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds only the generated text; max_length would also count prompt tokens.
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

app = FastAPI()

class Query(BaseModel):
    text: str

@app.post("/chat")
def chat(query: Query):
    response = generate_response(query.text)
    return {"response": response}

@app.get("/")
def root():
    return {"message": "AWS-Bot is running!"}