Faizal2805 committed
Commit bf01853 · verified · 1 Parent(s): 958c61e

Update app.py

Files changed (1)
  1. app.py +24 -1
app.py CHANGED
@@ -1,15 +1,38 @@
  from fastapi import FastAPI
  from pydantic import BaseModel
  from transformers import AutoModelForCausalLM, AutoTokenizer
+ from datasets import load_dataset
  import torch

  # Load model and tokenizer
- MODEL_NAME = "meta-llama/Llama-3.2-1B"  # Replace with your model
+ MODEL_NAME = "meta-llama/Llama-3.2-1B"  # Replace with your fine-tuned model

  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)

+ # Load AWS-Bot dataset
+ DATASET_NAME = "Faizal2805/cyberbot"  # Replace with your dataset
+ dataset = load_dataset(DATASET_NAME, split="train")
+
+ def get_dataset_response(prompt: str):
+     """
+     Check if the user's input matches a dataset entry and return a predefined response.
+     If no match is found, return None.
+     """
+     for example in dataset:
+         if example["text"].startswith(f"<s>[INST] {prompt} [/INST]"):
+             return example["text"].split("[/INST]")[-1].replace("</s>", "").strip()
+     return None
+
  def generate_response(prompt: str):
+     """
+     Generate a response from the dataset if available; otherwise, use the model.
+     """
+     dataset_response = get_dataset_response(prompt)
+     if dataset_response:
+         return dataset_response  # Return predefined dataset response
+
+     # Fallback to model-based response
      inputs = tokenizer(prompt, return_tensors="pt")
      outputs = model.generate(**inputs, max_length=200)
      return tokenizer.decode(outputs[0], skip_special_tokens=True)
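A note on the lookup this commit adds: get_dataset_response re-scans the entire dataset on every request. If the records' text field reliably follows the <s>[INST] question [/INST] answer </s> template that the startswith check assumes, the same behavior can come from an index built once at startup. A minimal sketch, not part of this commit; RESPONSE_INDEX is a made-up name:

# Sketch only: build a prompt -> answer index once at startup, so each
# lookup is a dict hit instead of a per-request linear scan. Assumes every
# record's "text" follows "<s>[INST] {question} [/INST] {answer} </s>".
RESPONSE_INDEX = {}
for example in dataset:
    text = example["text"]
    if "[/INST]" in text:
        question, _, answer = text.partition("[/INST]")
        key = question.replace("<s>", "").replace("[INST]", "").strip()
        RESPONSE_INDEX[key] = answer.replace("</s>", "").strip()

def get_dataset_response(prompt: str):
    # Exact-match lookup; returns None when the prompt is not in the dataset.
    return RESPONSE_INDEX.get(prompt)

The trade-off is a small amount of memory for the index against an O(n) scan on every request.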
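On the model fallback path, max_length=200 caps prompt and completion tokens together, so a long prompt leaves little or no room for the answer. If only the completion should be bounded, transformers' generate also accepts max_new_tokens; a one-line variant, otherwise unchanged:

outputs = model.generate(**inputs, max_new_tokens=200)  # caps generated tokens only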
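Finally, FastAPI and BaseModel are imported but no route appears in this hunk, so the HTTP endpoint presumably lives elsewhere in app.py. For orientation, a minimal sketch of typical wiring; the /generate path, the PromptRequest model, and its prompt field are assumptions, not part of the commit:

from fastapi import FastAPI  # already imported at the top of app.py
from pydantic import BaseModel

app = FastAPI()

class PromptRequest(BaseModel):
    prompt: str  # hypothetical field name; adjust to the real request schema

@app.post("/generate")
def generate(request: PromptRequest):
    # Dataset answer first, model generation as the fallback.
    return {"response": generate_response(request.prompt)}

A POST to /generate with a body like {"prompt": "..."} then returns either the canned dataset answer or a fresh model completion.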