ahmed792002 committed
Commit 4c9ca34 · verified · 1 Parent(s): 915193a

Update app.py

Files changed (1):
  1. app.py +1 -11
app.py CHANGED
@@ -5,23 +5,15 @@ import torch
 # Load pre-trained model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("ahmed792002/alzheimers_memory_support_ai")
 model = AutoModelForCausalLM.from_pretrained("ahmed792002/alzheimers_memory_support_ai")
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)  # Send the model to the correct device
-
-# Function to clean input text
-def clean_text(text):
-    return text.strip()  # Simply remove leading/trailing spaces
 
 # Chatbot function
 def chatbot(query, history, system_message, max_length, temperature, top_k, top_p):
     """
     Processes a user query through the specified model to generate a response.
     """
-    # Clean the input query
-    query = clean_text(query)
 
     # Tokenize input query
-    input_ids = tokenizer.encode(query, return_tensors="pt").to(device)
+    input_ids = tokenizer.encode(query, return_tensors="pt")
 
     # Generate text using the model
     final_outputs = model.generate(
@@ -48,8 +40,6 @@ demo = gr.ChatInterface(
         gr.Slider(1, 100, value=50, step=1, label="Top-K"),  # Slider for top_k
         gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P"),  # Slider for top_p
     ],
-    title="Custom Alzheimer's Memory Support AI",
-    description="This chatbot uses the fine-tuned model 'ahmed792002/alzheimers_memory_support_ai'. Customize settings like max length, temperature, top-k, and top-p for better results.",
 )
 
 if __name__ == "__main__":
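Net effect of the first hunk: the explicit CUDA device placement, the clean_text helper, and the .to(device) call on the encoded input are all dropped, so the model and its input tensors both stay on the default CPU device and can never end up mismatched. A minimal sketch of the resulting inference path, assuming sampling-based generation wired to the slider values (the exact arguments passed inside chatbot() are not visible in this hunk):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Model and inputs both stay on the default (CPU) device after this
    # commit, so no .to(device) calls are needed on either side.
    tokenizer = AutoTokenizer.from_pretrained("ahmed792002/alzheimers_memory_support_ai")
    model = AutoModelForCausalLM.from_pretrained("ahmed792002/alzheimers_memory_support_ai")

    query = "Can you remind me to take my medication?"
    input_ids = tokenizer.encode(query, return_tensors="pt")  # CPU tensor

    # Generation arguments mirror the Gradio sliders; the concrete values
    # here are assumptions for illustration, not taken from the diff.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=256,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
        )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))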
 
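The second hunk only drops the title= and description= keyword arguments from gr.ChatInterface; the sliders stay in place as additional_inputs. A hedged sketch of how the call is wired after the change — the system-message textbox and the Max Length / Temperature sliders are inferred from the chatbot() signature and their ranges are assumptions, since only the Top-K and Top-P sliders appear in this diff:

    import gradio as gr

    def chatbot(query, history, system_message, max_length, temperature, top_k, top_p):
        # Stand-in for the real generation function shown in the first hunk.
        return f"(echo) {query}"

    demo = gr.ChatInterface(
        chatbot,
        additional_inputs=[
            gr.Textbox(value="", label="System message"),                    # assumed
            gr.Slider(16, 512, value=256, step=1, label="Max Length"),       # assumed range
            gr.Slider(0.1, 1.5, value=0.7, step=0.05, label="Temperature"),  # assumed range
            gr.Slider(1, 100, value=50, step=1, label="Top-K"),              # from the diff
            gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P"),       # from the diff
        ],
        # title= and description= were removed by this commit
    )

    if __name__ == "__main__":
        demo.launch()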