mrcuddle committed
Commit 2af0bcd · verified · 1 Parent(s): b61919c

Update app.py

Files changed (1):
app.py +16 -0
app.py CHANGED
@@ -2,30 +2,45 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import spaces
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Load the model and tokenizer
 model_name = "mrcuddle/SD-Prompter"
+logging.info(f"Loading model and tokenizer for {model_name}")
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
+logging.info("Model and tokenizer loaded successfully")
 
 @spaces.GPU
 # Function to generate a response
 def chat(message, history):
+    logging.info(f"Received message: {message}")
+    logging.info(f"Chat history: {history}")
+
     # Combine the message and history into a single input
     input_text = " ".join([f"{user}: {msg}" for user, msg in history] + [f"User: {message}"])
+    logging.info(f"Input text: {input_text}")
+
     inputs = tokenizer(input_text, return_tensors="pt")
+    logging.info(f"Tokenized input: {inputs}")
 
     # Generate a response
     with torch.no_grad():
         outputs = model.generate(inputs.input_ids, max_length=300, num_return_sequences=1)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    logging.info(f"Generated response: {response}")
 
     # Extract only the new response part
     response = response.replace(input_text, "").strip()
+    logging.info(f"Extracted response: {response}")
 
     # Append the new message and response to the history
     history.append(("User", message))
     history.append(("Assistant", response))
+    logging.info(f"Updated chat history: {history}")
 
     return history, history
 
@@ -37,4 +52,5 @@ iface = gr.ChatInterface(
 )
 
 # Launch the interface
+logging.info("Launching Gradio interface")
 iface.launch()
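
For context on the new logging calls, a minimal standalone sketch of the configuration this commit adds; the sample output line is illustrative, not taken from the Space's actual logs:

import logging

# Same configuration as the commit: INFO level, timestamped messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

logging.info("Loading model and tokenizer for mrcuddle/SD-Prompter")
# Illustrative output:
# 2025-01-01 12:00:00,000 - INFO - Loading model and tokenizer for mrcuddle/SD-Prompter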
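
One usage note on the `Tokenized input` log line: `tokenizer(..., return_tensors="pt")` returns a `BatchEncoding`, so logging it prints the full `input_ids` and `attention_mask` tensors, which gets verbose for long histories. A hedged alternative (a hypothetical tweak, not part of this commit) logs only the sequence length:

inputs = tokenizer(input_text, return_tensors="pt")
# Hypothetical tweak: log the token count rather than the whole tensor dict.
logging.info(f"Tokenized input length: {inputs.input_ids.shape[1]} tokens")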