AngelaKkkkkkkkk committed
Commit bc906a4
1 Parent(s): ad300e3

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -1,12 +1,12 @@
 from vllm import LLM, SamplingParams
 
-# Initialize the LLM (use your correct model ID)
-llm = LLM(model="Tann-dev/sex-chat-dirty-girlfriend")
+# Initialize the LLM with CPU-only mode
+llm = LLM(model="Tann-dev/sex-chat-dirty-girlfriend", device="cpu")
 
-# Set up sampling parameters (you can adjust these as needed)
+# Set up sampling parameters
 sampling_params = SamplingParams(temperature=0.7, max_tokens=50)
 
-# Define a prompt to generate text
+# Define a prompt to generate text
 prompt = "Hello, how can I help you?"
 
 # Generate text from the model
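
The hunk ends at the "# Generate text from the model" comment, so the generation call itself is not shown in this diff. Assuming the standard vLLM offline API, the step that follows would look roughly like this sketch (not part of the commit):

    # Sketch only, not from this commit: generate with the offline vLLM API.
    outputs = llm.generate([prompt], sampling_params)

    # Each RequestOutput holds the prompt and its generated completions.
    for output in outputs:
        print(output.outputs[0].text)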