AngelaKkkkkkkkk committed on
Commit
ad300e3
1 Parent(s): 55e5bce

Update app.py

Files changed (1): app.py +16 -12
app.py CHANGED
@@ -1,12 +1,16 @@
- # Load and run the model:
- vllm serve "Tann-dev/sex-chat-dirty-girlfriend"
-
- # Call the server using curl:
- curl -X POST "http://localhost:8000/v1/chat/completions" \
-     -H "Content-Type: application/json" \
-     --data '{
-         "model": "Tann-dev/sex-chat-dirty-girlfriend"
-         "messages": [
-             {"role": "user", "content": "Hello!"}
-         ]
-     }'
 
 
 
 
 
+ from vllm import LLM, SamplingParams
+
+ # Initialize the LLM (use your correct model ID)
+ llm = LLM(model="Tann-dev/sex-chat-dirty-girlfriend")
+
+ # Set up sampling parameters (you can adjust these as needed)
+ sampling_params = SamplingParams(temperature=0.7, max_tokens=50)
+
+ # Define a prompt to generate text from
+ prompt = "Hello, how can I help you?"
+
+ # Generate text from the model
+ output = llm.generate([prompt], sampling_params=sampling_params)
+
+ # Print the output (generate returns RequestOutput objects;
+ # the completion text lives under .outputs)
+ print(f"Generated text: {output[0].outputs[0].text}")
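For context on the last line of the new version: vLLM's LLM.generate returns a list of RequestOutput objects, and each completion's text sits under the .outputs attribute rather than directly on the result, hence output[0].outputs[0].text. A minimal sketch of batching several prompts and reading the results, assuming the same model ID and sampling setup as this commit (the prompts here are illustrative, not from the repo):

from vllm import LLM, SamplingParams

# Same model and sampling setup as app.py in this commit.
llm = LLM(model="Tann-dev/sex-chat-dirty-girlfriend")
sampling_params = SamplingParams(temperature=0.7, max_tokens=50)

# generate() accepts a batch of prompts and returns one
# RequestOutput per prompt, in order.
prompts = ["Hello, how can I help you?", "What can you do?"]
outputs = llm.generate(prompts, sampling_params=sampling_params)

# Each RequestOutput pairs a prompt with its CompletionOutput
# candidates; take the first candidate's text for each.
for request_output in outputs:
    print(f"Prompt:    {request_output.prompt!r}")
    print(f"Generated: {request_output.outputs[0].text!r}")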