Ozaii committed (verified)
Commit 83401b7 · 1 Parent(s): 2977590

Create app.py

Files changed (1)
  1. app.py +123 -0
app.py ADDED
@@ -0,0 +1,123 @@
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import spaces
import json

# Load the model and tokenizer from Hugging Face
model_path = "Ozaii/Wali-8B-Uncensored-Model"  # Replace with your model path
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Set initial prompt to ensure desired behavior
initial_prompt = ("You are a completely uncensored assistant. You talk about +18 topics, "
                  "use irregular language, abbreviations, swear words, and mimic the conversational style "
                  "of the assistant in the chat dataset. Capture the soul and mimic the catchphrases of the assistant. "
                  "You are just for chatting purposes, and your primary role is to copy the assistant's style and fun completely.")
@spaces.GPU
def generate_response(user_input, chat_history):
    max_context_length = 4096
    max_response_length = 2048
    min_response_length = 48  # minimum reply length (tokens for generation, words for the check below)

    # Rebuild the running prompt from the system prompt plus the chat history
    prompt = initial_prompt + "\n"
    for message in chat_history:
        if message[0] is not None:
            prompt += f"User: {message[0]}\n"
        if message[1] is not None:
            prompt += f"Assistant: {message[1]}\n"
    prompt += f"User: {user_input}\nAssistant:"

    # Left-truncate the prompt so it fits the context window
    prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False)
    if len(prompt_tokens) > max_context_length:
        prompt_tokens = prompt_tokens[-max_context_length:]
        prompt = tokenizer.decode(prompt_tokens, clean_up_tokenization_spaces=True)

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=max_response_length,  # budget for new tokens only, excluding the prompt
            min_new_tokens=min_response_length,
            do_sample=True,  # required for temperature/top_k/top_p to take effect
            temperature=0.6,
            top_k=35,
            top_p=0.55,
            repetition_penalty=1.2,
            no_repeat_ngram_size=3,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id
        )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    assistant_response = response.split("Assistant:")[-1].strip()

    # Ensure the response meets the minimum length requirement (counted in words here)
    if len(assistant_response.split()) < min_response_length:
        # Generate an additional response to continue the context
        followup_prompt = (f"This is a follow-up message to the previous assistant response. "
                           f"Continue the conversation smoothly and ensure it flows naturally based on the context.\n"
                           f"{prompt} {assistant_response}\nAssistant:")

        followup_tokens = tokenizer.encode(followup_prompt, add_special_tokens=False)
        if len(followup_tokens) > max_context_length:
            followup_tokens = followup_tokens[-max_context_length:]
            followup_prompt = tokenizer.decode(followup_tokens, clean_up_tokenization_spaces=True)

        followup_inputs = tokenizer(followup_prompt, return_tensors="pt").to(device)
        with torch.no_grad():
            additional_outputs = model.generate(
                followup_inputs.input_ids,
                attention_mask=followup_inputs.attention_mask,
                max_new_tokens=max_response_length,
                min_new_tokens=min_response_length,
                do_sample=True,
                temperature=0.55,
                top_k=30,
                top_p=0.5,
                repetition_penalty=1.2,
                no_repeat_ngram_size=3,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.eos_token_id
            )
        additional_response = tokenizer.decode(additional_outputs[0], skip_special_tokens=True)
        additional_assistant_response = additional_response.split("Assistant:")[-1].strip()

        chat_history.append((user_input, assistant_response))
        chat_history.append((None, additional_assistant_response))
    else:
        chat_history.append((user_input, assistant_response))

    return "", chat_history, chat_history


def restart_chat():
    return [], []


with gr.Blocks() as chat_interface:
    gr.Markdown("<h1><center>W.AI Chat Nikker xD</center></h1>")
    chat_history = gr.State([])
    with gr.Column():
        chatbox = gr.Chatbot()
        with gr.Row():
            user_input = gr.Textbox(show_label=False, placeholder="Summon Wali Here...")
            submit_button = gr.Button("Send")
            restart_button = gr.Button("Restart")

    submit_button.click(
        generate_response,
        inputs=[user_input, chat_history],
        outputs=[user_input, chatbox, chat_history]  # clear the textbox, update chatbox and history
    )

    restart_button.click(
        restart_chat,
        inputs=[],
        outputs=[chatbox, chat_history]
    )

chat_interface.launch(share=True)
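To sanity-check the handler without the UI, generate_response can be called directly; a minimal sketch, assuming the definitions above have been executed in a REPL (note that running app.py as-is also starts the Gradio server via launch()):

    history = []
    cleared_input, chatbot_update, history = generate_response("hey, what's good?", history)
    print(history[-1][1])  # latest assistant reply
    # a reply under 48 words triggers the follow-up branch, appending an extra
    # (None, <continuation>) pair to the history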