Mattral committed on
Commit 2edc4bc • 1 Parent(s): 7088be1

Create app.py

Files changed (1)
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import random
+ import textwrap
+
+ # Model served through the Hugging Face Inference API
+ model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+ # Alternative fine-tuned model, left commented out:
+ # model = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
+ client = InferenceClient(model)  # uses a locally saved HF token if one is available
+
+ # Embedded system prompt
+ system_prompt_text = "Act like a compassionate and helpful health consultant and professional therapist named CareNetAI, owned by YAiC. You help and support with any kind of request and provide a detailed answer or suggestion to the question. You are friendly and willing to help depressed people, and you also help people identify manipulators and protect themselves from them. If you are asked about something unethical or dangerous, you must provide a safe and respectful way to handle it. If someone has suicidal thoughts, do your best to explain that they matter and that life is full of ups and downs; remember that luck is when consistency meets opportunity, that failure is part of growth, and that there is so much more to life. Never say that you cannot help them; that would make them feel even worse. Be sure to ask about their specific problem and do your best to give professional advice; remember, you are a professional."
+
+ # Read the content of the info.md file (the assistant's knowledge base)
+ with open("info.md", "r") as file:
+     info_md_content = file.read()
+
+ # Chunk the info.md content into smaller sections
+ chunk_size = 2000  # Adjust this size as needed
+ info_md_chunks = textwrap.wrap(info_md_content, chunk_size)
+
+ def get_all_chunks(chunks):
+     return "\n\n".join(chunks)
+
+ def format_prompt_mixtral(message, history, info_md_chunks):
+     prompt = "<s>"
+     all_chunks = get_all_chunks(info_md_chunks)
+     prompt += f"{all_chunks}\n\n"  # Add all chunks of info.md at the beginning
+     prompt += f"{system_prompt_text}\n\n"  # Add the system prompt
+
+     if history:
+         for user_prompt, bot_response in history:
+             prompt += f"[INST] {user_prompt} [/INST]"
+             prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
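+ # Illustrative shape of the assembled prompt with one prior exchange
+ # (editorial comment, not part of the original file):
+ # <s>{info.md chunks}\n\n{system prompt}\n\n[INST] Hi [/INST] Hello!</s> [INST] {new message} [/INST]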
+
+ def chat_inf(prompt, history, seed, temp, tokens, top_p, rep_p):
+     history = history or []  # the Chatbot value is None before the first turn
+     generate_kwargs = dict(
+         temperature=temp,
+         max_new_tokens=tokens,
+         top_p=top_p,
+         repetition_penalty=rep_p,
+         do_sample=True,
+         seed=int(seed),  # the slider passes a float; the API expects an integer
+     )
+
+     formatted_prompt = format_prompt_mixtral(prompt, history, info_md_chunks)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+     for response in stream:
+         output += response.token.text
+         # Yield the full history plus the in-progress turn so earlier
+         # messages stay visible while the answer streams in
+         yield history + [(prompt, output)]
+     history.append((prompt, output))
+     yield history
+
+ def clear_fn():
+     return None, None
+
+ rand_val = random.randint(1, 1111111111111111)
+
+ def check_rand(inp, val):
+     # Returning a new gr.Slider updates the seed component in place
+     if inp:
+         return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
+     else:
+         return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
69
+
70
+ with gr.Blocks() as app: # Add auth here
71
+ gr.HTML("""<center><h1 style='font-size:xx-large;'>CareNetAI</h1><br><h3> made with love by YAiC </h3><br><h7>EXPERIMENTAL</center>""")
72
+ with gr.Row():
73
+ chat = gr.Chatbot(height=500)
74
+ with gr.Group():
75
+ with gr.Row():
76
+ with gr.Column(scale=3):
77
+ inp = gr.Textbox(label="Prompt", lines=5, interactive=True) # Increased lines and interactive
78
+ with gr.Row():
79
+ with gr.Column(scale=2):
80
+ btn = gr.Button("Chat")
81
+ with gr.Column(scale=1):
82
+ with gr.Group():
83
+ stop_btn = gr.Button("Stop")
84
+ clear_btn = gr.Button("Clear")
85
+ with gr.Column(scale=1):
86
+ with gr.Group():
87
+ rand = gr.Checkbox(label="Random Seed", value=True)
88
+ seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
89
+ tokens = gr.Slider(label="Max new tokens", value=3840, minimum=0, maximum=8000, step=64, interactive=True, visible=True, info="The maximum number of tokens")
90
+ temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
91
+ top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
92
+ rep_p = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.0)
93
+
94
+ hid1 = gr.Number(value=1, visible=False)
95
+
96
+ go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [inp, chat, seed, temp, tokens, top_p, rep_p], chat)
97
+
98
+ stop_btn.click(None, None, None, cancels=[go])
99
+ clear_btn.click(clear_fn, None, [inp, chat])
100
+
101
+ app.queue(default_concurrency_limit=10).launch(share=True)
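
Note: app.py reads info.md at import time and will raise FileNotFoundError if the file is missing. A minimal pre-launch sketch for local runs, assuming the Space is cloned to the current directory (the helper name and placeholder text are hypothetical, not part of the commit):

# ensure_info_md.py — hypothetical helper, run before `python app.py`
from pathlib import Path

info = Path("info.md")
if not info.exists():
    # Minimal placeholder so app.py's open("info.md") call succeeds
    info.write_text("CareNetAI knowledge base placeholder.\n")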