TurtleLiu committed
Commit 87b2337 · 1 Parent(s): 0043882

Update app.py

Files changed (1)
1. app.py +7 -6
app.py CHANGED
@@ -1,3 +1,4 @@
+'''
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
 import gradio as gr
 import torch
@@ -57,6 +58,7 @@ def generate_response(message, history):
     result = pipe(f"{prompt}")[0]['generated_text']
     return result
 
+'''
 '''
 def generate_response(prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0, **kwargs,):
     temperature = float(temperature)
@@ -91,7 +93,7 @@ def generate_response(prompt, history, temperature=0.9, max_new_tokens=1024, top
         max_new_tokens=max_new_tokens,
     )
 '''
-
+'''
 # UI design
 examples=[
     ["Patient is feeling stressed due to work and has trouble sleeping.", None, None, None, None, None],
@@ -107,9 +109,9 @@ gr.ChatInterface(
     examples=examples,
     concurrency_limit=20,
 ).launch(show_api=False, debug=True)
-
+'''
+
 
-'''
 from huggingface_hub import InferenceClient
 import gradio as gr
 
@@ -117,7 +119,7 @@ client = InferenceClient(
     "TurtleLiu/mistral7b_psychology_bot"
 )
 
-
+'''
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
@@ -125,7 +127,7 @@ def format_prompt(message, history):
         prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
-
+'''
 
 
 def format_prompt(message, history):
@@ -178,4 +180,3 @@ gr.ChatInterface(
     examples=examples,
     concurrency_limit=20,
 ).launch(show_api=False, debug=True)
-'''
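
For reference, a minimal sketch of the code path this commit leaves active: the local transformers-pipeline blocks are fenced off behind triple-quoted strings, and the app serves the hosted model through huggingface_hub's InferenceClient. Only format_prompt, the client target, and the gr.ChatInterface launch arguments come directly from the diff; the generate_response body, its sampling parameters, the first line of the history loop, and the simplified examples list are assumptions for illustration, since those lines fall outside the hunks shown above.

# Sketch of app.py after this commit; generate_response is an assumption,
# not the function body from the repo.
from huggingface_hub import InferenceClient
import gradio as gr

# Hosted fine-tuned model named in the diff.
client = InferenceClient("TurtleLiu/mistral7b_psychology_bot")

def format_prompt(message, history):
    # Mistral-instruct chat format, as in the diff: each past exchange is
    # wrapped in [INST] ... [/INST] with the reply closed by </s>. The
    # user_prompt line is reconstructed from the standard Mistral template.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate_response(message, history):
    # Assumed call: text_generation is the standard InferenceClient method
    # for text-generation endpoints; parameter values here are illustrative.
    prompt = format_prompt(message, history)
    return client.text_generation(prompt, max_new_tokens=1024, temperature=0.9)

examples = [
    "Patient is feeling stressed due to work and has trouble sleeping.",
]

gr.ChatInterface(
    generate_response,
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False, debug=True)

As a usage check, format_prompt("How can I help?", [("I feel anxious", "Tell me more.")]) yields <s>[INST] I feel anxious [/INST] Tell me more.</s> [INST] How can I help? [/INST], the turn structure that Mistral-7B-Instruct fine-tunes expect.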