JustKiddo committed on
Commit
16bab56
·
verified ·
1 Parent(s): 9ce7a58

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -67
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from datasets import load_dataset
4
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
 
6
  """
7
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -9,12 +8,13 @@ For more information on `huggingface_hub` Inference API support, please check th
9
 
10
  #Update: Using a new base model
11
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
12
  dataset = load_dataset("JustKiddo/KiddosVault")
13
 
14
- # Load the tokenizer and model for token display
15
- tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") #Google's T5 Model
16
- model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
17
-
18
  def respond(
19
  message,
20
  history: list[tuple[str, str]],
@@ -47,71 +47,25 @@ def respond(
47
  response += token
48
  yield response
49
 
50
- #My custom token generator
51
- def generate_tokens(text):
52
- input = tokenizer(text, return_tensors="pt")
53
- output = model.generate(**input)
54
-
55
- input_ids = input["input_ids"].tolist()[0]
56
- output_ids = output.tolist()[0]
57
-
58
- formatted_output = [format(x, 'd') for x in output_ids]
59
-
60
- input_tokens_str = tokenizer.convert_ids_to_tokens(input_ids)
61
- #output_tokens_str = tokenizer.convert_tokens_to_ids(output_ids)
62
-
63
- return " ".join(input_tokens_str), " ".join(formatted_output)
64
-
65
  """
66
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
67
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
- #chatInterface = gr.ChatInterface(
70
- # respond,
71
- # additional_inputs=[
72
- # gr.Textbox(value="You are a professional Mental Healthcare Chatbot.", label="System message"),
73
- # gr.Slider(minimum=1, maximum=6144, value=6144, step=1, label="Max new tokens"),
74
- # gr.Slider(minimum=0.1, maximum=4.0, value=1, step=0.1, label="Temperature"),
75
- # gr.Slider(
76
- # minimum=0.1,
77
- # maximum=1.0,
78
- # value=0.95,
79
- # step=0.05,
80
- # label="Top-p (nucleus sampling)",
81
- # ),
82
- # ],
83
- #)
84
-
85
- with gr.Blocks() as demo:
86
- with gr.Column():
87
- gr.ChatInterface(
88
- respond,
89
- additional_inputs=[
90
- gr.Textbox(value="You are a professional Mental Healthcare Chatbot.", label="System message"),
91
- gr.Slider(minimum=1, maximum=6144, value=6144, step=1, label="Max new tokens"),
92
- gr.Slider(minimum=0.1, maximum=4.0, value=1, step=0.1, label="Temperature"),
93
- gr.Slider(
94
- minimum=0.1,
95
- maximum=1.0,
96
- value=0.95,
97
- step=0.05,
98
- label="Top-p (nucleus sampling)",
99
- ),
100
- ],
101
- )
102
-
103
- with gr.Row():
104
- input_text = gr.Textbox(label="Input text")
105
- input_tokens = gr.Textbox(label="Input tokens")
106
- output_ids = gr.Textbox(label="Output tokens")
107
-
108
- def update_tokens(input_text):
109
- input_tokens_str, output_ids = generate_tokens(input_text)
110
- return input_tokens_str, output_ids
111
-
112
- input_text.change(update_tokens,
113
- inputs=input_text,
114
- outputs=[input_tokens, output_ids])
115
 
116
  if __name__ == "__main__":
117
- demo.launch(debug=True, share=True)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from datasets import load_dataset
 
4
 
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
8
 
9
  #Update: Using a new base model
10
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
11
+ #client = InferenceClient("HuggingFaceH4/zephyr-7b-gemma-v0.1")
12
+ #topic_model = BERTopic.load("MaartenGr/BERTopic_Wikipedia")
13
+ # Train model
14
+ #topic_model = BERTopic("english")
15
+ #topics, probs = topic_model.fit_transform(docs)
16
  dataset = load_dataset("JustKiddo/KiddosVault")
17
 
 
 
 
 
18
  def respond(
19
  message,
20
  history: list[tuple[str, str]],
 
47
  response += token
48
  yield response
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  """
51
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
52
  """
53
+ demo = gr.ChatInterface(
54
+ respond,
55
+ additional_inputs=[
56
+ gr.Textbox(value="You are a professional Mental Healthcare Chatbot.", label="System message"),
57
+ gr.Slider(minimum=1, maximum=6144, value=6144, step=1, label="Max new tokens"),
58
+ gr.Slider(minimum=0.1, maximum=4.0, value=1, step=0.1, label="Temperature"),
59
+ gr.Slider(
60
+ minimum=0.1,
61
+ maximum=1.0,
62
+ value=0.95,
63
+ step=0.05,
64
+ label="Top-p (nucleus sampling)",
65
+ ),
66
+ ],
67
+ )
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  if __name__ == "__main__":
71
+ demo.launch(debug=True)