mrfakename committed on
Commit
f7d0046
1 Parent(s): a4cdc2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -1
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import spaces
 
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
4
  import gradio as gr
@@ -19,16 +20,24 @@ model = AutoModelForCausalLM.from_pretrained(
19
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
20
  trust_remote_code=True,
21
  ).to(device)
22
-
23
@spaces.GPU(enable_queue=True)
def generate_text(text, temperature, maxLen):
    """Stream model output for *text* as a growing string.

    Args:
        text: Prompt to feed the language model.
        temperature: Sampling temperature forwarded to ``model.generate``.
        maxLen: Maximum number of new tokens to generate.

    Yields:
        The accumulated generated text after each streamed chunk.
    """
    encoded = tokenizer([text], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer)
    # Run generation in a background thread so we can consume the
    # streamer incrementally on this one.
    Thread(
        target=model.generate,
        kwargs=dict(
            encoded,
            streamer=streamer,
            max_new_tokens=maxLen,
            temperature=temperature,
        ),
    ).start()
    accumulated = ""
    for chunk in streamer:
        accumulated += chunk
        yield accumulated
34
  with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
 
1
  import spaces
2
+ from detoxify import Detoxify
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
5
  import gradio as gr
 
20
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
21
  trust_remote_code=True,
22
  ).to(device)
23
# Toxicity classifier for input/output moderation. Use the same device
# selection as the main model: hard-coding 'cuda' crashes on CPU-only
# hosts, while `device` is already set conditionally above.
mdl = Detoxify('original', device=device)
24
@spaces.GPU(enable_queue=True)
def generate_text(text, temperature, maxLen):
    """Stream model output for *text*, moderating both prompt and output.

    The prompt is rejected up front if the Detoxify classifier scores it
    above the toxicity threshold; during streaming, the accumulated output
    is re-checked every few chunks and generation is aborted on a positive.

    Args:
        text: Prompt to feed the language model.
        temperature: Sampling temperature forwarded to ``model.generate``.
        maxLen: Maximum number of new tokens to generate.

    Yields:
        The accumulated generated text after each streamed chunk.

    Raises:
        gr.Error: If the prompt or the generated text is flagged as toxic.
    """
    # Reject toxic prompts before spending any GPU time on generation.
    if mdl.predict(text)['toxicity'] > 0.7:
        raise gr.Error("Sorry, our systems may have detected toxic content. Please try a different input.")
    inputs = tokenizer([text], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=maxLen, temperature=temperature)
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    t = ""
    toks = 0
    for out in streamer:
        t += out
        toks += 1
        # Re-check every 3 chunks. The check runs AFTER appending the
        # current chunk so moderation covers everything yielded so far
        # (the original checked `t` before the append, lagging behind
        # what the user had already seen).
        if toks == 3:
            toks = 0
            if mdl.predict(t)['toxicity'] > 0.7:
                raise gr.Error("Sorry, our systems may have detected toxic content. Please try a different input.")
        yield t
43
  with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo: