mrfakename committed on
Commit f6eaeab
1 Parent(s): fffc278

Update app.py

Files changed (1)
  1. app.py +3 -11
app.py CHANGED
@@ -1,5 +1,4 @@
  import spaces
- from detoxify import Detoxify
  import torch
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
  import gradio as gr
@@ -22,9 +21,6 @@ model = AutoModelForCausalLM.from_pretrained(
  ).to(device)
  @spaces.GPU(enable_queue=True)
  def generate_text(text, temperature, maxLen):
-     mdl = Detoxify('original', device='cuda')
-     if mdl.predict(text)['toxicity'] > 0.7:
-         raise gr.Error("Sorry, our systems may have detected toxic content. Please try a different input.")
      inputs = tokenizer([text], return_tensors="pt").to(device)
      streamer = TextIteratorStreamer(tokenizer)
      generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=maxLen, temperature=temperature)
@@ -33,19 +29,15 @@ def generate_text(text, temperature, maxLen):
      t = ""
      toks = 0
      for out in streamer:
-         toks += 1
-         if toks >= 3:
-             toks = 0
-             if mdl.predict(t)['toxicity'] > 0.7:
-                 raise gr.Error("Sorry, our systems may have detected toxic content. Please try a different input.")
-                 break
          t += out
          yield t
  with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
      gr.Markdown("""
  # (Unofficial) Demo of Microsoft's Phi-2 on GPU
  
- Not affiliated with Microsoft!
+ The model is suitable for commercial use and is licensed under the MIT license. I am not responsible for any outputs you generate. You are solely responsible for ensuring that your usage of the model complies with applicable laws and regulations.
+ 
+ I am not affiliated with the authors of the model (Microsoft).
  
  Note: for longer generation (>512), keep clicking "Generate!" The demo is currently limited to 512 tokens per generation to ensure all users have access to this service. Please note that once you start generating, you cannot stop generating until the generation is done.
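
For reference, the gate this commit removes was built on the detoxify package. Below is a minimal standalone sketch of that check, using only what is visible in the removed lines (the 'original' checkpoint, the device='cuda' argument, and the 0.7 threshold); the toxicity_model and is_toxic names are illustrative, not from the app:

```python
from detoxify import Detoxify

# Pretrained toxicity classifier used by the removed code; predict() returns a
# dict of scores in [0, 1], of which only the 'toxicity' key was consulted.
toxicity_model = Detoxify('original', device='cuda')

def is_toxic(text: str, threshold: float = 0.7) -> bool:
    return toxicity_model.predict(text)['toxicity'] > threshold
```

Before this commit the check ran in two places: once on the prompt before generation started, and then on the accumulated output roughly every third streamed chunk (the toks counter), raising gr.Error whenever the score exceeded 0.7.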
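The streaming path that remains relies on transformers' TextIteratorStreamer, which only yields text while model.generate is running elsewhere; the call that launches generation sits in the two context lines elided between the second and third hunks. The sketch below assumes those elided lines start generate() on a worker thread and that the app loads the microsoft/phi-2 checkpoint; the loading arguments are assumptions, not taken from this diff:

```python
from threading import Thread

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer

# Assumed model setup; the actual from_pretrained call is not shown in this diff.
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", torch_dtype="auto").to(device)

def generate_text(text, temperature, maxLen):
    inputs = tokenizer([text], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=maxLen, temperature=temperature)
    # generate() blocks until decoding finishes, so it runs on a worker thread
    # while this generator drains the streamer and yields the growing text.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    t = ""
    for out in streamer:
        t += out
        yield t
```

Because the handler is a generator, Gradio refreshes the output box with each yielded prefix, which is how partial output streams into the UI. The real handler additionally carries the @spaces.GPU(enable_queue=True) decorator shown in the diff; it is omitted here so the sketch runs outside a Space.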