duchaba committed on
Commit
878a89c
·
1 Parent(s): a2dd927

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -57,7 +57,6 @@ class HFace_Pluto(object):
57
  self.fname_id = 0
58
  self.dname_img = "img_colab/"
59
  self._huggingface_key=b'gAAAAABld_3fKLl7aPBJzfAq-th37t95pMu2bVbH9QccOSecaUnm33XrpKpCXP4GL6Wr23g3vtrKWli5JK1ZPh18ilnDb_Su6GoVvU92Vzba64k3gBQwKF_g5DoH2vWq2XM8vx_5mKJh'
60
- self._gpt_key=b'gAAAAABld_-y70otUll4Jwq3jEBXiw1tooSFo_gStRbkCyuu9_Dmdehc4M8lI_hFbum9CwyZuj9ZnXgxFIROebcPSF5qoA197VRvzUDQOMxY5zmHnImVROrsXVdZqXyIeYH_Q6cvXvFTX3rLBIKKWgvJmnpYGRaV6Q=='
61
  self._kaggle_key=b'gAAAAABld_4_B6rrRhFYyfl77dacu1RhR4ktaLU6heYhQBSIj4ELBm7y4DzU1R8-H4yPKd0w08s11wkFJ9AR7XyESxM1SsrMBzqQEeW9JKNbl6jAaonFGmqbhFblkQqH4XjsapZru0qX'
62
  self._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc="
63
  self._color_primary = '#2780e3' #blue
@@ -67,6 +66,7 @@ class HFace_Pluto(object):
67
  self._color_warning = '#ff7518' #orange
68
  self._color_danger = '#ff0039' #red
69
  self._color_mid_gray = '#495057'
 
70
  return
71
  #
72
  # pretty print output name-value line
@@ -288,8 +288,7 @@ monty._ph()
288
  # %%write -a app.py
289
 
290
  # client.moderations.create()
291
- # OPENAI_API_KEY="My API Key"
292
- ai_client = openai.OpenAI(api_key=monty._decrypt_it(monty._gpt_key))
293
  # %%writefile -a app.py
294
 
295
  #@add_method(HFace_Pluto)
@@ -354,7 +353,7 @@ def _draw_censor(self,data):
354
  exp = (0.01, 0.01)
355
  x = [data['max_value'], (data['sum_value']-data['max_value'])]
356
  title='\nMessage Is Flagged As Unsafe\n'
357
- lab = [data['max_key'], 'Other']
358
  if (data['is_flagged']):
359
  col=[self._color_danger, self._color_mid_gray]
360
  elif (data['is_safer_flagged']):
@@ -406,8 +405,9 @@ in_box = [gradio.Textbox(lines=1, label="Message to be moderate/censor:", placeh
406
  out_box = [gradio.Plot(label="Moderate/Censor Score: (Red, Orange, and Green)"),
407
  gradio.Textbox(lines=4, label="Response Raw JSON Data:")]
408
  #
409
- title = "Text Moderate - Filter hate, violent, and sexual language."
410
  desc = 'NOTE: The lower value for the Safer setting indicates a more stringent level of censorship.'
 
411
  arti = "<ul><li>This model use the NLP OpenAI Moderation model."
412
  arti += "</li><li>Creator: Duc Haba</li><li>License: GNU General Public License 3.0</li></ul>"
413
  exp = [
 
57
  self.fname_id = 0
58
  self.dname_img = "img_colab/"
59
  self._huggingface_key=b'gAAAAABld_3fKLl7aPBJzfAq-th37t95pMu2bVbH9QccOSecaUnm33XrpKpCXP4GL6Wr23g3vtrKWli5JK1ZPh18ilnDb_Su6GoVvU92Vzba64k3gBQwKF_g5DoH2vWq2XM8vx_5mKJh'
 
60
  self._kaggle_key=b'gAAAAABld_4_B6rrRhFYyfl77dacu1RhR4ktaLU6heYhQBSIj4ELBm7y4DzU1R8-H4yPKd0w08s11wkFJ9AR7XyESxM1SsrMBzqQEeW9JKNbl6jAaonFGmqbhFblkQqH4XjsapZru0qX'
61
  self._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc="
62
  self._color_primary = '#2780e3' #blue
 
66
  self._color_warning = '#ff7518' #orange
67
  self._color_danger = '#ff0039' #red
68
  self._color_mid_gray = '#495057'
69
+ self._ok=b'gAAAAABld_-y70otUll4Jwq3jEBXiw1tooSFo_gStRbkCyuu9_Dmdehc4M8lI_hFbum9CwyZuj9ZnXgxFIROebcPSF5qoA197VRvzUDQOMxY5zmHnImVROrsXVdZqXyIeYH_Q6cvXvFTX3rLBIKKWgvJmnpYGRaV6Q=='
70
  return
71
  #
72
  # pretty print output name-value line
 
288
  # %%write -a app.py
289
 
290
  # client.moderations.create()
291
+ ai_client = openai.OpenAI(api_key=monty._decrypt_it(monty._ok))
 
292
  # %%writefile -a app.py
293
 
294
  #@add_method(HFace_Pluto)
 
353
  exp = (0.01, 0.01)
354
  x = [data['max_value'], (data['sum_value']-data['max_value'])]
355
  title='\nMessage Is Flagged As Unsafe\n'
356
+ lab = [data['max_key'], 'Other 18 categories']
357
  if (data['is_flagged']):
358
  col=[self._color_danger, self._color_mid_gray]
359
  elif (data['is_safer_flagged']):
 
405
  out_box = [gradio.Plot(label="Moderate/Censor Score: (Red, Orange, and Green)"),
406
  gradio.Textbox(lines=4, label="Response Raw JSON Data:")]
407
  #
408
+ title = "Friendly Text Moderate <br>- Identify 19 categories of hate, violent, and sexual language."
409
  desc = 'NOTE: The lower value for the Safer setting indicates a more stringent level of censorship.'
410
+ # desc += "<br>There are 19 different categories of hate, violent, and sexual language that can be identified."
411
  arti = "<ul><li>This model use the NLP OpenAI Moderation model."
412
  arti += "</li><li>Creator: Duc Haba</li><li>License: GNU General Public License 3.0</li></ul>"
413
  exp = [