mipatov committed on
Commit
4aa6857
·
1 Parent(s): 5890407

use gradio

Browse files
Files changed (1) hide show
  1. app.py +13 -24
app.py CHANGED
@@ -1,13 +1,12 @@
1
  import transformers
2
  import torch
3
  import tokenizers
4
- import streamlit as st
5
  import re
6
 
7
  from PIL import Image
8
 
9
 
10
- @st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None}, allow_output_mutation=True, suppress_st_warning=True)
11
  def get_model(model_name, model_path='pytorch_model.bin'):
12
  tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
13
  model = transformers.OPTForCausalLM.from_pretrained(model_name)
@@ -34,28 +33,18 @@ def predict(text, model, tokenizer, n_beams=5, temperature=2.5, top_p=0.8, lengt
34
  return list(map(tokenizer.decode, out))[0]
35
 
36
 
37
- model, tokenizer = get_model('facebook/opt-13b', 'OPT13b-skeptic.bin')
38
 
39
- # st.title("NeuroKorzh")
 
 
 
 
 
 
 
 
 
40
 
41
- # image = Image.open('korzh.jpg')
42
- # st.image(image, caption='НейроКорж')
43
 
44
- # option = st.selectbox('Выберите своего Коржа', ('Быстрый', 'Глубокий'))
45
- craziness = st.slider(label='Craziness', min_value=0, max_value=100, value=50, step=5)
46
- temperature = 2 + craziness / 50.
47
-
48
- st.markdown("\n")
49
-
50
- text = st.text_area(label='What are you interested in?', value='Covid - a worldwide conspiracy?', height=80)
51
- button = st.button('Go')
52
-
53
- if button:
54
- try:
55
- with st.spinner('Finding out the truth'):
56
- result = predict(text, model, tokenizer, temperature=temperature)
57
-
58
- st.text_area(label='', value=result, height=1100)
59
-
60
- except Exception:
61
- st.error("Ooooops, something went wrong. Please try again and report to me, tg: @vladyur")
 
1
  import transformers
2
  import torch
3
  import tokenizers
4
+ import gradio as gr
5
  import re
6
 
7
  from PIL import Image
8
 
9
 
 
10
  def get_model(model_name, model_path='pytorch_model.bin'):
11
  tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
12
  model = transformers.OPTForCausalLM.from_pretrained(model_name)
 
33
  return list(map(tokenizer.decode, out))[0]
34
 
35
 
36
# Load the fine-tuned model and its tokenizer once at import time;
# both weights and tokenizer come from the same Hub repo.
model, tokenizer = get_model('big-kek/NeuroSkeptic', 'big-kek/NeuroSkeptic')

# Default prompt shown in the input box when the demo starts.
example = 'Who is Bill Gates really?'

# Build the two UI widgets up front, then wire them into the Interface.
prompt_box = gr.components.Textbox(label="what is your interest?", value=example)
answer_box = gr.components.Textbox(label="oh! my ...", interactive=False)

demo = gr.Interface(
    fn=predict,
    inputs=[prompt_box],
    outputs=[answer_box],
)

# NOTE(review): predict is called by Gradio with only the textbox value;
# presumably model/tokenizer are bound elsewhere (e.g. via partial or
# globals inside predict) — confirm against the full predict signature.
demo.launch()
 
50