anirudh-sub committed on
Commit
ad21763
·
1 Parent(s): 9b219c2

Add app.py and tokenizer.model

Browse files
Files changed (2) hide show
  1. app.py +42 -0
  2. tokenizer.model +3 -0
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio app that generates Lincoln-Douglas debate advice.

Loads a fine-tuned causal language model from the Hugging Face Hub and
exposes a single text-in / text-out Gradio interface.

(The previous fastai Rasam/Sambar image classifier demo that used to live
here as commented-out code has been removed.)
"""

import gradio as gr
import torch
import transformers
from transformers import AutoTokenizer

model = "anirudh-sub/debate_model_practice"

# Build the tokenizer and text-generation pipeline once at startup — the
# original code referenced `tokenizer` and `pipeline` without defining them.
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,  # assumes a GPU/fp16-capable host — TODO confirm
    device_map="auto",
)


def debate_response(text):
    """Generate a model reply to the user's debate question.

    Parameters
    ----------
    text : str
        The user's prompt, e.g. "How do I give a 1AR in Lincoln Douglas
        Debate?".

    Returns
    -------
    str
        Concatenated generated text from all returned sequences.
    """
    # Use the caller's prompt — the original ignored `text` and always
    # generated from one hard-coded question.
    sequences = pipeline(
        text,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_length=500,
    )

    # Original had `reponse += {seq['generated_text']}` (misspelled name,
    # and a set literal concatenated onto a string) and returned the
    # misspelled `resposnse`, which raised NameError.
    return "".join(seq["generated_text"] for seq in sequences)


# Legacy Gradio 3.x component API, matching the style already used in this
# file. `gr.inputs.Text` does not exist; the correct class is `Textbox`.
text = gr.inputs.Textbox()
response = gr.outputs.Textbox()
intf = gr.Interface(fn=debate_response, inputs=text, outputs=response)
intf.launch()  # original never launched the interface, so the app never served
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723