furquan committed on
Commit
3853cf9
·
1 Parent(s): 16816bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -0
app.py CHANGED
@@ -8,6 +8,7 @@ from transformers import pipeline, AutoTokenizer, AutoModel
8
  tokenizer = AutoTokenizer.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
9
  model = AutoModel.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
10
 
 
11
  title = "OPT-1.3B"
12
  description = "This demo uses meta's opt-1.3b Causal LM as base model that was prompt tuned on the Stanford Sentiment Treebank-5 way dataset to only output the sentiment of a given text."
13
  article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2104.08691.pdf' target='_blank'>The Power of Scale for Parameter-Efficient Prompt Tuning</a></p>"
 
8
  tokenizer = AutoTokenizer.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
9
  model = AutoModel.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
10
 
11
+
12
  title = "OPT-1.3B"
13
  description = "This demo uses meta's opt-1.3b Causal LM as base model that was prompt tuned on the Stanford Sentiment Treebank-5 way dataset to only output the sentiment of a given text."
14
  article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2104.08691.pdf' target='_blank'>The Power of Scale for Parameter-Efficient Prompt Tuning</a></p>"