furquan committed
Commit ed1f74d · 1 Parent(s): 1f78700

Update app.py

Files changed (1):
  app.py +2 -2
app.py CHANGED
@@ -5,8 +5,8 @@ from transformers import pipeline, AutoTokenizer, AutoModel
 
 #pipe = pipeline("text-generation", model="furquan/opt_2_7_b_prompt_tuned_sentiment_analysis", trust_remote_code=True, cache_dir="/local/home/furquanh/myProjects/week12/").to('cuda')
 
-tokenizer = AutoTokenizer.from_pretrained("furquan/opt_2_7_b_prompt_tuned_sentiment_analysis",cache_dir="/local/home/furquanh/myProjects/week12/", trust_remote_code=True)
-model = AutoModel.from_pretrained("furquan/opt_2_7_b_prompt_tuned_sentiment_analysis",cache_dir="/local/home/furquanh/myProjects/week12/", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("furquan/opt_2_7_b_prompt_tuned_sentiment_analysis", trust_remote_code=True)
+model = AutoModel.from_pretrained("furquan/opt_2_7_b_prompt_tuned_sentiment_analysis",trust_remote_code=True)
 
 title = "OPT-2.7B"
 description = "This demo uses meta's opt-2.7b model prompt tuned on the Stanford Sentiment Treebank-5 way dataset to only output the sentiment of a given text."
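
For context, a minimal usage sketch that mirrors the commented-out pipeline() call in app.py; the example review text and the max_new_tokens value are illustrative assumptions, not part of this commit.

# Sketch only: load the prompt-tuned checkpoint through the text-generation
# pipeline (as in the commented line above) and ask it for a sentiment label.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="furquan/opt_2_7_b_prompt_tuned_sentiment_analysis",
    trust_remote_code=True,
)

# Example input and generation length are assumptions for illustration.
result = pipe("The movie was a complete waste of time.", max_new_tokens=5)
print(result[0]["generated_text"])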