spxwlkr committed on
Commit
3b3fae5
·
verified ·
1 Parent(s): 35a902c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -33
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from peft import AutoPeftModelForCausalLM
2
  from transformers import AutoTokenizer, pipeline
3
  import gradio as gr
@@ -13,39 +14,64 @@ if api_key is None:
13
  login(api_key)
14
 
15
  # Load the model
16
- model = AutoPeftModelForCausalLM.from_pretrained("Moritz-Pfeifer/financial-times-classification-llama-2-7b-v1.3")
17
- tokenizer = AutoTokenizer.from_pretrained("Moritz-Pfeifer/financial-times-classification-llama-2-7b-v1.3")
18
-
19
- def predict_text(test, model, tokenizer):
20
- prompt = f"""
21
- You are given a news article regarding the greater Boston area.
22
- Analyze the sentiment of the article enclosed in square brackets,
23
- determine if it is positive, negative or other, and return the answer as the corresponding sentiment label
24
- "positive" or "negative". If the sentiment is neither positive or negative, return "other".
25
-
26
- [{test}] ="""
27
- pipe = pipeline(task="text-generation",
28
- model=model,
29
- tokenizer=tokenizer,
30
- max_new_tokens = 1,
31
- temperature = 0.1,
32
- )
33
- result = pipe(prompt)
34
- answer = result[0]['generated_text'].split("=")[-1]
35
- # print(answer)
36
- if "positive" in answer.lower():
37
- return "positive"
38
- elif "negative" in answer.lower():
39
- return "negative"
40
- else:
41
- return "other"
42
-
43
- def predict(input_text):
44
- return predict_text(input_text, model, tokenizer)
45
-
46
-
47
- interface = gr.Interface(fn=predict, inputs="text", outputs="text", title="Text Classifier", description="Insert your text and get the classification result.")
48
- interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
  if __name__ == "__main__":
51
  interface.launch(share=True)
 
1
+ from AICodeInit import *
2
  from peft import AutoPeftModelForCausalLM
3
  from transformers import AutoTokenizer, pipeline
4
  import gradio as gr
 
# Authenticate with the Hugging Face Hub so gated/private model downloads succeed.
# (api_key is read and validated earlier in the file.)
login(api_key)

# Load the models once at import time so every Gradio request reuses them.
# NOTE(review): load_model/create_pipeline come from AICodeInit; presumably
# `nlp` is an entity-recognition model and (`prompt`, `pipe`) are the prompt
# template and text-generation pipeline for the LLaMA classifier — confirm
# against AICodeInit. The previous inline LLaMA implementation
# (AutoPeftModelForCausalLM / predict_text) now lives in that module; the
# commented-out copy that was kept here has been removed — version control
# preserves the history.
nlp = load_model()
prompt, pipe = create_pipeline()
54
def entity_sentiment(text):
    """Analyze *text* for entities and sentiment, combining two signals.

    Uses the helper functions from AICodeInit together with the module-level
    ``nlp`` model and the ``pipe``/``prompt`` LLaMA pipeline.

    Returns a 6-tuple:
        (entities, entities_with_context, per_entity_scores,
         average_score, average_sentiment_label, llama_sentiment_label)
    """
    found_entities = extract_entities(text, nlp)
    contextual_entities = extract_entities_with_context(text, nlp)

    # Per-entity numeric sentiment scores, then their mean and its label.
    per_entity_scores = analyze_entity_sentiments_score(contextual_entities)
    mean_score = calculate_avg_score(per_entity_scores)
    mean_label = categorize_sentiment(mean_score)

    # Independent whole-text classification from the LLaMA pipeline.
    llama_label = predict_text(text, pipe, prompt)

    return (
        found_entities,
        contextual_entities,
        per_entity_scores,
        mean_score,
        mean_label,
        llama_label,
    )
67
+
68
+ demo = gr.Interface(
69
+ fn=entity_sentiment,
70
+ inputs=["text"],
71
+ outputs=["text","text","text","text","text"],
72
+ )
73
+
74
+ demo.launch(share=True)
75
 
# Entry-point guard. The old `interface` object was removed in this commit
# (it only survives in commented-out code), so referencing it here raised
# NameError when the script was run directly; launch the current `demo`.
if __name__ == "__main__":
    demo.launch(share=True)